file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---|
KazWong/omniverse_sample/script_window/read_robot_joint.py | from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
art = dc.get_articulation("/World/wheelbarrow")
dof_states = dc.get_articulation_dof_states(art, _dynamic_control.STATE_ALL)
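# STATE_ALL packs position, velocity and effort for every DOF into one structured array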
#print(dof_states)
back_left = dc.find_articulation_dof(art, "wheel_left_joint")
back_right = dc.find_articulation_dof(art, "wheel_right_joint")
back_left_state = dc.get_dof_state(back_left)
back_right_state = dc.get_dof_state(back_right)
#print(back_left_state.pos)
#print(back_right_state.pos)
agv_base_link = dc.find_articulation_body(art, "agv_base_link")
base_footprint = dc.find_articulation_body(art, "base_footprint")
wheel_center_link = dc.find_articulation_body(art, "wheel_center_link")
agv_base_link_state = dc.get_rigid_body_angular_velocity(agv_base_link)
base_footprint_state = dc.get_rigid_body_angular_velocity(base_footprint)
wheel_center_link_state = dc.get_rigid_body_angular_velocity(wheel_center_link)
print(agv_base_link_state)
print(base_footprint_state)
print(wheel_center_link_state)
| 1,057 | Python | 35.482757 | 79 | 0.768212 |
KazWong/omniverse_sample/script_window/move_robot_joint.py | from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
art = dc.get_articulation("/World/soap_odom/odom/robot")
front_left = dc.find_articulation_dof(art, "wheel_front_left_joint")
front_right = dc.find_articulation_dof(art, "wheel_front_right_joint")
back_left = dc.find_articulation_dof(art, "wheel_back_left_joint")
back_right = dc.find_articulation_dof(art, "wheel_back_right_joint")
dc.wake_up_articulation(art)
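# Velocity targets are in rad/s; 3.14 rad/s is roughly half a wheel revolution per second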
dc.set_dof_velocity_target(front_left, -3.14)
dc.set_dof_velocity_target(front_right, 3.14)
dc.set_dof_velocity_target(back_left, -3.14)
dc.set_dof_velocity_target(back_right, 3.14)
| 662 | Python | 37.999998 | 70 | 0.758308 |
KazWong/omniverse_sample/script_window/check_collision.py | from omni.physx.scripts import utils
stage = omni.usd.get_context().get_stage()
omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
cube_prim = stage.GetPrimAtPath("/World/Cube")
UsdGeom.XformCommonAPI(cube_prim).SetTranslate((0,0,100.0))
utils.setRigidBody(cube_prim, "convexHull", False)
import carb
import omni.physx
from omni.physx import get_physx_scene_query_interface
counter = 0
# Defines a cubic region to check overlap with
extent = carb.Float3(200.0, 200.0, 200.0)
origin = carb.Float3(0.0, 0.0, 0.0)
rotation = carb.Float4(0.0, 0.0, 1.0, 0.0)
def report_hit(hit):
return True
while True:
# physX query to detect number of hits for a cubic region
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rotation, report_hit, False)
print(numHits)
# physX query to detect number of hits for a spherical region
# numHits = get_physx_scene_query_interface().overlap_sphere(radius, origin, self.report_hit, False)
if numHits > 0:
print("collide")
break
| 1,040 | Python | 30.545454 | 104 | 0.723077 |
KazWong/omniverse_sample/script_window/random_cube.py | from omni.physx.scripts import utils
stage = omni.usd.get_context().get_stage()
prim = stage.DefinePrim(f"/World/cube1", "Cube")
UsdGeom.XformCommonAPI(prim).SetTranslate([500.0, 2.0, 60.0])
UsdGeom.XformCommonAPI(prim).SetScale((50.0, 50.0, 50.0))
prim_path = stage.GetPrimAtPath(f"/World/cube1")
utils.setRigidBody(prim_path, "convexHull", False)
| 352 | Python | 31.090906 | 61 | 0.738636 |
KazWong/omniverse_sample/script_window/spawn_and_move.py | import carb
import omni
import omni.kit.app
import time
from pxr import UsdGeom, Gf, Sdf
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
#from omni.isaac.dynamic_control import _dynamic_control
from omni.physx.scripts import utils
from omni.isaac.dynamic_control import _dynamic_control
result, nucleus = find_nucleus_server()
stage = omni.usd.get_context().get_stage()
prefix = "/World/soap_odom"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
robot_prim = stage.DefinePrim(prim_path, "Xform")
robot_prim.GetReferences().AddReference(nucleus + "/Library/Robots/Soap_0/soap_odom.usd")
omni.timeline.get_timeline_interface().play()
print("play")
dc = _dynamic_control.acquire_dynamic_control_interface()
art = dc.get_articulation("/World/soap_odom/odom/robot")
front_left = dc.find_articulation_dof(art, "wheel_front_left_joint")
front_right = dc.find_articulation_dof(art, "wheel_front_right_joint")
back_left = dc.find_articulation_dof(art, "wheel_back_left_joint")
back_right = dc.find_articulation_dof(art, "wheel_back_right_joint")
dc.wake_up_articulation(art)
app = omni.kit.app.get_app()
while not app.is_running():
time.sleep(1.0)
print("running")
dc.set_dof_velocity_target(front_left, -3.14)
dc.set_dof_velocity_target(front_right, 3.14)
dc.set_dof_velocity_target(back_left, -3.14)
dc.set_dof_velocity_target(back_right, 3.14)
while not app.is_running():
app.update()
#omni.timeline.get_timeline_interface().stop()
| 1,504 | Python | 27.396226 | 89 | 0.753324 |
KazWong/omniverse_sample/script_window/teleport_cube_on_run.py | from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
cube = dc.get_rigid_body(f"/World/cube1")
dc.wake_up_rigid_body(cube)
tf = _dynamic_control.Transform( (250.0, 250.0, 500.0), (0.0, 0.0, 0.0, 1.0))
dc.set_rigid_body_pose(cube, tf)
| 298 | Python | 28.899997 | 77 | 0.711409 |
KazWong/omniverse_sample/load_env_robot/test_env.py | import numpy as np
class Env_config:
def __init__(self, omni, kit):
self.usd_path = "omniverse://localhost/Library/Robots/config_robot/robot_event_cam.usd"
self.kit = kit
self.omni = omni
def create_objects(self, cube_num, cylinder_num, sphere_num):
from pxr import UsdGeom, Gf, PhysxSchema, UsdPhysics, Sdf, PhysicsSchemaTools
from omni.physx.scripts import utils
TRANSLATION_RANGE = 500.0
object_list = []
# create cube
stage = self.kit.get_stage()
for num in range(cube_num):
# create first cube
result, path = self.omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
if num == 0:
object_list.append("/World/Cube")
continue
if num < 10:
object_list.append("/World/Cube_0"+str(num))
else:
object_list.append("/World/Cube_"+str(num))
# create cylinder
for num in range(cylinder_num):
# create first cylinder
result, path = self.omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cylinder")
if num == 0:
object_list.append("/World/Cylinder")
continue
if num < 10:
object_list.append("/World/Cylinder_0"+str(num))
else:
object_list.append("/World/Cylinder_"+str(num))
# create sphere
for num in range(sphere_num):
# create first sphere
result, path = self.omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Sphere")
if num == 0:
object_list.append("/World/Sphere")
continue
if num < 10:
object_list.append("/World/Sphere_0"+str(num))
else:
object_list.append("/World/Sphere_"+str(num))
for mesh in object_list:
translation = np.random.rand(3) * TRANSLATION_RANGE
translation[2] = 40.0
cube_prim = stage.GetPrimAtPath(mesh)
UsdGeom.XformCommonAPI(cube_prim).SetTranslate(translation.tolist())
#UsdGeom.XformCommonAPI(cube_prim).SetRotate((0.0, 0.0, 0.0))
#UsdGeom.XformCommonAPI(cube_prim).SetScale((30.0, 30.0, 30.0))
utils.setRigidBody(cube_prim, "convexHull", False)
utils.setCollider(cube_prim, approximationShape="convexHull")
return object_list
def domain_randomization_test(self, target_list):
import omni.isaac.dr as dr
dr_interface = dr._dr.acquire_dr_interface()
asset_path = "omniverse://localhost/Isaac"
# List of textures to randomize from
texture_list = [
asset_path + "/Samples/DR/Materials/Textures/checkered.png",
asset_path + "/Samples/DR/Materials/Textures/marble_tile.png",
asset_path + "/Samples/DR/Materials/Textures/picture_a.png",
asset_path + "/Samples/DR/Materials/Textures/picture_b.png",
asset_path + "/Samples/DR/Materials/Textures/textured_wall.png",
asset_path + "/Samples/DR/Materials/Textures/checkered_color.png",
]
# domain randomization on position
result, prim = self.omni.kit.commands.execute(
'CreateMovementComponentCommand',
path='/World/movement_component',
prim_paths=target_list,
min_range=(-600.0, -600.0, 50.0),
max_range=(600.0, 600.0, 50.0),
target_position=None,
target_paths=None,
duration=1,
include_children=False,
seed=12345)
# domain randomization on textures
#result, prim = self.omni.kit.commands.execute(
# "CreateTextureComponentCommand",
# path='/World/texture_component',
# prim_paths=target_list,
# enable_project_uvw=False,
# texture_list=texture_list,
# duration=1,
# include_children=False,
# seed=12345)
# domain randomization on scale
result, prim = self.omni.kit.commands.execute(
'CreateScaleComponentCommand',
path='/World/scale_component',
prim_paths=target_list,
min_range=(0.5, 0.5, 1),
max_range=(2.0, 2.0, 1),
uniform_scaling=False,
duration=1,
include_children=False,
seed=12345)
dr_interface.toggle_manual_mode()
| 4,631 | Python | 39.278261 | 104 | 0.556467 |
KazWong/omniverse_sample/load_env_robot/load_env_robot.py | import numpy as np
import random
import os
import sys
import signal
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
def Run(args):
startup_config = {
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
kit = OmniKitHelper(startup_config)
#include after kit
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
#from omni.isaac.dynamic_control import _dynamic_control
from omni.physx.scripts import utils
result, nucleus = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server, exiting")
exit()
# enable extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
ext_manager.set_extension_enabled_immediate("omni.kit.window.stage", True)
#load environment
env_path = nucleus + args.env_path
print(env_path)
omni.usd.get_context().open_stage(env_path, None)
#random 10 objects
stage = kit.get_stage()
TRANSLATION_RANGE = 1000.0
SCALE = 30.0
for i in range(10):
prim_type = random.choice(["Cube", "Sphere", "Cylinder"])
prim = stage.DefinePrim(f"/World/cube{i}", prim_type)
translation = np.random.rand(3) * TRANSLATION_RANGE
translation[2] = 40.0
UsdGeom.XformCommonAPI(prim).SetTranslate(translation.tolist())
UsdGeom.XformCommonAPI(prim).SetScale((SCALE, SCALE, SCALE))
#prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
prim_path = stage.GetPrimAtPath(f"/World/cube{i}")
utils.setRigidBody(prim_path, "convexHull", False)
#load robot
translation = np.random.rand(3) * TRANSLATION_RANGE
angle = np.random.rand(1)
prefix = "/World/soap_odom"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
robot_prim = stage.DefinePrim(prim_path, "Xform")
robot_prim.GetReferences().AddReference(args.robo_path)
xform = UsdGeom.Xformable(robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(translation.tolist())
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), (angle[0])))
xform_op.Set(mat)
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/odom/robot/agv_lidar/ROS_Lidar.enabled"), value=True, prev=None)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/odom/robot/ROS_PoseTree.enabled"), value=True, prev=None)
omni.kit.commands.execute(
"ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/odom/robot/ROS_JointState.enabled"), value=True, prev=None)
omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/ROS_Clock.enabled"), value=True, prev=None)
kit.play()
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
kit.update()
kit.stop()
kit.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument("--env_path", type=str, help="Path to environment usd file", required=True)
parser.add_argument("--robo_path", type=str, help="Path to robot usd file", required=True)
args = parser.parse_args()
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting...")
quit()
signal.signal(signal.SIGINT, handle_exit)
Run(args)
| 3,768 | Python | 30.672269 | 127 | 0.718684 |
KazWong/omniverse_sample/load_env_robot/load_env_robot_edited.py | import numpy as np
import random
import os
import sys
import signal
import argparse
from argparse import Namespace
from test_env import Env_config
from test_robot import Robot_config
from omni.isaac.python_app import OmniKitHelper
def Run(args):
startup_config = {
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
kit = OmniKitHelper(startup_config)
#include after kit
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
#from omni.isaac.dynamic_control import _dynamic_control
from omni.physx.scripts import utils
result, nucleus = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server, exiting")
exit()
# enable extension
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
ext_manager.set_extension_enabled_immediate("omni.kit.window.stage", True)
#load environment
env_path = nucleus + args.env_path
print(env_path)
omni.usd.get_context().open_stage(env_path, None)
test_env = Env_config(omni,kit)
# create objects
obj_list = test_env.create_objects(4,4,4)
import omni.isaac.dr as dr
dr_interface = dr._dr.acquire_dr_interface()
#print(obj_list)
# domain randomization
test_env.domain_randomization_test(obj_list)
# load robot
stage = kit.get_stage()
TRANSLATION_RANGE = 1000.0
translation = np.random.rand(3) * TRANSLATION_RANGE
angle = np.random.rand(1)
prefix = "/World/soap_odom"
prim_path = omni.usd.get_stage_next_free_path(stage, prefix, False)
print(prim_path)
robot_prim = stage.DefinePrim(prim_path, "Xform")
robot_prim.GetReferences().AddReference(args.robo_path)
xform = UsdGeom.Xformable(robot_prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
mat = Gf.Matrix4d().SetTranslate(translation.tolist())
mat.SetRotateOnly(Gf.Rotation(Gf.Vec3d(0, 0, 1), (angle[0])))
xform_op.Set(mat)
DRIVE_STIFFNESS = 10000.0
# Set joint drive parameters
wheel_back_left_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_back_left_joint"), "angular")
wheel_back_left_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
wheel_back_right_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_back_right_joint"), "angular")
wheel_back_right_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
wheel_front_left_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_front_left_joint"), "angular")
wheel_front_left_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
wheel_front_right_joint = UsdPhysics.DriveAPI.Apply(stage.GetPrimAtPath(f"{prim_path}/agv_base_link/wheel_front_right_joint"), "angular")
wheel_front_right_joint.GetDampingAttr().Set(DRIVE_STIFFNESS)
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
#from omni.isaac import RosBridgeSchema
#omni.kit.commands.execute('ROSBridgeCreatePoseTree', path='/World/soap_odom/ROS_PoseTree', parent=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_PoseTree.enabled"), value=True, prev=None)
#omni.kit.commands.execute('RosBridgeCreatePrim', path='/ROS_PoseTree', parent=None, enabled=True, scehma_type=<class 'omni.isaac.RosBridgeSchema.RosPoseTree'>)
# add ros joint state
#omni.kit.commands.execute('ROSBridgeCreateJointState', path='/World/soap_odom/ROS_JointState', parent=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_JointState.enabled"), value=True, prev=None)
#omni.kit.commands.execute('RosBridgeCreatePrim', path='/World/soap_odom/ROS_JointState', parent=None, enabled=True, scehma_type=<class 'omni.isaac.RosBridgeSchema.RosJointState'>)
# add ros lidar
#omni.kit.commands.execute('ROSBridgeCreateLidar', path='/World/soap_odom/agv_lidar/ROS_Lidar', parent=None)
#omni.kit.commands.execute('RosBridgeCreatePrim', path='/World/soap_odom/agv_lidar/ROS_Lidar', parent=None, enabled=True, scehma_type=<class 'omni.isaac.RosBridgeSchema.RosLidar'>)
# add ros clock
omni.kit.commands.execute('ROSBridgeCreateClock',path='/ROS_Clock',parent=None)
omni.kit.commands.execute('ChangeProperty', prop_path=Sdf.Path('/World/ROS_Clock.queueSize'), value=0, prev=10)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/agv_lidar/ROS_Lidar.enabled"), value=True, prev=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_PoseTree.enabled"), value=True, prev=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/soap_odom/ROS_JointState.enabled"), value=True, prev=None)
#omni.kit.commands.execute("ChangeProperty", prop_path=Sdf.Path("/World/ROS_Clock.enabled"), value=True, prev=None)
kit.play()
test_rob = Robot_config(stage, omni, robot_prim)
# initial robot
test_rob.teleport((0,0,30), 0)
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
if test_rob.check_overlap_box() == True:
# # reset robot to origin
print("colide!!, reset robot")
test_rob.teleport((0,0,30), 0)
dr_interface.randomize_once()
#test_rob.check_overlap_box()
kit.update()
kit.stop()
kit.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument("--env_path", type=str, help="Path to environment usd file", required=True)
parser.add_argument("--robo_path", type=str, help="Path to robot usd file", required=True)
args = parser.parse_args()
print("running with args: ", args)
def handle_exit(*args, **kwargs):
print("Exiting...")
quit()
signal.signal(signal.SIGINT, handle_exit)
Run(args)
| 6,073 | Python | 40.602739 | 184 | 0.739009 |
KazWong/omniverse_sample/load_env_robot/test_robot.py | import numpy as np
class Robot_config:
def __init__(self, stage, omni, robot_prim):
self.usd_path = "omniverse://localhost/Library/Robots/config_robot/robot_event_cam.usd"
self.omni = omni
from omni.isaac.dynamic_control import _dynamic_control
self.dc = _dynamic_control.acquire_dynamic_control_interface()
self.omni = omni
self.robot_prim = robot_prim
self.ar = None
self.stage = stage
def teleport(self, location, rotation, settle=False):
from pxr import Gf
from omni.isaac.dynamic_control import _dynamic_control
print("before teleport", self.ar)
#if self.ar is None:
print(type(self.robot_prim.GetPath().pathString), self.robot_prim.GetPath().pathString)
self.ar = self.dc.get_articulation(self.robot_prim.GetPath().pathString)
print("after teleport", self.ar)
chassis = self.dc.get_articulation_root_body(self.ar)
self.dc.wake_up_articulation(self.ar)
rot_quat = Gf.Rotation(Gf.Vec3d(0, 0, 1), rotation).GetQuaternion()
tf = _dynamic_control.Transform(
location,
(rot_quat.GetImaginary()[0], rot_quat.GetImaginary()[1], rot_quat.GetImaginary()[2], rot_quat.GetReal()),
)
self.dc.set_rigid_body_pose(chassis, tf)
self.dc.set_rigid_body_linear_velocity(chassis, [0, 0, 0])
self.dc.set_rigid_body_angular_velocity(chassis, [0, 0, 0])
self.command((-20, 20, -20, 20))
# Settle the robot onto the ground
if settle:
frame = 0
velocity = 1
while velocity > 0.1 and frame < 120:
self.omni.usd.get_context().update(1.0 / 60.0)
lin_vel = self.dc.get_rigid_body_linear_velocity(chassis)
velocity = np.linalg.norm([lin_vel.x, lin_vel.y, lin_vel.z])
frame = frame + 1
def command(self,motor_value):
chassis = self.dc.get_articulation_root_body(self.ar)
#num_joints = self.dc.get_articulation_joint_count(self.ar)
#num_dofs = self.dc.get_articulation_dof_count(self.ar)
#num_bodies = self.dc.get_articulation_body_count(self.ar)
wheel_back_left = self.dc.find_articulation_dof(self.ar, "wheel_back_left_joint")
wheel_back_right = self.dc.find_articulation_dof(self.ar, "wheel_back_right_joint")
wheel_front_left = self.dc.find_articulation_dof(self.ar, "wheel_front_left_joint")
wheel_front_right = self.dc.find_articulation_dof(self.ar, "wheel_front_right_joint")
self.dc.wake_up_articulation(self.ar)
wheel_back_left_speed = self.wheel_speed_from_motor_value(motor_value[0])
wheel_back_right_speed = self.wheel_speed_from_motor_value(motor_value[1])
wheel_front_left_speed = self.wheel_speed_from_motor_value(motor_value[2])
wheel_front_right_speed = self.wheel_speed_from_motor_value(motor_value[3])
self.dc.set_dof_velocity_target(wheel_back_left, np.clip(wheel_back_left_speed, -10, 10))
self.dc.set_dof_velocity_target(wheel_back_right, np.clip(wheel_back_right_speed, -10, 10))
self.dc.set_dof_velocity_target(wheel_front_left, np.clip(wheel_front_left_speed, -10, 10))
self.dc.set_dof_velocity_target(wheel_front_right, np.clip(wheel_front_right_speed, -10, 10))
# idealized motor model
def wheel_speed_from_motor_value(self, motor_input):
print("speed is ",motor_input)
return motor_input
def check_overlap_box(self):
# Defines a cubic region to check overlap with
import omni.physx
from omni.physx import get_physx_scene_query_interface
import carb
#print("*"*50)
chassis = self.dc.get_articulation_root_body(self.ar)
robot_base_pose = self.dc.get_rigid_body_pose(chassis)
#print("chassis is ", chassis)
#print("pose is ", robot_base_pose)
print("pose is ", robot_base_pose.p)
#print("*"*50)
extent = carb.Float3(38.0, 26.0, 5.0)
# origin = carb.Float3(0.0, 0.0, 0.0)
origin = robot_base_pose.p
rotation = carb.Float4(0.0, 0.0, 1.0, 0.0)
# physX query to detect number of hits for a cubic region
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rotation, self.report_hit, False)
print("num of overlaps ", numHits)
# physX query to detect number of hits for a spherical region
# numHits = get_physx_scene_query_interface().overlap_sphere(radius, origin, self.report_hit, False)
#self.kit.update()
return numHits > 1
def report_hit(self, hit):
from pxr import UsdGeom, Gf, Vt
# When a collision is detected, the object colour changes to red.
# hitColor = Vt.Vec3fArray([Gf.Vec3f(180.0 / 255.0, 16.0 / 255.0, 0.0)])
# usdGeom = UsdGeom.Mesh.Get(self.stage, hit.rigid_body)
# usdGeom.GetDisplayColorAttr().Set(hitColor)
return True
| 5,081 | Python | 47.4 | 117 | 0.623696 |
KazWong/omniverse_sample/load_env_robot/README.md | # load_env_robot
This sample integrates five functions:
1. Use OmniKitHelper to run Omniverse
2. Load the environment
3. Add rigid bodies with random positions and orientations
4. Load the robot and enable the ROS bridge
5. Randomize the robot position and orientation
A minimal sketch of this flow is shown below.
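The sketch below condenses the flow into one outline. It is only a sketch, assuming a reachable Nucleus server and the Isaac Sim Python environment; the environment path is a placeholder, and the robot USD path is the one used elsewhere in this repo. The complete implementation is `load_env_robot.py`.
```python
# Condensed sketch of load_env_robot.py -- not a drop-in replacement.
from omni.isaac.python_app import OmniKitHelper

kit = OmniKitHelper({"renderer": "RayTracedLighting", "headless": False})

# These imports need a running Kit instance, so they come after OmniKitHelper.
import omni
import numpy as np
from pxr import UsdGeom
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx.scripts import utils

result, nucleus = find_nucleus_server()
# Placeholder environment path -- pass your own --env_path in the real script.
omni.usd.get_context().open_stage(nucleus + "/Isaac/Environments/Simple_Room/simple_room.usd", None)

stage = kit.get_stage()
for i in range(10):  # rigid bodies at random positions
    prim = stage.DefinePrim(f"/World/cube{i}", "Cube")
    UsdGeom.XformCommonAPI(prim).SetTranslate((np.random.rand(3) * 1000.0).tolist())
    utils.setRigidBody(prim, "convexHull", False)

# The robot is added as a USD reference; the ROS bridge prims are enabled afterwards.
robot_prim = stage.DefinePrim("/World/soap_odom", "Xform")
robot_prim.GetReferences().AddReference(nucleus + "/Library/Robots/Soap_0/soap_odom.usd")

kit.play()
```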
| 243 | Markdown | 23.399998 | 53 | 0.798354 |
KazWong/omniverse_sample/omnihelper/check_collision.py | import numpy as np
import random
import os
import sys
import signal
import argparse
from argparse import Namespace
from omni.isaac.python_app import OmniKitHelper
def check_overlap_box():
# Defines a cubic region to check overlap with
import omni.physx
from omni.physx import get_physx_scene_query_interface
import carb
#print("*"*50)
extent = carb.Float3(50.0, 50.0, 50.0)
origin = carb.Float3(0.0, 0.0, 0.0)
rotation = carb.Float4(0.0, 0.0, 1.0, 0.0)
# physX query to detect number of hits for a cubic region
numHits = get_physx_scene_query_interface().overlap_box(extent, origin, rotation, report_hit, False)
print("num of overlaps ", numHits)
# physX query to detect number of hits for a spherical region
# numHits = get_physx_scene_query_interface().overlap_sphere(radius, origin, self.report_hit, False)
#self.kit.update()
return numHits > 1
def report_hit(hit):
#from pxr import UsdGeom, Gf, Vt
#stage = kit.get_stage()
## When a collision is detected, the object colour changes to red.
#hitColor = Vt.Vec3fArray([Gf.Vec3f(180.0 / 255.0, 16.0 / 255.0, 0.0)])
#usdGeom = UsdGeom.Mesh.Get(stage, hit.rigid_body)
#usdGeom.GetDisplayColorAttr().Set(hitColor)
return True
def Run(args):
startup_config = {
"renderer": "RayTracedLighting",
"headless": args.headless,
"experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
}
kit = OmniKitHelper(startup_config)
#include after kit
import carb
import omni
import omni.kit.app
from pxr import UsdGeom, Gf, Sdf, UsdPhysics
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx.scripts import utils
result, nucleus = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server, exiting")
exit()
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_manager.set_extension_enabled_immediate("omni.isaac.ros_bridge", True)
ext_manager.set_extension_enabled_immediate("omni.kit.window.stage", True)
env_path = nucleus + args.env_path
print(env_path)
omni.usd.get_context().open_stage(env_path, None)
stage = kit.get_stage()
omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
cube_prim = stage.GetPrimAtPath("/World/Cube")
UsdGeom.XformCommonAPI(cube_prim).SetTranslate((0,0,100))
utils.setRigidBody(cube_prim, "convexHull", False)
kit.app.update()
kit.app.update()
print("Loading stage...")
while kit.is_loading():
kit.update(1.0 / 60.0)
print("Loading Complete")
kit.play()
while kit.app.is_running():
# Run in realtime mode, we don't specify the step size
if check_overlap_box() == True:
# # reset robot to origin
print("colide!!")
kit.update()
kit.stop()
kit.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--headless", help="run in headless mode (no GUI)", action="store_true")
parser.add_argument("--env_path", type=str, help="Path to environment usd file", required=True)
args = parser.parse_args()
Run(args)
| 3,098 | Python | 29.99 | 104 | 0.698515 |
Moetassem/OmniverseLegRotateExtension/README.md | # Extension Project Template
This project was automatically generated.
- "app" - It is a folder link to the location of your *Omniverse Kit* based app.
- "exts" - It is a folder where you can add new extensions. It was automatically added to extension search path. (Extension Manager -> Gear Icon -> Extension Search Path).
Open this folder using Visual Studio Code. It will suggest you to install few extensions that will make python experience better.
Look for "omni.gym.4LegRL" extension in extension manager and enable it. Try applying changes to any python files, it will hot-reload and you can observe results immediately.
# Sharing Your Extensions
This folder is ready to be pushed to any git repository. Once pushed, a direct link to the git repository can be added to the *Omniverse Kit* extension search paths.
The link might look like this: `git://github.com/[user]/[your_repo].git?branch=main&dir=exts`
git://github.com/Moetassem/OmniverseLegRotateExtension.git?branch=master&dir=exts
Notice that `exts` is the repository subfolder containing the extensions. More information can be found in the "Git URL as Extension Search Paths" section of the developer manual.
To add a link to your *Omniverse Kit* based app, go to: Extension Manager -> Gear Icon -> Extension Search Path
| 1,257 | Markdown | 53.69565 | 174 | 0.778839 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/omni/gym/4LegRL/Model.py | import omni.ui as ui
class Model(ui.SimpleFloatModel):
def __init__(self, stage, prim_path: str, axis: str):
axes = ['x', 'y', 'z']
if axis not in axes:
raise ValueError("Invalid sim type. Expected one of: %s" % axes)
self.stage = stage
self.prim_path = prim_path
self.axis = axis
self.axisIndex = axes.index(axis)
self.prim = self.stage.GetPrimAtPath(self.prim_path)
self.primRots = self.prim.GetAttribute('xformOp:rotateXYZ').Get()
super().__init__(self.primRots[self.axisIndex])
def getAxisIndex(self):
return self.axisIndex
def getPrimPath(self):
return self.prim_path
# def setPrimPath(self, new_prim_path: str):
# _value_changed()
# self.prim_path = new_prim_path
def _value_changed(self) -> None:
return super()._value_changed() | 884 | Python | 31.777777 | 76 | 0.602941 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/omni/gym/4LegRL/extension.py | import omni.ext
import omni.usd
import omni.ui as ui
import omni.kit.commands
from pxr import Usd, Gf, Tf, Trace
import carb
import carb.events
from .Model import *
import traceback
import asyncio
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def __init__(self):
self.context = None
self.stage = None
self.selection = None
self.xmodel = None
self.zmodel = None
# self.app = omni.kit.app.get_app()
# pass
def on_startup(self, ext_id):
print("[omni.gym.4LegRL] MyExtension startup")
self.context = omni.usd.get_context()
self.stage = self.context.get_stage()
self.selection = self.context.get_selection()
self.listener = Tf.Notice.Register(Usd.Notice.ObjectsChanged, self._on_change_event, self.stage)
self._window = ui.Window("My Window", width=300, height=300, dockPreference=ui.DockPreference.RIGHT_BOTTOM)
self.load_window()
def _on_selection_event(e: carb.events.IEvent):
if e.type==int(omni.usd.StageEventType.SELECTION_CHANGED):
self.load_window()
# print(f"Selection Changed!: %s \n" % str(self.context.get_selection().get_selected_prim_paths()))
self.selection_event_sub = (
self.context.get_stage_event_stream().create_subscription_to_pop(_on_selection_event, name="Selection")
)
def load_window(self):
if len(self.context.get_selection().get_selected_prim_paths()) > 1:
with self._window.frame:
with ui.VStack():
ui.Label("Select only one component")
elif len(self.context.get_selection().get_selected_prim_paths()) == 1:
self.primSelectedPath = self.context.get_selection().get_selected_prim_paths()[0]
self.xmodel = Model(self.stage, self.primSelectedPath, "x")
self.zmodel = Model(self.stage, self.primSelectedPath, "z")
def _on_slider_change(self):
if self.axisIndex == 0:
# await omni.kit.app.get_app().next_update_async()
omni.kit.commands.execute('ChangePropertyCommand',
prop_path=self.prim_path+'.xformOp:rotateXYZ',
value= Gf.Vec3d(self.get_value_as_float(),self.primRots[1],self.primRots[2]),
prev=self.primRots)
elif self.axisIndex == 2:
# await omni.kit.app.get_app().next_update_async()
omni.kit.commands.execute('ChangePropertyCommand',
prop_path=self.prim_path+'.xformOp:rotateXYZ',
value= Gf.Vec3d(self.primRots[0],self.primRots[1],self.get_value_as_float()),
prev=self.primRots)
self.xmodel.add_value_changed_fn(_on_slider_change)
self.zmodel.add_value_changed_fn(_on_slider_change)
with self._window.frame:
with ui.VStack():
ui.Label(f"Prim Selected: '{(self.primSelectedPath)}'")
with ui.HStack():
ui.Spacer(height=ui.Percent(10))
ui.FloatSlider(self.xmodel, min=-30, max=30)
ui.Spacer(height=ui.Percent(20))
ui.FloatSlider(self.zmodel, min=-30, max=30)
ui.Spacer(height=ui.Percent(20))
else:
with self._window.frame:
with ui.VStack():
ui.Label("Select a Prim first")
@Trace.TraceFunction
def _on_change_event(self, notice, stage):
# await omni.kit.app.get_app().next_update_async()
self.load_window()
def on_shutdown(self):
print("[omni.gym.4LegRL] MyExtension shutdown") | 4,517 | Python | 44.636363 | 119 | 0.552801 |
Moetassem/OmniverseLegRotateExtension/exts/omni.gym.4LegRL/omni/gym/4LegRL/__init__.py | from .extension import *
from .Model import * | 45 | Python | 21.999989 | 24 | 0.755556 |
xyang2013/kit-exts-command-library/exts/dli.example.command_library/dli/example/command_library/extension.py | import omni.kit.commands
import omni.usd
import omni.ext
from typing import List
import omni.ui as ui
class ScaleIncrement(omni.kit.commands.Command):
def __init__(self, prim_paths: List[str]):
self.prim_paths = prim_paths
self.stage = omni.usd.get_context().get_stage()
def set_scale(self, undo: bool = False):
for path in self.prim_paths:
prim = self.stage.GetPrimAtPath(path)
old_scale = prim.GetAttribute('xformOp:scale').Get()
new_scale = tuple(x + 1 for x in old_scale)
if undo:
new_scale = tuple(x - 1 for x in old_scale)
prim.GetAttribute('xformOp:scale').Set(new_scale)
def do(self):
self.set_scale()
def undo(self):
self.set_scale(True)
def get_selection() -> List[str]:
"""Get the list of currently selected prims"""
return omni.usd.get_context().get_selection().get_selected_prim_paths()
class MyExtension(omni.ext.IExt):
def on_startup(self, ext_id):
print("[dli.example.command_library] MyExtension startup")
self._window = ui.Window("My Window", width=300, height=300)
with self._window.frame:
with ui.VStack():
ui.Label("Prim Scaler")
def on_click():
prim_paths = get_selection()
omni.kit.commands.execute('ScaleIncrement', prim_paths=prim_paths)
ui.Button("Scale Up!", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[dli.example.command_library] MyExtension shutdown")
self._window.destroy()
self._window = None
| 1,637 | Python | 29.90566 | 86 | 0.598045 |
AndreiVoica/P10-MAP/README.md |
<a name="P10-MAP"></a>
<!-- PROJECT SHIELDS -->
<!--
*** I'm using markdown "reference style" links for readability.
*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).
*** See the bottom of this document for the declaration of the reference variables
*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.
*** https://www.markdownguide.org/basic-syntax/#reference-style-links
-->
[![Contributors][contributors-shield]][contributors-url]
[![Stars][stars-shield]][stars-url]
[![CC BY 4.0][cc-by-shield]][cc-by]
<!-- PROJECT LOGO -->
<br />
<div align="center">
<!-- <a href="https://github.com/AndreiVoica/P10-MAP/">
<img src="images/logo.png" alt="Logo" width="80" height="80">
</a> -->
<h3 align="center">P10 - Material Acceleration Platforms</h3>
<p align="center">
Master Thesis in Robotics - Aalborg University
<br />
<a href="https://www.youtube.com/playlist?list=PLTbrI-WjdIEfSyzKvvQM6LQMU2solaiKI">View Demo</a>
<br />
</div>
<!-- TABLE OF CONTENTS -->
<details>
<summary>Table of Contents</summary>
<ol>
<li>
<a href="#about-the-project">About The Project</a>
<ul>
<li><a href="#built-with">Built With</a></li>
</ul>
</li>
<li>
<a href="#getting-started">Getting Started</a>
<ul>
<li><a href="#prerequisites">Prerequisites</a></li>
<li><a href="#installation">Installation</a></li>
</ul>
</li>
<li><a href="#usage">Usage</a></li>
<li><a href="#contributing">Contributing</a></li>
<li><a href="#license">License</a></li>
<li><a href="#contact">Contact</a></li>
<li><a href="#acknowledgments">Acknowledgments</a></li>
</ol>
</details>
<!-- ABOUT THE PROJECT -->
## About the Project
<p align="center">
<img src="/docs/imgs/Frontpage.png" alt="Frontpage" width="700">
</p>
This project focuses on the transformation of chemistry laboratories into autonomous environments that can accelerate the discovery of new materials. The main goal is to optimize chemical processes that are typically performed by humans and can thus be slow and prone to errors.
The project utilizes robotic solutions and simulation to achieve this goal. The autonomous laboratory will be implemented on the AAU Matrix Production setup. This setup consists of five Kuka robotic manipulators, the B&R Automation Acopos 6D magnetic levitation platform, and various custom-made parts.
For development purposes, Nvidia Isaac Sim is used to create a simulated environment that replicates the physical setup. This allows for the execution of different experiments in a virtual setting. The Robot Operating System (ROS1) is used to control both the simulated Kuka manipulators and their real-world counterparts.
The simulation experiments demonstrate that the system is capable of automatically completing a chemical process. However, transferring these capabilities to the physical setup poses a significant challenge.
The project is the outcome of a Master's thesis in Robotics at Aalborg University.
<p align="right">(<a href="#readme-top">back to top</a>)</p>
### Built With
Ubuntu 20.04 together with Isaac Sim 2022.2.1 and ROS Noetic was used for this project.
<p align="left">
<a href="https://www.python.org" target="_blank"> <img src="https://raw.githubusercontent.com/devicons/devicon/master/icons/python/python-original.svg" alt="python" width="40" height="40"/> </a>
<a href="https://www.w3schools.com/cpp/" target="_blank"> <img src="https://raw.githubusercontent.com/devicons/devicon/master/icons/cplusplus/cplusplus-original.svg" alt="cplusplus" width="40" height="40"/> </a>
<a href="https://www.overleaf.com/"> <img src="https://images.ctfassets.net/nrgyaltdicpt/h9dpHuVys19B1sOAWvbP6/5f8d4c6d051f63e4ba450befd56f9189/ologo_square_colour_light_bg.svg" alt="overleaf_logo" width="40" height="40"> </a>
<a href="https://git-scm.com/" target="_blank"> <img src="https://www.vectorlogo.zone/logos/git-scm/git-scm-icon.svg" alt="git" width="40" height="40"/> </a>
<a href="https://www.linux.org/" target="_blank"> <img src="https://raw.githubusercontent.com/devicons/devicon/master/icons/linux/linux-original.svg" alt="linux" width="40" height="40"/> </a>
<a href="https://www.nvidia.com/en-us/omniverse/" target="_blank"> <img src="https://docs.omniverse.nvidia.com/con_connect/_images/renderer.png" alt="OmniverseIsaacSim" width="40" height="40"/> </a>
<a href="https://www.ros.org/"> <img src="https://upload.wikimedia.org/wikipedia/commons/b/bb/Ros_logo.svg" alt="ros_logo" height="36"> </a>
</p>
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- GETTING STARTED -->
## Getting Started
To get a local copy up and running follow these example steps.
### Prerequisites
[06/06/2023]
* Isaac Sim requirements:
Isaac Sim has some minimum system requirements; check the [link](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/requirements.html) for more details.
| Element | Minimum Spec | Good | Ideal |
|---------|------------------------------------|----------------|-----------------------------------------------------|
| OS | Ubuntu 20.04/22.04, Windows 10/11 | Ubuntu 20.04/22.04, Windows 10/11 | Ubuntu 20.04/22.04, Windows 10/11 |
| CPU | Intel Core i7 (7th Generation), AMD Ryzen 5 | Intel Core i7 (9th Generation), AMD Ryzen 7 | Intel Core i9, X-series or higher, AMD Ryzen 9, Threadripper or higher |
| Cores | 4 | 8 | 16 |
| RAM | 32GB* | 64GB* | 64GB* |
| Storage | 50GB SSD | 500GB SSD | 1TB NVMe SSD |
| GPU | GeForce RTX 2070 | GeForce RTX 3080| RTX A6000 |
| VRAM | 8GB* | 10GB* | 48GB* |
Note: GeForce RTX 2060 6GB VRAM is also compatible.
Note: The asterisk (*) indicates that the specified amount is the minimum required, but more is recommended for better performance.
### Installation
* Isaac Sim and MAPs Extension
* ROS
* MoveIt
* KukaVarProxy
* Planar Motor Controller API
1. To install Isaac Sim, follow the instructions in the [Isaac Sim documentation](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html#).
Once Isaac Sim is installed, follow the steps in [MAPs Extension](/docs/installation/MAPs_Extension/README.md).
2. To install ROS, follow the instructions in the [ROS Noetic documentation](http://wiki.ros.org/noetic/Installation/Ubuntu)
3. To install MoveIt, follow the instructions in the [MoveIt documentation](https://moveit.ros.org/install/)
4. To install the KukaVarProxy, follow the instructions in the [KukaVarProxy documentation](https://github.com/ImtsSrl/KUKAVARPROXY)
5. To install the Planar Motor Controller PMC API, follow the instructions in the [planar motor controller API documentation](/docs/installation/planar_motor_control_API/README.md)
<!-- USAGE EXAMPLES -->
## Usage
The following image shows the communication workflow between ROS and the physical robots (blue), the simulation environment (green), and the magnetic levitation platform (orange). The machine-readable recipe is not implemented.
<p align="center">
<img src="/docs/imgs/Workflow.drawio_v2.png" alt="Workflow Diagram" width="400">
</p>
### How to run the simulation
1. Launch `roscore`
2. Open Isaac Sim and launch MAPs Extension. Check [MAPs Extension](/docs/installation/MAPs_Extension/README.md) for troubleshooting.
3. Press Play in Isaac Sim GUI
4. Launch `roslaunch isaac_moveit kuka_isaac_execution.launch` from a sourced workspace
5. Start the simulation by pressing the `Start` button in the extension GUI
### How to run the Acopos 6D Digital Twin
1. Launch `roscore`
2. Open Isaac Sim and launch MAPs Extension. Check [MAPs Extension](/docs/installation/MAPs_Extension/README.md) for troubleshooting.
3. Check that the computer is in the same IP range as the PMC (by default, PMC IP: 192.168.10.100)
4. In the MAPs GUI press Connect PMC
5. Press Start Real Setup
Note 1: To send random targets to each shuttle, uncomment the following line under `async def _on_real_control_event_async`: `#self._world.add_physics_callback("sim_step_move_acopos", callback_fn=self.send_xbots_positions)`.
Note 2: Adjust `self._number_shuttles = 4` to match the number of shuttles in the physical setup.
A minimal sketch of what Note 1 enables is shown below.
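For reference, the sketch below shows roughly what Note 1 enables. It assumes the extension already holds a `World` instance in `self._world` and defines `send_xbots_positions`; the callback body here is purely illustrative.
```python
# Illustrative only -- the real logic lives in the MAPs extension.
def _enable_random_targets(self):
    # After registration, Isaac Sim calls the function on every physics step.
    self._world.add_physics_callback(
        "sim_step_move_acopos",              # callback name quoted from Note 1
        callback_fn=self.send_xbots_positions,
    )

def send_xbots_positions(self, step_size):
    # Hypothetical body: push one random target per shuttle to the PMC API.
    for shuttle_id in range(self._number_shuttles):
        ...
```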
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- CONTRIBUTING
## Contributing
Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement".
Don't forget to give the project a star! Thanks again!
1. Fork the Project
2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the Branch (`git push origin feature/AmazingFeature`)
5. Open a Pull Request
<p align="right">(<a href="#readme-top">back to top</a>)</p>
-->
<!-- LICENSE -->
## License
This work is licensed under a
[Creative Commons Attribution 4.0 International License][cc-by].
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- CONTACT -->
## Contact
Daniel Moreno - [LinkedIn](https://www.linkedin.com/in/daniel-mparis/) - [email protected]
Andrei Voica - [LinkedIn](https://www.linkedin.com/in/andrei-voica-825b7a104/) - [email protected]
Project Link: [https://github.com/AndreiVoica/P10-MAP](https://github.com/AndreiVoica/P10-MAP)
<p align="right">(<a href="#readme-top">back to top</a>)</p>
<!-- ACKNOWLEDGMENTS -->
<!-- ## Acknowledgments
Use this space to list resources you find helpful and would like to give credit to. I've included a few of my favorites to kick things off!
* [Choose an Open Source License](https://choosealicense.com)
* [GitHub Emoji Cheat Sheet](https://www.webpagefx.com/tools/emoji-cheat-sheet)
* [Malven's Flexbox Cheatsheet](https://flexbox.malven.co/)
* [Malven's Grid Cheatsheet](https://grid.malven.co/)
* [Img Shields](https://shields.io)
* [GitHub Pages](https://pages.github.com)
* [Font Awesome](https://fontawesome.com)
* [React Icons](https://react-icons.github.io/react-icons/search)
<p align="right">(<a href="#readme-top">back to top</a>)</p>
-->
<!-- MARKDOWN LINKS & IMAGES -->
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
[contributors-shield]: https://img.shields.io/github/contributors/AndreiVoica/P10-MAP.svg?style=for-the-badge
[contributors-url]: https://github.com/AndreiVoica/P10-MAP/graphs/contributors
[stars-shield]: https://img.shields.io/github/stars/AndreiVoica/P10-MAP.svg?style=for-the-badge
[stars-url]: https://github.com/AndreiVoica/P10-MAP/stargazers
[issues-shield]: https://img.shields.io/github/issues/AndreiVoica/P10-MAP.svg?style=for-the-badge
[issues-url]: https://github.com/othneildrew/Best-README-Template/issues
[license-shield]: https://img.shields.io/github/license/othneildrew/Best-README-Template.svg?style=for-the-badge
[license-url]: https://github.com/othneildrew/Best-README-Template/blob/master/LICENSE.txt
[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-url]: https://linkedin.com/in/othneildrew
[product-screenshot]: images/screenshot.png
[Next.js]: https://img.shields.io/badge/next.js-000000?style=for-the-badge&logo=nextdotjs&logoColor=white
[Python-url]: https://nextjs.org/
[React.js]: https://img.shields.io/badge/React-20232A?style=for-the-badge&logo=react&logoColor=61DAFB
[React-url]: https://reactjs.org/
[Vue.js]: https://img.shields.io/badge/Vue.js-35495E?style=for-the-badge&logo=vuedotjs&logoColor=4FC08D
[Vue-url]: https://vuejs.org/
[Angular.io]: https://img.shields.io/badge/Angular-DD0031?style=for-the-badge&logo=angular&logoColor=white
[Angular-url]: https://angular.io/
[Svelte.dev]: https://img.shields.io/badge/Svelte-4A4A55?style=for-the-badge&logo=svelte&logoColor=FF3E00
[Svelte-url]: https://svelte.dev/
[Laravel.com]: https://img.shields.io/badge/Laravel-FF2D20?style=for-the-badge&logo=laravel&logoColor=white
[Laravel-url]: https://laravel.com
[Bootstrap.com]: https://img.shields.io/badge/Bootstrap-563D7C?style=for-the-badge&logo=bootstrap&logoColor=white
[Bootstrap-url]: https://getbootstrap.com
[JQuery.com]: https://img.shields.io/badge/jQuery-0769AD?style=for-the-badge&logo=jquery&logoColor=white
[JQuery-url]: https://jquery.com
[![CC BY 4.0][cc-by-image]][cc-by]
[cc-by]: http://creativecommons.org/licenses/by/4.0/
[cc-by-image]: https://i.creativecommons.org/l/by/4.0/88x31.png
[cc-by-shield]: https://img.shields.io/badge/License-CC%20BY%204.0-lightgrey.svg?style=for-the-badge
| 13,213 | Markdown | 44.723183 | 322 | 0.695603 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/package.xml | <package>
<name>kuka_moveit_configuration</name>
<version>0.3.0</version>
<description>
An automatically generated package with all the configuration and launch files for using the kuka_kr3r540 with the MoveIt! Motion Planning Framework
</description>
<author email="[email protected]">Aytac Kahveci</author>
<maintainer email="[email protected]">Aytac Kahveci</maintainer>
<license>BSD</license>
<url type="website">http://moveit.ros.org/</url>
<url type="bugtracker">https://github.com/ros-planning/moveit/issues</url>
<url type="repository">https://github.com/ros-planning/moveit</url>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>moveit_ros_move_group</run_depend>
<run_depend>moveit_kinematics</run_depend>
<run_depend>moveit_planners_ompl</run_depend>
<run_depend>moveit_ros_visualization</run_depend>
<run_depend>joint_state_publisher</run_depend>
<run_depend>robot_state_publisher</run_depend>
<run_depend>xacro</run_depend>
<!-- This package is referenced in the warehouse launch files, but does not build out of the box at the moment. Commented the dependency until this works. -->
<!-- <run_depend>warehouse_ros_mongo</run_depend> -->
<build_depend>kuka_kr3_support</build_depend>
<run_depend>kuka_kr3_support</run_depend>
</package>
| 1,326 | XML | 39.21212 | 160 | 0.739819 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr6r900sixx_moveit_sensor_manager.launch.xml | <launch>
</launch>
| 20 | XML | 4.249999 | 9 | 0.6 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/sensor_manager.launch.xml | <launch>
<!-- This file makes it easy to include the settings for sensor managers -->
<!-- Params for the octomap monitor -->
<!-- <param name="octomap_frame" type="string" value="some frame in which the robot moves" /> -->
<param name="octomap_resolution" type="double" value="0.025" />
<param name="max_range" type="double" value="5.0" />
<!-- Load the robot specific sensor manager; this sets the moveit_sensor_manager ROS parameter -->
<arg name="moveit_sensor_manager" default="kuka_kr3r540" />
<include file="$(find kuka_moveit_configuration)/launch/$(arg moveit_sensor_manager)_moveit_sensor_manager.launch.xml" />
</launch>
| 657 | XML | 42.866664 | 123 | 0.686454 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/fake_moveit_controller_manager.launch.xml | <launch>
<!-- Set the param that trajectory_execution_manager needs to find the controller plugin -->
<param name="moveit_controller_manager" value="moveit_fake_controller_manager/MoveItFakeControllerManager"/>
<!-- The rest of the params are specific to this plugin -->
<rosparam file="$(find kuka_moveit_configuration)/config/fake_controllers.yaml"/>
</launch>
| 374 | XML | 36.499996 | 110 | 0.745989 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/kuka_kr6r900sixx_moveit_controller_manager.launch.xml | <launch>
<arg name="sim" default="false" />
<arg name="moveit_controller_manager" default="moveit_simple_controller_manager/MoveItSimpleControllerManager"/>
<param name="moveit_controller_manager" value="$(arg moveit_controller_manager)"/>
<!-- load controller_list -->
<rosparam file="$(find kuka_moveit_configuration)/config/controller.yaml"/>
<!-- Load standard kuka controller joint names from YAML file to parameter server -->
<include file="$(find kuka_hw_axis)/launch/trajectory.launch"> </include>
</launch>
| 566 | XML | 46.249996 | 113 | 0.685512 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/trajectory_execution2.launch.xml | <launch>
<!-- This file makes it easy to include the settings for trajectory execution -->
<!-- Flag indicating whether MoveIt! is allowed to load/unload or switch controllers -->
<arg name="moveit_manage_controllers" default="true"/>
<param name="moveit_manage_controllers" value="$(arg moveit_manage_controllers)"/>
<!-- When determining the expected duration of a trajectory, this multiplicative factor is applied to get the allowed duration of execution -->
<param name="trajectory_execution/allowed_execution_duration_scaling" value="1.2"/> <!-- default 1.2 -->
<!-- Allow more than the expected execution time before triggering a trajectory cancel (applied after scaling) -->
<param name="trajectory_execution/allowed_goal_duration_margin" value="0.5"/> <!-- default 0.5 -->
<!-- Allowed joint-value tolerance for validation that trajectory's first point matches current robot state -->
<param name="trajectory_execution/allowed_start_tolerance" value="0.01"/> <!-- default 0.01 -->
<!-- Load the robot specific controller manager; this sets the moveit_controller_manager ROS parameter -->
<arg name="moveit_controller_manager" default="kuka_kr3r540" />
<include file="$(find kuka_moveit_configuration)/launch/$(arg moveit_controller_manager)_moveit_controller_manager2.launch.xml" />
</launch>
| 1,343 | XML | 60.090906 | 145 | 0.72971 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/planning_pipeline.launch.xml | <launch>
<!-- This file makes it easy to include different planning pipelines;
It is assumed that all planning pipelines are named XXX_planning_pipeline.launch -->
<arg name="pipeline" default="ompl" />
<include file="$(find kuka_moveit_configuration)/launch/$(arg pipeline)_planning_pipeline.launch.xml" />
</launch>
| 339 | XML | 29.909088 | 106 | 0.707965 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/warehouse_settings.launch.xml | <launch>
<!-- Set the parameters for the warehouse and run the mongodb server. -->
<!-- The default DB port for moveit (not default MongoDB port to avoid potential conflicts) -->
<arg name="moveit_warehouse_port" default="33829" />
<!-- The default DB host for moveit -->
<arg name="moveit_warehouse_host" default="localhost" />
<!-- Set parameters for the warehouse -->
<param name="warehouse_port" value="$(arg moveit_warehouse_port)"/>
<param name="warehouse_host" value="$(arg moveit_warehouse_host)"/>
<param name="warehouse_exec" value="mongod" />
<param name="warehouse_plugin" value="warehouse_ros_mongo::MongoDatabaseConnection" />
</launch>
| 681 | XML | 39.117645 | 99 | 0.688693 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/launch/ompl_planning_pipeline.launch.xml | <launch>
<!-- OMPL Plugin for MoveIt! -->
<arg name="planning_plugin" value="ompl_interface/OMPLPlanner" />
<!-- The request adapters (plugins) used when planning with OMPL.
ORDER MATTERS -->
<arg name="planning_adapters" value="default_planner_request_adapters/AddTimeParameterization
default_planner_request_adapters/FixWorkspaceBounds
default_planner_request_adapters/FixStartStateBounds
default_planner_request_adapters/FixStartStateCollision
default_planner_request_adapters/FixStartStatePathConstraints" />
<arg name="start_state_max_bounds_error" value="0.1" />
<param name="planning_plugin" value="$(arg planning_plugin)" />
<param name="request_adapters" value="$(arg planning_adapters)" />
<param name="start_state_max_bounds_error" value="$(arg start_state_max_bounds_error)" />
<rosparam command="load" file="$(find kuka_moveit_configuration)/config/ompl_planning.yaml"/>
</launch>
| 970 | XML | 41.21739 | 95 | 0.714433 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/simple_moveit_controllers.yaml | controller_list:
- name: arm_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6 | 244 | YAML | 19.416665 | 38 | 0.590164 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/chomp_planning.yaml | planning_time_limit: 10.0
max_iterations: 200
max_iterations_after_collision_free: 5
smoothness_cost_weight: 0.1
obstacle_cost_weight: 1.0
learning_rate: 0.01
smoothness_cost_velocity: 0.0
smoothness_cost_acceleration: 1.0
smoothness_cost_jerk: 0.0
ridge_factor: 0.0
use_pseudo_inverse: false
pseudo_inverse_ridge_factor: 1e-4
joint_update_limit: 0.1
collision_clearance: 0.2
collision_threshold: 0.07
use_stochastic_descent: true
enable_failure_recovery: false
max_recovery_attempts: 5
| 487 | YAML | 24.684209 | 38 | 0.794661 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/stomp_planning.yaml | stomp/arm:
group_name: arm
optimization:
num_timesteps: 60
num_iterations: 40
num_iterations_after_valid: 0
num_rollouts: 30
max_rollouts: 30
initialization_method: 1 # [1 : LINEAR_INTERPOLATION, 2 : CUBIC_POLYNOMIAL, 3 : MININUM_CONTROL_COST]
control_cost_weight: 0.0
task:
noise_generator:
- class: stomp_moveit/NormalDistributionSampling
stddev: [0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
cost_functions:
- class: stomp_moveit/CollisionCheck
collision_penalty: 1.0
cost_weight: 1.0
kernel_window_percentage: 0.2
longest_valid_joint_move: 0.05
noisy_filters:
- class: stomp_moveit/JointLimits
lock_start: True
lock_goal: True
- class: stomp_moveit/MultiTrajectoryVisualization
line_width: 0.02
rgb: [255, 255, 0]
marker_array_topic: stomp_trajectories
marker_namespace: noisy
update_filters:
- class: stomp_moveit/PolynomialSmoother
poly_order: 6
- class: stomp_moveit/TrajectoryVisualization
line_width: 0.05
rgb: [0, 191, 255]
error_rgb: [255, 0, 0]
publish_intermediate: True
marker_topic: stomp_trajectory
marker_namespace: optimized | 1,264 | YAML | 31.435897 | 106 | 0.631329 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/kinematics.yaml | arm:
kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin
kinematics_solver_search_resolution: 0.005
kinematics_solver_timeout: 0.005 | 147 | YAML | 35.999991 | 62 | 0.816327 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/gazebo_controllers.yaml | # Publish joint_states
joint_state_controller:
type: joint_state_controller/JointStateController
publish_rate: 100
| 119 | YAML | 22.999995 | 51 | 0.806723 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/joint_limits.yaml | # joint_limits.yaml allows the dynamics properties specified in the URDF to be overwritten or augmented as needed
# For beginners, we downscale velocity and acceleration limits.
# You can always specify higher scaling factors (<= 1.0) in your motion requests. # Increase the values below to 1.0 to always move at maximum speed.
default_velocity_scaling_factor: 0.1
default_acceleration_scaling_factor: 0.1
# Specific joint properties can be changed with the keys [max_position, min_position, max_velocity, max_acceleration]
# Joint limits can be turned off with [has_velocity_limits, has_acceleration_limits]
joint_limits:
joint_a1:
has_velocity_limits: true
max_velocity: 9.250245035569947
has_acceleration_limits: false
max_acceleration: 0
joint_a2:
has_velocity_limits: true
max_velocity: 9.232791743050003
has_acceleration_limits: false
max_acceleration: 0
joint_a3:
has_velocity_limits: true
max_velocity: 9.389871375729493
has_acceleration_limits: false
max_acceleration: 0
joint_a4:
has_velocity_limits: true
max_velocity: 10.47197551196598
has_acceleration_limits: false
max_acceleration: 0
joint_a5:
has_velocity_limits: true
max_velocity: 10.47197551196598
has_acceleration_limits: false
max_acceleration: 0
joint_a6:
has_velocity_limits: true
max_velocity: 13.96263401595464
has_acceleration_limits: false
max_acceleration: 0 | 1,447 | YAML | 35.199999 | 150 | 0.744299 |
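A note on the scaling comments at the top of joint_limits.yaml above: the 0.1 defaults can also be overridden per motion request from client code. The sketch below is illustrative and not part of this repository; it assumes a running move_group loaded with this configuration and uses the `arm` planning group named in kinematics.yaml.

#!/usr/bin/env python
# Illustrative only: raise the default 0.1 velocity/acceleration scaling for one request.
import sys
import rospy
import moveit_commander

moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node("scaling_example")

arm = moveit_commander.MoveGroupCommander("arm")  # planning group defined in this MoveIt config
arm.set_max_velocity_scaling_factor(0.5)          # 50% of the joint velocity limits above
arm.set_max_acceleration_scaling_factor(0.5)      # 50% of the acceleration limits
arm.set_random_target()                           # any reachable goal; a random one keeps the sketch self-contained
arm.go(wait=True)
moveit_commander.roscpp_shutdown()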
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/fake_controllers.yaml | controller_list:
- name: fake_arm_controller
#type: $(arg fake_execution_type)
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
#initial: # Define initial robot poses per group
# - group: arm
# pose: down | 280 | YAML | 20.615383 | 49 | 0.575 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/ros_controllers.yaml | arm_controller:
type: effort_controllers/JointTrajectoryController
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
gains:
joint_a1:
p: 100
d: 1
i: 1
i_clamp: 1
joint_a2:
p: 100
d: 1
i: 1
i_clamp: 1
joint_a3:
p: 100
d: 1
i: 1
i_clamp: 1
joint_a4:
p: 100
d: 1
i: 1
i_clamp: 1
joint_a5:
p: 100
d: 1
i: 1
i_clamp: 1
joint_a6:
p: 100
d: 1
i: 1
i_clamp: 1
#Robot_IP: "192.168.1.15"
Robot_Port: 7000 | 617 | YAML | 13.372093 | 52 | 0.442464 |
AndreiVoica/P10-MAP/src/kuka_moveit_configuration/config/controller.yaml |
controller_joint_names:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
#Publish all joint states
joint_state_controller:
type: joint_state_controller/JointStateController
publish_rate: 100
# Joint trajectory controller
position_trajectory_controller:
type: "position_controllers/JointGroupPositionController"
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
# Joint trajectory controller
arm_controller:
type: "position_controllers/JointTrajectoryController"
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
state_publish_rate: 100 # Defaults to 50
action_monitor_rate: 100 # Defaults to 20
controller_list:
- name: "/move_group/arm_controller"
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
Robot_Port: 7000
| 1,017 | YAML | 18.576923 | 59 | 0.653884 |
AndreiVoica/P10-MAP/src/kuka_kr4_support/config/opw_parameters_kr4r600.yaml | #
# Parameters for use with IK solvers which support OPW (Ortho-Parallel Wrist)
# kinematic configurations, as described in the paper "An Analytical Solution
# of the Inverse Kinematics Problem of Industrial Serial Manipulators with an
# Ortho-parallel Basis and a Spherical Wrist" by Mathias Brandstötter, Arthur
# Angerer, and Michael Hofbaur (Proceedings of the Austrian Robotics Workshop
# 2014, 22-23 May, 2014, Linz, Austria).
#
# The moveit_opw_kinematics_plugin package provides such a solver.
#
opw_kinematics_geometric_parameters:
a1: 0.0
a2: -0.02
b: 0.0
c1: 0.33
c2: 0.29
c3: 0.31
c4: 0.075
opw_kinematics_joint_offsets: [0.0, deg(-90.0), 0.0, 0.0, 0.0, 0.0]
opw_kinematics_joint_sign_corrections: [-1, 1, 1, -1, 1, -1]
| 772 | YAML | 35.809522 | 77 | 0.703368 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/package.xml | <package format="1">
<name>kuka_ros_open_comm</name>
<version>0.12.0</version>
<description>KUKA ROS Open Communication Library.</description>
  <maintainer email="[email protected]">Aytaç Kahveci</maintainer>
<license>BSD</license>
  <author>Aytaç Kahveci</author>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roscpp</build_depend>
<run_depend>roscpp</run_depend>
</package>
| 417 | XML | 23.588234 | 73 | 0.726619 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLAxis.h | /* @author Aytaç Kahveci */
#ifndef KRLAXIS_H
#define KRLAXIS_H
#include <kuka_ros_open_comm/KRLVariable.h>
#include <vector>
#include <string>
#include <map>
#include <stdexcept>
#include <sstream>
#include <algorithm>
/**
* Represents a Axis Struct variable from the KRL language
*
* @author Aytac Kahveci
*/
class KRLAxis
{
public:
template<typename Out>
void split(const std::string &s, char delim, Out result) {
std::stringstream ss;
ss.str(s);
std::string item;
while (std::getline(ss, item, delim)) {
*(result++) = item;
}
}
std::vector<std::string> split(const std::string &s, char delim) {
std::vector<std::string> elems;
split(s, delim, std::back_inserter(elems));
return elems;
}
    static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
private:
std::string name_;
long readTime_;
int id_;
KRLVariable *krl_var_;
std::vector<std::string> nodes_;
public:
std::map<std::string, double> map_;
KRLAxis(){}
KRLAxis(std::string name, std::vector<std::string> nodes = {"A1", "A2", "A3", "A4", "A5", "A6"})
{
krl_var_ = new KRLVariable(name);
id_ = krl_var_->getId();
name_ = krl_var_->getName();
nodes_ = nodes;
for(std::string str : nodes)
{
map_.insert(std::pair<std::string,double>(str, 0.0));
}
}
~KRLAxis()
{
delete krl_var_;
}
std::vector<std::string> getNodes()
{
return nodes_;
}
void setA1ToA6(std::vector<double> values) {
if (values.size() != 6) {
            throw std::invalid_argument("The number of values should be exactly 6!");
}
setA1(values[0]);
setA2(values[1]);
setA3(values[2]);
setA4(values[3]);
setA5(values[4]);
setA6(values[5]);
}
void setA1(double d)
{
map_["A1"] = d;
}
void setA2(double d)
{
map_["A2"] = d;
}
void setA3(double d)
{
map_["A3"] = d;
}
void setA4(double d)
{
map_["A4"] = d;
}
void setA5(double d)
{
map_["A5"] = d;
}
void setA6(double d)
{
map_["A6"] = d;
}
/**
* Get a double array representation of this object
*
* @return a new double array with the values contained in this struct
*/
std::vector<double> asArray()
{
std::vector<double> arr;
arr.resize(this->getNodes().size());
for (int i = 0; i < arr.size(); i++)
{
arr[i] = map_[this->getNodes()[i]];
}
return arr;
}
std::vector<double> asArrayA1ToA6()
{
std::vector<double> arr = {map_["A1"], map_["A2"], map_["A3"], map_["A4"], map_["A5"], map_["A6"]};
return arr;
}
void setValue(std::string str, std::string obj)
{
std::string::size_type sz;
double db = std::stod(obj, &sz);
map_[str] = db;
}
std::map<std::string, double> getValue()
{
return map_;
}
std::string getStringValue()
{
std::string sb="";
sb.append("{");
unsigned int i = 0;
for(std::string str : nodes_)
{
if(map_.count(str) > 0)
{
double get = map_[str];
map_.erase(map_.find(str));
sb.append(str).append(" ").append(std::to_string(get));
if(!map_.empty() && (i != map_.size()))
{
sb.append(", ");
}
}
}
sb.append("}");
return sb;
}
void setValueFromString(std::string strValue)
{
std::string substring;
if(strValue.find(":") != std::string::npos)
{
std::vector<std::string> split_ = split(strValue,':');
std::string trim_ = trim(split_[1]);
substring = trim_.substr(0, trim_.find('}'));
}
else
{
std::string trim_ = trim(strValue);
substring = trim_.substr(1, trim_.size() - 1);
}
std::vector<std::string> split1 = split(substring,',');
for(std::string n : split1)
{
trim(n);
std::vector<std::string> split2 = split(n, ' ');
setValue(split2[0], split2[1]);
}
}
void update(int id, std::string strValue, long readTime)
{
if( id_ != id)
{
throw std::runtime_error("The returned id does not match the variable id! Should not happen...");
}
readTime_ = readTime;
setValueFromString(strValue);
}
std::vector<unsigned char> getReadCommand()
{
return krl_var_->getReadCommand();
}
std::vector<unsigned char> getWriteCommand()
{
return krl_var_->getWriteCommand(getStringValue());
}
};
#endif
| 5,401 | C | 22.284483 | 118 | 0.495094 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLVariable.h | /* @author Aytaç Kahveci */
#ifndef KRLVARIABLE_H
#define KRLVARIABLE_H
#include <string>
#include <atomic>
#include <vector>
#include <stdexcept>
class KRLVariable
{
public:
int id_;
std::string name_;
long readTime_ = -1;
public:
KRLVariable(){}
KRLVariable(std::string name)
{
static std::atomic<std::uint32_t> atomicInt { 0 };
name_ = name;
id_ = atomicInt.fetch_add(1, std::memory_order_relaxed);
}
~KRLVariable(){}
int getId()
{
return id_;
}
std::string getName()
{
return name_;
}
long getReadTimeNano()
{
return readTime_;
}
long getReadTimeMillis()
{
return readTime_ / 1000000;
}
double getReadTimeSec()
{
return ((double) readTime_ / 1000000000);
}
std::vector<unsigned char> getReadCommand()
{
std::vector<unsigned char> cmd(name_.c_str(), name_.c_str() + name_.size());
std::vector<unsigned char> header;
std::vector<unsigned char> block;
int varnamelen = cmd.size();
unsigned char hbyte, lbyte;
hbyte = (varnamelen & 0xff00) >> 8;
lbyte = (varnamelen & 0x00ff);
block.push_back(0);
block.push_back(hbyte);
block.push_back(lbyte);
block.insert(block.end(),cmd.begin(),cmd.end());
int blocklength = block.size();
hbyte = ((blocklength & 0xff00) >> 8);
lbyte = (blocklength & 0x00ff);
unsigned char hbytemsg = ((id_ & 0xff00) >> 8);
unsigned char lbytemsg = (id_ & 0x00ff);
header.push_back(hbytemsg);
header.push_back(lbytemsg);
header.push_back(hbyte);
header.push_back(lbyte);
block.insert(block.begin(), header.begin(), header.end());
return block;
}
/*
    * The write command. This is what is actually being sent to the robot.
    * It is an implementation of the OpenShowVar C++ source.
* @return the write command
*/
std::vector<unsigned char> getWriteCommand(std::string val)
{
std::vector<unsigned char> cmd(name_.c_str(), name_.c_str() + name_.size());
std::vector<unsigned char> value(val.c_str(), val.c_str() + val.size());
std::vector<unsigned char> header;
std::vector<unsigned char> block;
int varnamelen = cmd.size();
unsigned char hbyte, lbyte;
hbyte = (varnamelen & 0xff00) >> 8;
lbyte = (varnamelen & 0x00ff);
block.push_back((unsigned char) 1);
block.push_back(hbyte);
block.push_back(lbyte);
block.insert(block.end(),cmd.begin(),cmd.end());
int vallen = value.size();
hbyte = (vallen & 0xff00) >> 8;
lbyte = (vallen & 0x00ff);
block.push_back(hbyte);
block.push_back(lbyte);
block.insert(block.end(),value.begin(),value.end());
int blocklength = block.size();
hbyte = ((blocklength & 0xff00) >> 8);
lbyte = (blocklength & 0x00ff);
unsigned char hbytemsg = (id_ & 0xff00) >> 8;
unsigned char lbytemsg = (id_ & 0x00ff);
header.push_back(hbytemsg);
header.push_back(lbytemsg);
header.push_back(hbyte);
header.push_back(lbyte);
block.insert(block.begin(), header.begin(), header.end());
return block;
}
};
#endif
| 3,431 | C | 23.340425 | 85 | 0.556689 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLE6Pos.h | /* @author Aytaç Kahveci */
#ifndef KRLE6POS_H
#define KRLE6POS_H
#include <kuka_ros_open_comm/KRLFrame.h>
#include <kuka_ros_open_comm/KRLPos.h>
#include <vector>
#include <string>
#include <stdexcept>
class KRLE6Pos : public KRLPos
{
public:
KRLE6Pos(){}
KRLE6Pos(std::string name, std::vector<std::string> nodes = {"X", "Y", "Z", "A", "B", "C", "E1", "E2", "E3", "E4", "E5", "E6", "S", "T"})
:KRLPos(name, nodes)
{
}
~KRLE6Pos(){}
void setE1ToE6(std::vector<double> values)
{
if(values.size() != 6)
{
throw std::invalid_argument("The number of values should be exactly 6!");
}
setE1(values[0]);
setE2(values[1]);
setE3(values[2]);
setE4(values[3]);
setE5(values[4]);
setE6(values[5]);
}
double getE1()
{
return map_["E1"];
}
double getE2()
{
return map_["E2"];
}
double getE3()
{
return map_["E3"];
}
double getE4()
{
return map_["E4"];
}
double getE5()
{
return map_["E5"];
}
double getE6()
{
return map_["E6"];
}
KRLE6Pos setE1(double d)
{
map_.at(getNodes()[6]) = d;
return *this;
}
KRLE6Pos setE2(double d)
{
map_.at(getNodes()[7]) = d;
return *this;
}
KRLE6Pos setE3(double d)
{
map_.at(getNodes()[8]) = d;
return *this;
}
KRLE6Pos setE4(double d)
{
map_.at(getNodes()[9]) = d;
return *this;
}
KRLE6Pos setE5(double d)
{
map_.at(getNodes()[10]) = d;
return *this;
}
KRLE6Pos setE6(double d)
{
map_.at(getNodes()[11]) = d;
return *this;
}
std::vector<double> asArrayE1ToE6()
{
std::vector<double> arr_ = {map_["E1"], map_["E2"],map_["E3"],map_["E4"],map_["E5"],map_["E6"]};
return arr_;
}
};
#endif
| 1,970 | C | 15.991379 | 141 | 0.479695 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/kuka_client.h | /* @author Aytaç Kahveci */
#ifndef KUKA_ClIENT_H
#define KUKA_ClIENT_H
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <netinet/in.h>
#include <netdb.h>
#include <iostream>
#include <chrono>
#include <stdio.h>
#include <string>
#include <string.h>
#include <ros/ros.h>
#include <netinet/tcp.h>
namespace kuka_hw_interface
{
class kukaClient
{
public:
kukaClient(){}
kukaClient(char* host, int port)
{
sockfd_ = socket(AF_INET, SOCK_STREAM, 0);
if(sockfd_ < 0)
{
ROS_ERROR("ERROR creating a socket");
}
server = gethostbyname(host);
if(server == NULL)
{
ROS_ERROR("ERROR No such host");
}
memset((char*)&serverAddr_, 0, sizeof(serverAddr_));
memcpy((char*)&serverAddr_.sin_addr.s_addr, (char*)server->h_addr, server->h_length);
serverAddr_.sin_port = htons(port);
serverAddr_.sin_family = AF_INET;
if((connect(sockfd_, (struct sockaddr*)&serverAddr_, sizeof(serverAddr_))) < 0)
{
ROS_ERROR("ERROR connecting to the server");
}
}
~kukaClient()
{
close(sockfd_);
}
template <class T>
void readVariable(T *var)
{
std::vector<unsigned char> buffer = var->getReadCommand();
std::vector<unsigned char>::iterator next = buffer.begin();
while (next != buffer.end())
{
int n = send(sockfd_, &(*next), std::distance(next, buffer.end()), 0);
if (n == -1)
{
ROS_ERROR_STREAM("ERROR in readVariable function");
break; // ERROR
}
next += n;
}
char head[7];
int rec=0;
while(rec < sizeof(head))
{
n = recv(sockfd_, &head, sizeof(head)-rec, 0);
if(n < 0)
{
ROS_ERROR_STREAM("ERROR reading header from server");
}
rec += n;
}
char block[getInt(head,2)-3];
rec=0;
while(rec < sizeof(block))
{
n = recv(sockfd_, &block, sizeof(block)-rec, 0);
if(n < 0)
{
ROS_ERROR_STREAM("ERROR reading block from server");
}
rec += n;
}
std::vector<unsigned char> data;
for(char c : head)
{
data.push_back(c);
}
for(char c : block)
{
data.push_back(c);
}
int id = getInt(head, 0);
std::string strValue = "";
            // The value payload starts after the 7-byte header; stay inside the
            // bounds of 'data' instead of indexing data[7+i] past the end.
            for (std::size_t i = 7; i < data.size(); i++)
            {
                strValue += data[i];
            }
ROS_INFO_STREAM("Received value: "<< strValue);
ROS_INFO_STREAM("Received id: "<< id);
var->update(id, strValue, 1);
}
template <class T>
void writeVariable(T *var)
{
std::vector<unsigned char> buffer = var->getWriteCommand();
std::vector<unsigned char>::iterator it = buffer.begin();
while(it != buffer.end())
{
if((n = send(sockfd_, &(*it), std::distance(it,buffer.end()),0)) < 0)
{
ROS_ERROR_STREAM("ERROR in writeVariable function");
break;
}
it += n;
}
char head[7];
int rec=0;
while(rec < sizeof(head))
{
n = recv(sockfd_, &head, sizeof(head)-rec, 0);
if(n < 0)
{
ROS_ERROR_STREAM("ERROR reading header from server");
}
rec += n;
}
char block[getInt(head,2)-3];
rec=0;
while(rec < sizeof(block))
{
n = recv(sockfd_, &block, sizeof(block)-rec, 0);
if(n < 0)
{
ROS_ERROR_STREAM("ERROR reading block from server");
}
rec += n;
}
std::vector<unsigned char> data;
for(char c : head)
{
data.push_back(c);
}
for(char c : block)
{
data.push_back(c);
}
int id = getInt(head, 0);
std::string strValue = "";
            // Same as in readVariable(): copy only the payload that follows the
            // 7-byte header so we never read past the end of 'data'.
            for (std::size_t i = 7; i < data.size(); i++)
            {
                strValue += data[i];
            }
ROS_INFO_STREAM("Received value in writeVariable function: "<< strValue);
ROS_INFO_STREAM("Received id in writeVariable function: "<< id);
}
int getInt(char* bytes, int off)
{
int a = (((bytes[off] << 8) & 0xFF00) | (bytes[off + 1] & 0xFF));
return a;
}
private:
int sockfd_, n;
hostent *server;
sockaddr_in serverAddr_, clientAddr_;
};
}
#endif
| 5,282 | C | 28.679775 | 97 | 0.424839 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLPos.h | /* @author Aytaç Kahveci */
#ifndef KRLPOS_H
#define KRLPOS_H
#include <kuka_ros_open_comm/KRLFrame.h>
#include <vector>
#include <string>
/**
* Represents a Real variable from the KRL language
*/
class KRLPos : public KRLFrame
{
public:
KRLPos(){}
KRLPos(std::string name, std::vector<std::string> nodes = {"X", "Y", "Z", "A", "B", "C", "S", "T"})
: KRLFrame(name, nodes)
{
}
~KRLPos(){}
double getS()
{
return map_["S"];
}
double getT()
{
return map_["T"];
}
void setS(double d)
{
map_.at("S") = d;
}
void setT(double d)
{
map_.at("T") = d;
}
std::vector<double> asArrayXToC()
{
std::vector<double> arr = {map_["X"], map_["Y"], map_["Z"],map_["A"], map_["B"], map_["C"]};
return arr;
}
};
#endif
| 846 | C | 14.125 | 103 | 0.49409 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLInt.h | /* @author Aytaç Kahveci */
#ifndef KRLINT_H
#define KRLINT_H
#include <kuka_ros_open_comm/KRLVariable.h>
#include <string>
class KRLInt
{
private:
std::string name_;
int id_;
long readTime_;
KRLVariable* krl_var_;
public:
int value_ = (int) NULL;
KRLInt(std::string name)
{
krl_var_ = new KRLVariable(name);
name_ = krl_var_->getName();
id_ = krl_var_->getId();
}
~KRLInt()
{
delete krl_var_;
}
int getValue()
{
return value_;
}
std::string getStringValue()
{
return std::to_string(value_);
}
void setValue(int value)
{
value_ = value;
}
void update(int id, std::string strValue, long readTime)
{
if( id_ != id)
{
throw std::runtime_error("The returned id does not match the variable id! Should not happen...");
}
readTime_ = readTime;
setValueFromString(strValue);
}
std::vector<unsigned char> getReadCommand()
{
return krl_var_->getReadCommand();
}
std::vector<unsigned char> getWriteCommand()
{
return krl_var_->getWriteCommand(getStringValue());
}
private:
void setValueFromString(std::string strValue)
{
std::string::size_type sz;
value_ = std::stoi(strValue, &sz);
}
};
#endif
| 1,356 | C | 17.337838 | 108 | 0.560472 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLEnum.h | /* @author Aytaç Kahveci */
#ifndef KRLENUM_H
#define KRLENUM_H
#include <kuka_ros_open_comm/KRLVariable.h>
#include <string>
class KRLEnum
{
private:
std::string name_;
int id_;
long readTime_;
KRLVariable* krl_var_;
public:
    std::string value_; // default-constructed empty string; initialising from (std::string) NULL is undefined behaviour
    KRLEnum(std::string name)
    {
        krl_var_ = new KRLVariable(name);
        name_ = krl_var_->getName();
        id_ = krl_var_->getId(); // store the id so update() can validate the response
    }
~KRLEnum()
{
delete krl_var_;
}
std::string getValue()
{
return value_;
}
std::string getStringValue()
{
return value_;
}
void setValue(std::string value)
{
value_ = value;
}
void update(int id, std::string strValue, long readTime)
{
if( id_ != id)
{
throw std::runtime_error("The returned id does not match the variable id! Should not happen...");
}
readTime_ = readTime;
setValueFromString(strValue);
}
std::vector<unsigned char> getReadCommand()
{
return krl_var_->getReadCommand();
}
std::vector<unsigned char> getWriteCommand()
{
return krl_var_->getWriteCommand(getStringValue());
}
private:
void setValueFromString(std::string strValue)
{
value_ = strValue;
}
};
#endif
| 1,261 | C | 16.774648 | 108 | 0.570975 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLStruct.h | /* @author Aytaç Kahveci */
#ifndef KRLSTRUCT_H
#define KRLSTRUCT_H
#include <string>
#include <vector>
#include <map>
#include <sstream>
#include <algorithm>
template <class T>
class KRLStruct
{
public:
KRLStruct(){}
KRLStruct(std::vector<std::string> nodes)
{
nodes_ = nodes;
}
~KRLStruct(){}
/**
* The nodes
* @return the name of the variables that this struct contains
*/
std::vector<std::string> getNodes()
{
return nodes_;
}
};
#endif
| 518 | C | 12.307692 | 66 | 0.596525 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLE6Axis.h | /* @author Aytaç Kahveci */
#ifndef KRLE6AXIS_H
#define KRLE6AXIS_H
#include <kuka_ros_open_comm/KRLAxis.h>
#include <vector>
#include <string>
#include <stdexcept>
/**
* Represents a E6Axis struct variable from the KRL language
*
* @author Aytac Kahveci
*/
class KRLE6Axis : public KRLAxis
{
public:
KRLE6Axis(){}
KRLE6Axis(std::string name ,std::vector<std::string> nodes = {"A1", "A2", "A3", "A4", "A5", "A6", "E1", "E2", "E3", "E4", "E5", "E6"})
:KRLAxis(name,nodes)
{
}
~KRLE6Axis(){}
void setE1ToE6(std::vector<double> values)
{
if (values.size() != 6)
{
            throw std::invalid_argument("The number of values should be exactly 6!");
}
setE1(values[0]);
setE2(values[1]);
setE3(values[2]);
setE4(values[3]);
setE5(values[4]);
setE6(values[5]);
}
void setE1(double d)
{
map_.at(getNodes()[6]) = d;
}
void setE2(double d)
{
map_.at(getNodes()[7]) = d;
}
void setE3(double d)
{
map_.at(getNodes()[8]) = d;
}
void setE4(double d)
{
map_.at(getNodes()[9]) = d;
}
void setE5(double d)
{
map_.at(getNodes()[10]) = d;
}
void setE6(double d)
{
map_.at(getNodes()[11]) = d;
}
std::vector<double> asArrayE1ToE6()
{
std::vector<double> arr {map_["E1"], map_["E2"],map_["E3"],map_["E4"],map_["E5"],map_["E6"]};
return arr;
}
};
#endif
| 1,526 | C | 17.178571 | 138 | 0.509174 |
AndreiVoica/P10-MAP/src/kuka_ros_open_comm/include/kuka_ros_open_comm/KRLFrame.h | /* @author Aytaç Kahveci */
#ifndef KRLFRAME_H
#define KRLFRAME_H
#include <kuka_ros_open_comm/KRLVariable.h>
#include <string>
#include <vector>
#include <map>
#include <stdexcept>
#include <algorithm>
#include <iostream>
#include <sstream>
/**
* Represents a Frame struct variable from the KRL language
*
 * @author Aytaç Kahveci
*/
class KRLFrame
{
public:
template<typename Out>
void split(const std::string &s, char delim, Out result) {
std::stringstream ss;
ss.str(s);
std::string item;
while (std::getline(ss, item, delim)) {
*(result++) = item;
}
}
std::vector<std::string> split(const std::string &s, char delim) {
std::vector<std::string> elems;
split(s, delim, std::back_inserter(elems));
return elems;
}
    static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
private:
std::string name_;
long readTime_;
int id_;
KRLVariable *krl_variable_;
std::vector<std::string> nodes_;
public:
std::map<std::string, double> map_;
KRLFrame(){}
KRLFrame(std::string name, std::vector<std::string> nodes = {"X", "Y", "Z", "A", "B", "C"})
{
krl_variable_ = new KRLVariable(name);
id_ = krl_variable_->getId();
name_ = krl_variable_->getName();
nodes_ = nodes;
for(std::string str : nodes_)
{
map_.insert(std::pair<std::string,double>(str, 0.0));
}
}
~KRLFrame()
{}
std::vector<std::string> getNodes()
{
return nodes_;
}
double getX() {
return map_["X"];
}
double getY() {
return map_["Y"];
}
double getZ() {
return map_["Z"];
}
double getA() {
return map_["A"];
}
double getB() {
return map_["B"];
}
double getC() {
return map_["C"];
}
KRLFrame setX(double d) {
map_[getNodes()[0]] = d;
return *this;
}
KRLFrame setY(double d) {
map_[getNodes()[1]] = d;
return *this;
}
KRLFrame setZ(double d) {
map_[getNodes()[2]] = d;
return *this;
}
KRLFrame setA(double d) {
map_[getNodes()[3]] = d;
return *this;
}
KRLFrame setB(double d) {
map_[getNodes()[4]] = d;
return *this;
}
KRLFrame setC(double d) {
map_[getNodes()[5]] = d;
return *this;
}
void setXToZ(std::vector<double> values)
{
if(values.size() != 3)
{
            throw std::invalid_argument("The number of values should be exactly 3!");
}
setX(values[0]);
setY(values[1]);
setZ(values[2]);
}
void setAToC(std::vector<double> values)
{
if(values.size() != 3)
{
throw std::invalid_argument("The number of values should be exactly 3!");
}
setA(values[0]);
setB(values[1]);
setC(values[2]);
}
std::vector<double> asArray() {
std::vector<double> arr;
arr.resize(getNodes().size());
for (int i = 0; i < arr.size(); i++) {
arr[i] = map_[getNodes()[i]];
}
return arr;
}
std::vector<double> asArrayXToZ() {
std::vector<double> arr = {map_["X"], map_["Y"], map_["Z"]};
return arr;
}
std::vector<double> asArrayAToC() {
std::vector<double> arr = {map_["A"], map_["B"], map_["C"]};
return arr;
}
void setValue(std::string str, std::string obj) {
std::string::size_type sz;
double db = std::stod(obj, &sz);
map_[str] = db;
}
std::map<std::string, double> getValue() {
return map_;
}
std::string getStringValue()
{
std::string sb;
sb.append("{");
unsigned int i = 0;
for(std::string str : nodes_)
{
if(map_.count(str) > 0)
{
double get = map_[str];
map_.erase(map_.find(str));
sb.append(str).append(" ").append(std::to_string(get));
if(!map_.empty() && (i != map_.size()))
{
sb.append(", ");
}
}
}
sb.append("}");
return sb;
}
void setValueFromString(std::string strValue)
{
std::string substring;
if(strValue.find(":") != std::string::npos)
{
std::vector<std::string> split_ = split(strValue,':');
std::string trim_ = trim(split_[1]);
substring = trim_.substr(0, trim_.find('}'));
}
else
{
std::string trim_ = trim(strValue);
substring = trim_.substr(1, trim_.size() - 1);
}
std::vector<std::string> split1 = split(substring,',');
for(std::string n : split1)
{
trim(n);
std::vector<std::string> split2 = split(n, ' ');
setValue(split2[0], split2[1]);
}
}
void update(int id, std::string strValue, long readTime)
{
if( id_ != id)
{
throw std::runtime_error("The returned id does not match the variable id! Should not happen...");
}
readTime_ = readTime;
setValueFromString(strValue);
}
std::vector<unsigned char> getReadCommand()
{
return krl_variable_->getReadCommand();
}
std::vector<unsigned char> getWriteCommand()
{
return krl_variable_->getWriteCommand(getStringValue());
}
};
#endif
| 5,987 | C | 22.030769 | 117 | 0.498079 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/package.xml | <package>
<name>kr3_config_pipette</name>
<version>0.3.0</version>
<description>
An automatically generated package with all the configuration and launch files for using the kuka_kr3r540 with the MoveIt Motion Planning Framework
</description>
<author email="[email protected]">Andrei Voica</author>
<maintainer email="[email protected]">Andrei Voica</maintainer>
<license>BSD</license>
<url type="website">http://moveit.ros.org/</url>
<url type="bugtracker">https://github.com/ros-planning/moveit/issues</url>
<url type="repository">https://github.com/ros-planning/moveit</url>
<buildtool_depend>catkin</buildtool_depend>
<run_depend>moveit_ros_move_group</run_depend>
<run_depend>moveit_fake_controller_manager</run_depend>
<run_depend>moveit_kinematics</run_depend>
<run_depend>moveit_planners</run_depend>
<run_depend>moveit_ros_visualization</run_depend>
<run_depend>moveit_setup_assistant</run_depend>
<run_depend>moveit_simple_controller_manager</run_depend>
<run_depend>joint_state_publisher</run_depend>
<run_depend>joint_state_publisher_gui</run_depend>
<run_depend>robot_state_publisher</run_depend>
<run_depend>rviz</run_depend>
<run_depend>tf2_ros</run_depend>
<run_depend>xacro</run_depend>
<!-- The next 2 packages are required for the gazebo simulation.
We don't include them by default to prevent installing gazebo and all its dependencies. -->
<!-- <run_depend>joint_trajectory_controller</run_depend> -->
<!-- <run_depend>gazebo_ros_control</run_depend> -->
<!-- This package is referenced in the warehouse launch files, but does not build out of the box at the moment. Commented the dependency until this works. -->
<!-- <run_depend>warehouse_ros_mongo</run_depend> -->
<run_depend>kuka_kr3_support</run_depend>
</package>
| 1,834 | XML | 42.690475 | 160 | 0.731734 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/fake_moveit_controller_manager.launch.xml | <launch>
<!-- execute the trajectory in 'interpolate' mode or jump to goal position in 'last point' mode -->
<arg name="fake_execution_type" default="interpolate" />
<!-- Set the param that trajectory_execution_manager needs to find the controller plugin -->
<param name="moveit_controller_manager" value="moveit_fake_controller_manager/MoveItFakeControllerManager"/>
<!-- The rest of the params are specific to this plugin -->
<rosparam subst_value="true" file="$(find kr3_config_pipette)/config/fake_controllers.yaml"/>
</launch>
| 548 | XML | 41.230766 | 110 | 0.729927 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/stomp_planning_pipeline.launch.xml | <launch>
<!-- Stomp Plugin for MoveIt -->
<arg name="planning_plugin" value="stomp_moveit/StompPlannerManager" />
<arg name="start_state_max_bounds_error" value="0.1" />
<arg name="jiggle_fraction" value="0.05" />
<!-- The request adapters (plugins) used when planning. ORDER MATTERS! -->
<arg name="planning_adapters"
default="default_planner_request_adapters/LimitMaxCartesianLinkSpeed
default_planner_request_adapters/AddTimeParameterization
default_planner_request_adapters/FixWorkspaceBounds
default_planner_request_adapters/FixStartStateBounds
default_planner_request_adapters/FixStartStateCollision
default_planner_request_adapters/FixStartStatePathConstraints" />
<param name="planning_plugin" value="$(arg planning_plugin)" />
<param name="request_adapters" value="$(arg planning_adapters)" />
<param name="start_state_max_bounds_error" value="$(arg start_state_max_bounds_error)" />
<param name="jiggle_fraction" value="$(arg jiggle_fraction)" />
<rosparam command="load" file="$(find kr3_config_pipette)/config/stomp_planning.yaml"/>
</launch>
| 1,168 | XML | 47.708331 | 91 | 0.699486 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/pilz_industrial_motion_planner_planning_pipeline.launch.xml | <launch>
<!-- The request adapters (plugins) used when planning. ORDER MATTERS! -->
<arg name="planning_adapters" default="" />
<param name="planning_plugin" value="pilz_industrial_motion_planner::CommandPlanner" />
<param name="request_adapters" value="$(arg planning_adapters)" />
<!-- Define default planner (for all groups) -->
<param name="default_planner_config" value="PTP" />
<!-- MoveGroup capabilities to load for this pipeline, append sequence capability -->
<param name="capabilities" value="pilz_industrial_motion_planner/MoveGroupSequenceAction
pilz_industrial_motion_planner/MoveGroupSequenceService" />
</launch>
| 685 | XML | 41.874997 | 95 | 0.691971 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/simple_moveit_controller_manager.launch.xml | <launch>
<!-- Define the MoveIt controller manager plugin to use for trajectory execution -->
<param name="moveit_controller_manager" value="moveit_simple_controller_manager/MoveItSimpleControllerManager" />
<!-- Load controller list to the parameter server -->
<rosparam file="$(find kr3_config_pipette)/config/simple_moveit_controllers.yaml" />
<rosparam file="$(find kr3_config_pipette)/config/ros_controllers.yaml" />
</launch>
| 443 | XML | 48.333328 | 115 | 0.747178 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/ros_control_moveit_controller_manager.launch.xml | <launch>
<!-- Define MoveIt controller manager plugin -->
<param name="moveit_controller_manager" value="moveit_ros_control_interface::MoveItControllerManager" />
</launch>
| 175 | XML | 34.199993 | 105 | 0.754286 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/ompl-chomp_planning_pipeline.launch.xml | <launch>
<!-- load OMPL planning pipeline, but add the CHOMP planning adapter. -->
<include file="$(find kr3_config_pipette)/launch/ompl_planning_pipeline.launch.xml">
<arg name="planning_adapters"
default="default_planner_request_adapters/LimitMaxCartesianLinkSpeed
default_planner_request_adapters/AddTimeParameterization
default_planner_request_adapters/FixWorkspaceBounds
default_planner_request_adapters/FixStartStateBounds
default_planner_request_adapters/FixStartStateCollision
default_planner_request_adapters/FixStartStatePathConstraints
chomp/OptimizerAdapter"
/>
</include>
<!-- load chomp config -->
<rosparam command="load" file="$(find kr3_config_pipette)/config/chomp_planning.yaml" />
<!-- override trajectory_initialization_method: Use OMPL-generated trajectory -->
<param name="trajectory_initialization_method" value="fillTrajectory"/>
</launch>
| 1,020 | XML | 47.619045 | 90 | 0.688235 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/planning_pipeline.launch.xml | <launch>
<!-- This file makes it easy to include different planning pipelines;
It is assumed that all planning pipelines are named XXX_planning_pipeline.launch -->
<arg name="pipeline" default="ompl" />
<include ns="$(arg pipeline)" file="$(dirname)/$(arg pipeline)_planning_pipeline.launch.xml" />
</launch>
| 327 | XML | 28.818179 | 97 | 0.69419 |
AndreiVoica/P10-MAP/src/kr3_config_pipette/launch/trajectory_execution.launch.xml | <launch>
<!-- This file summarizes all settings required for trajectory execution -->
<!-- Define moveit controller manager plugin: fake, simple, or ros_control -->
<arg name="moveit_controller_manager" />
<arg name="fake_execution_type" default="interpolate" />
<!-- Flag indicating whether MoveIt is allowed to load/unload or switch controllers -->
<arg name="moveit_manage_controllers" default="true"/>
<param name="moveit_manage_controllers" value="$(arg moveit_manage_controllers)"/>
<!-- When determining the expected duration of a trajectory, this multiplicative factor is applied to get the allowed duration of execution -->
<param name="trajectory_execution/allowed_execution_duration_scaling" value="1.2"/> <!-- default 1.2 -->
<!-- Allow more than the expected execution time before triggering a trajectory cancel (applied after scaling) -->
<param name="trajectory_execution/allowed_goal_duration_margin" value="0.5"/> <!-- default 0.5 -->
<!-- Allowed joint-value tolerance for validation that trajectory's first point matches current robot state -->
<param name="trajectory_execution/allowed_start_tolerance" value="0.01"/> <!-- default 0.01 -->
<!-- We use pass_all_args=true here to pass fake_execution_type, which is required by fake controllers, but not by real-robot controllers.
As real-robot controller_manager.launch files shouldn't be required to define this argument, we use the trick of passing all args. -->
<include file="$(dirname)/$(arg moveit_controller_manager)_moveit_controller_manager.launch.xml" pass_all_args="true" />
</launch>
| 1,609 | XML | 66.083331 | 145 | 0.73151 |
AndreiVoica/P10-MAP/src/kuka_config_multiple/config/simple_moveit_controllers.yaml | controller_list:
- name: kr3_1_arm_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr3_1_joint_a1
- kr3_1_joint_a2
- kr3_1_joint_a3
- kr3_1_joint_a4
- kr3_1_joint_a5
- kr3_1_joint_a6
- name: kr3_1_hand_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr3_1_schunk_joint_left
- kr3_1_schunk_joint_right
- name: kr3_2_arm_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr3_2_joint_a1
- kr3_2_joint_a2
- kr3_2_joint_a3
- kr3_2_joint_a4
- kr3_2_joint_a5
- kr3_2_joint_a6
- name: kr3_2_hand_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr3_2_schunk_joint_left
- kr3_2_schunk_joint_right
- name: kr3_3_arm_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr3_3_joint_a1
- kr3_3_joint_a2
- kr3_3_joint_a3
- kr3_3_joint_a4
- kr3_3_joint_a5
- kr3_3_joint_a6
- name: kr3_4_arm_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr3_4_joint_a1
- kr3_4_joint_a2
- kr3_4_joint_a3
- kr3_4_joint_a4
- kr3_4_joint_a5
- kr3_4_joint_a6
- name: kr4_5_arm_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr4_5_joint_a1
- kr4_5_joint_a2
- kr4_5_joint_a3
- kr4_5_joint_a4
- kr4_5_joint_a5
- kr4_5_joint_a6
- name: kr4_5_hand_controller
action_ns: follow_joint_trajectory
type: FollowJointTrajectory
default: True
joints:
- kr4_5_schunk_joint_left
- kr4_5_schunk_joint_right | 1,960 | YAML | 24.467532 | 38 | 0.612245 |
AndreiVoica/P10-MAP/src/kuka_config_multiple/config/kinematics.yaml | kr3_1_arm:
kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin
kinematics_solver_search_resolution: 0.005
kinematics_solver_timeout: 0.005
kr3_2_arm:
kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin
kinematics_solver_search_resolution: 0.005
kinematics_solver_timeout: 0.005
kr3_3_arm:
kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin
kinematics_solver_search_resolution: 0.005
kinematics_solver_timeout: 0.005
kr3_4_arm:
kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin
kinematics_solver_search_resolution: 0.005
kinematics_solver_timeout: 0.005
kr4_5_arm:
kinematics_solver: kdl_kinematics_plugin/KDLKinematicsPlugin
kinematics_solver_search_resolution: 0.005
kinematics_solver_timeout: 0.005 | 769 | YAML | 37.499998 | 62 | 0.806242 |
AndreiVoica/P10-MAP/src/kuka_config_multiple/config/fake_controllers.yaml | controller_list:
- name: fake_kr3_1_arm_controller
type: $(arg fake_execution_type)
joints:
- kr3_1_joint_a1
- kr3_1_joint_a2
- kr3_1_joint_a3
- kr3_1_joint_a4
- kr3_1_joint_a5
- kr3_1_joint_a6
- name: fake_kr3_1_hand_controller
type: $(arg fake_execution_type)
joints:
- kr3_1_schunk_joint_left
- kr3_1_schunk_joint_right
- name: fake_kr3_2_arm_controller
type: $(arg fake_execution_type)
joints:
- kr3_2_joint_a1
- kr3_2_joint_a2
- kr3_2_joint_a3
- kr3_2_joint_a4
- kr3_2_joint_a5
- kr3_2_joint_a6
- name: fake_kr3_2_hand_controller
type: $(arg fake_execution_type)
joints:
- kr3_2_schunk_joint_left
- kr3_2_schunk_joint_right
- name: fake_kr3_3_arm_controller
type: $(arg fake_execution_type)
joints:
- kr3_3_joint_a1
- kr3_3_joint_a2
- kr3_3_joint_a3
- kr3_3_joint_a4
- kr3_3_joint_a5
- kr3_3_joint_a6
- name: fake_kr3_3_hand_controller
type: $(arg fake_execution_type)
joints:
[]
- name: fake_kr3_4_arm_controller
type: $(arg fake_execution_type)
joints:
- kr3_4_joint_a1
- kr3_4_joint_a2
- kr3_4_joint_a3
- kr3_4_joint_a4
- kr3_4_joint_a5
- kr3_4_joint_a6
- name: fake_kr3_4_hand_controller
type: $(arg fake_execution_type)
joints:
[]
- name: fake_kr4_5_arm_controller
type: $(arg fake_execution_type)
joints:
- kr4_5_joint_a1
- kr4_5_joint_a2
- kr4_5_joint_a3
- kr4_5_joint_a4
- kr4_5_joint_a5
- kr4_5_joint_a6
- name: fake_kr4_5_hand_controller
type: $(arg fake_execution_type)
joints:
- kr4_5_schunk_joint_left
- kr4_5_schunk_joint_right
initial: # Define initial robot poses per group
- group: kr3_1_hand
pose: kr3_1_open
- group: kr3_2_hand
pose: kr3_2_open
- group: kr4_5_hand
pose: kr4_5_open | 1,952 | YAML | 24.697368 | 48 | 0.578381 |
AndreiVoica/P10-MAP/src/kuka_config_multiple/config/ros_controllers.yaml | kr3_1_arm_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr3_1_joint_a1
- kr3_1_joint_a2
- kr3_1_joint_a3
- kr3_1_joint_a4
- kr3_1_joint_a5
- kr3_1_joint_a6
gains:
kr3_1_joint_a1:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_joint_a2:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_joint_a3:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_joint_a4:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_joint_a5:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_joint_a6:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_hand_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr3_1_schunk_joint_left
- kr3_1_schunk_joint_right
gains:
kr3_1_schunk_joint_left:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_1_schunk_joint_right:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_arm_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr3_2_joint_a1
- kr3_2_joint_a2
- kr3_2_joint_a3
- kr3_2_joint_a4
- kr3_2_joint_a5
- kr3_2_joint_a6
gains:
kr3_2_joint_a1:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_joint_a2:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_joint_a3:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_joint_a4:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_joint_a5:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_joint_a6:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_hand_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr3_2_schunk_joint_left
- kr3_2_schunk_joint_right
gains:
kr3_2_schunk_joint_left:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_2_schunk_joint_right:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_3_arm_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr3_3_joint_a1
- kr3_3_joint_a2
- kr3_3_joint_a3
- kr3_3_joint_a4
- kr3_3_joint_a5
- kr3_3_joint_a6
gains:
kr3_3_joint_a1:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_3_joint_a2:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_3_joint_a3:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_3_joint_a4:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_3_joint_a5:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_3_joint_a6:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_4_arm_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr3_4_joint_a1
- kr3_4_joint_a2
- kr3_4_joint_a3
- kr3_4_joint_a4
- kr3_4_joint_a5
- kr3_4_joint_a6
gains:
kr3_4_joint_a1:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_4_joint_a2:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_4_joint_a3:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_4_joint_a4:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_4_joint_a5:
p: 100
d: 1
i: 1
i_clamp: 1
kr3_4_joint_a6:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_arm_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr4_5_joint_a1
- kr4_5_joint_a2
- kr4_5_joint_a3
- kr4_5_joint_a4
- kr4_5_joint_a5
- kr4_5_joint_a6
gains:
kr4_5_joint_a1:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_joint_a2:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_joint_a3:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_joint_a4:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_joint_a5:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_joint_a6:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_hand_controller:
type: effort_controllers/JointTrajectoryController
joints:
- kr4_5_schunk_joint_left
- kr4_5_schunk_joint_right
gains:
kr4_5_schunk_joint_left:
p: 100
d: 1
i: 1
i_clamp: 1
kr4_5_schunk_joint_right:
p: 100
d: 1
i: 1
i_clamp: 1 | 4,216 | YAML | 16.004032 | 52 | 0.481262 |
AndreiVoica/P10-MAP/src/kuka_hw_axis/README.md | The kuka_hw_axis package allows controlling a KUKA robot in joint space.
| 68 | Markdown | 33.499983 | 67 | 0.808824 |
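With the kuka_hw_axis hardware interface running and the controllers from config/hardware_controllers.yaml spawned, joint-space goals go through the standard ros_control JointTrajectoryController interface. The sketch below is illustrative only; the /arm_controller/command topic follows the usual ros_control naming for the arm_controller defined in that config, and positions are given in radians because the hardware interface converts them to degrees before writing them to the robot.

#!/usr/bin/env python
# Illustrative only: send a single joint-space waypoint to the arm_controller.
import rospy
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint

rospy.init_node("joint_space_demo")
pub = rospy.Publisher("/arm_controller/command", JointTrajectory, queue_size=1)
rospy.sleep(1.0)  # give the publisher time to connect

traj = JointTrajectory()
traj.joint_names = ["joint_a1", "joint_a2", "joint_a3", "joint_a4", "joint_a5", "joint_a6"]
point = JointTrajectoryPoint()
point.positions = [0.0, -1.57, 1.57, 0.0, 1.57, 0.0]  # radians
point.time_from_start = rospy.Duration(3.0)
traj.points.append(point)
pub.publish(traj)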
AndreiVoica/P10-MAP/src/kuka_hw_axis/src/kuka_hw_axis_interface_main.cpp | /* @author Aytaç Kahveci */
#include <kuka_hw_axis/kuka_hardware_interface.h>
#include <unistd.h>
int main(int argc, char **argv)
{
ROS_INFO_STREAM_NAMED("hardware_axis_interface", "Starting hardware interface...");
ros::init(argc, argv, "kuka_hardware_axis_interface");
ros::AsyncSpinner spinner(1);
spinner.start();
ros::NodeHandle nh;
kuka_hw_interface::kukaHardwareInterface robot;
ros::Time timestamp;
timestamp = ros::Time::now();
ros::Rate loop_rate(10);
robot.start();
controller_manager::ControllerManager controller_manager(&robot, nh);
sleep(1);
while(ros::ok())
{
ros::Duration period = ros::Time::now() - timestamp;
robot.read();
timestamp = ros::Time::now();
controller_manager.update(timestamp, period);
robot.write();
//usleep(100);
loop_rate.sleep();
}
spinner.stop();
ROS_INFO_STREAM_NAMED("hardware_axis_interface", "Shutting down.");
return 0;
}
| 1,007 | C++ | 21.4 | 87 | 0.622642 |
AndreiVoica/P10-MAP/src/kuka_hw_axis/src/kuka_hw_interface_axis.cpp | /* @author Aytaç Kahveci */
#include <kuka_hw_axis/kuka_hardware_interface.h>
#include <math.h>
#define PI 3.14159
#include <stdexcept>
namespace kuka_hw_interface
{
kukaHardwareInterface::kukaHardwareInterface()
{
pos_ = new double[6]{0, 0, 0, 0, 0, 0};
vel_ = new double[6]{0, 0, 0, 0, 0, 0};
eff_ = new double[6]{0, 0, 0, 0, 0, 0};
cmd_pos_ = new double[6]{0, 0, 0, 0, 0, 0};
last_cmd_pos_ = new double[6]{0, 0, 0, 0, 0, 0};
cmd_vel_ = new double[6]{0, 0, 0, 0, 0, 0};
cmd_eff_ = new double[6]{0, 0, 0, 0, 0, 0};
registerInterface(&joint_state_interface_);
registerInterface(&position_joint_interface_);
if(!nh_.getParam("controller_joint_names", joint_names_))
{
ROS_ERROR("Couldn't find required parameter 'controller_joint_names' on the parameter server.");
throw std::runtime_error("Couldn't find required parameter 'controller_joint_names' on the parameter server.");
}
for(size_t i=0; i<n_dof_; ++i)
{
joint_state_interface_.registerHandle(hardware_interface::JointStateHandle(joint_names_[i], &pos_[i], &vel_[i], &eff_[i]));
position_joint_interface_.registerHandle(hardware_interface::JointHandle(joint_state_interface_.getHandle(joint_names_[i]), &cmd_pos_[i]));
}
ROS_INFO_STREAM_NAMED("hardware_interface", "Loaded kuka_hardware_interface");
}
kukaHardwareInterface::~kukaHardwareInterface()
{
delete [] pos_;
delete [] vel_;
delete [] eff_;
        delete [] cmd_pos_;
        delete [] last_cmd_pos_;
delete [] cmd_vel_;
delete [] cmd_eff_;
delete axisAct;
delete myAxis;
}
void kukaHardwareInterface::read()
{
client_->readVariable<KRLE6Axis>(axisAct);
for(size_t i=0; i<n_dof_; i++)
{
pos_[i] = axisAct->asArrayA1ToA6()[i]*PI/180;
}
}
void kukaHardwareInterface::write()
{
bool changes_pos=false;
for(std::size_t i=0; i<n_dof_; i++)
{
if(last_cmd_pos_[i]!=cmd_pos_[i])
{
last_cmd_pos_[i]= cmd_pos_[i];
changes_pos = true;
}
}
if(changes_pos)
{
myAxis->setA1ToA6({cmd_pos_[0]*180/PI,cmd_pos_[1]*180/PI,cmd_pos_[2]*180/PI,cmd_pos_[3]*180/PI,cmd_pos_[4]*180/PI,cmd_pos_[5]*180/PI});
client_->writeVariable<KRLAxis>(myAxis);
ROS_INFO_STREAM("POSSENDED "<< 0<<": "<< cmd_pos_[0]);
ROS_INFO("SEND POS!");
}
}
void kukaHardwareInterface::start()
{
std::string host_;
if(!nh_.getParam("Robot_IP",host_))
{
ROS_ERROR_STREAM("Couldn't find required parameter 'Robot_IP' on the parameter server");
throw std::runtime_error("Couldn't find required parameter 'Robot_IP' on the parameter server");
}
hostName_ = new char[host_.length() + 1];
strcpy(hostName_,host_.c_str());
if(!nh_.getParam("Robot_Port",port_))
{
ROS_ERROR_STREAM("Couldn't find required parameter 'Robot_Port' on the parameter server");
throw std::runtime_error("Couldn't find required parameter 'Robot_Port' on the parameter server");
}
client_ = new kukaClient(hostName_,port_);
ROS_INFO_STREAM_NAMED("kuka_hardware_interface", "Got connection from robot");
axisAct = new KRLE6Axis("$AXIS_ACT",{"A1", "A2", "A3", "A4", "A5", "A6", "E1", "E2", "E3", "E4", "E5", "E6"});
myAxis = new KRLAxis("MYAXIS");
}
}
| 3,637 | C++ | 35.38 | 151 | 0.550179 |
AndreiVoica/P10-MAP/src/kuka_hw_axis/include/kuka_hw_axis/kuka_hardware_interface.h | /* @author Aytaç Kahveci */
#ifndef KUKA_HARDWARE_INTERFACE_H
#define KUKA_HARDWARE_INTERFACE_H
#include <vector>
#include <string>
//ROS
#include <ros/ros.h>
//ros_control
#include <hardware_interface/joint_command_interface.h>
#include <hardware_interface/joint_state_interface.h>
#include <hardware_interface/robot_hw.h>
#include <realtime_tools/realtime_publisher.h>
#include <controller_manager/controller_manager.h>
#include <kuka_ros_open_comm/KRLAxis.h>
#include <kuka_ros_open_comm/KRLE6Axis.h>
//Timers
#include <chrono>
//KUKA CrossCommClient
#include <kuka_ros_open_comm/kuka_client.h>
namespace kuka_hw_interface
{
class kukaHardwareInterface : public hardware_interface::RobotHW
{
private:
ros::NodeHandle nh_;
unsigned int n_dof_ = 6;
std::vector<std::string> joint_names_;
double *pos_;
double *vel_;
double *eff_;
double *cmd_pos_;
double *last_cmd_pos_;
double *cmd_vel_;
double *cmd_eff_;
hardware_interface::JointStateInterface joint_state_interface_;
hardware_interface::PositionJointInterface position_joint_interface_;
ros::Duration control_period_;
double loop_hz_;
kukaClient* client_;
char *hostName_;
int port_;
public:
kukaHardwareInterface();
~kukaHardwareInterface();
void read();
void write();
void start();
KRLE6Axis *axisAct;
KRLAxis *myAxis;
};
} // namespace kuka_hw_interface
#endif
| 1,549 | C | 20.830986 | 77 | 0.649451 |
AndreiVoica/P10-MAP/src/kuka_hw_axis/config/hardware_controllers.yaml | #Publish all joint states
joint_state_controller:
type: joint_state_controller/JointStateController
publish_rate: 100
# Joint trajectory controller
position_trajectory_controller:
type: "position_controllers/JointGroupPositionController"
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
# Joint trajectory controller
arm_controller:
type: "position_controllers/JointTrajectoryController"
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
#state_publish_rate: 50 # Defaults to 50
#action_monitor_rate: 20 # Defaults to 20
Robot_IP: "192.168.1.15"
Robot_Port: 7000 #7001
| 690 | YAML | 19.939393 | 59 | 0.694203 |
AndreiVoica/P10-MAP/src/kuka_hw_axis/config/controller_joint_names.yaml | controller_joint_names:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
| 114 | YAML | 13.374998 | 23 | 0.54386 |
AndreiVoica/P10-MAP/src/kuka_hw_axis/config/hardware_controllers2.yaml | #Publish all joint states
joint_state_controller:
type: joint_state_controller/JointStateController
publish_rate: 100
# Joint trajectory controller
position_trajectory_controller:
type: "position_controllers/JointGroupPositionController"
joints:
- kr3_2_joint_a1
- kr3_2_joint_a2
- kr3_2_joint_a3
- kr3_2_joint_a4
- kr3_2_joint_a5
- kr3_2_joint_a6
# Joint trajectory controller
arm_controller:
type: "position_controllers/JointTrajectoryController"
joints:
- kr3_2_joint_a1
- kr3_2_joint_a2
- kr3_2_joint_a3
- kr3_2_joint_a4
- kr3_2_joint_a5
- kr3_2_joint_a6
#state_publish_rate: 50 # Defaults to 50
#action_monitor_rate: 20 # Defaults to 20
# Robot_IP: "192.168.1.12"
Robot_Port: 7000 #7001
| 764 | YAML | 22.181818 | 59 | 0.689791 |
AndreiVoica/P10-MAP/src/isaac_moveit/scripts/kuka_combined_joints_publisher.py | #!/usr/bin/env python
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import copy
import time
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from geometry_msgs.msg import Pose, PoseArray, Quaternion
import math
from math import pi, tau, dist, fabs, cos
from moveit_commander.conversions import pose_to_list
from sensor_msgs.msg import JointState
from std_msgs.msg import String
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# import dynamic_reconfigure.client
class kuka_combined_joints_publisher:
def __init__(self):
self.joints_dict = {}
self.joint_request = JointState()
self.pose_request = Pose()
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.robot_joints = self.robot.get_joint_names()
self.scene = moveit_commander.PlanningSceneInterface()
# Default group name
self.group_name = "kr3_1_arm"
self.move_group = moveit_commander.MoveGroupCommander(self.group_name)
self.eef_link = self.move_group.get_end_effector_link()
self.move_group.allow_replanning(True)
self.display_trajectory_publisher = rospy.Publisher(
"/move_group/display_planned_path",
moveit_msgs.msg.DisplayTrajectory,
queue_size=20,
)
# # IP reconfiguration TEST
# rospy.init_node('dynamic_reconfigurator', anonymous=True)
# self.client = dynamic_reconfigure.client.Client("/move_group")
# Initialize ROS node
rospy.init_node("kuka_combined_joints_publisher")
# Publisher for joint commands
self.pub = rospy.Publisher("/joint_command", JointState, queue_size=1)
# TBD, publish joints only for selected move group
self.pub_test = rospy.Publisher("/arm_controller/command", JointState, queue_size=1)
# Control from Rviz
rospy.Subscriber("/joint_command_desired", JointState, self.joint_states_callback, queue_size=1)
# Control each robot from Isaac (1st select group, then get joint states)
rospy.Subscriber("/joint_move_group_isaac", String, self.select_move_group, queue_size=1)
rospy.Subscriber("/joint_command_isaac", JointState, self.go_to_joint_states_callback_isaac, queue_size=1)
rospy.Subscriber("/pose_command_isaac", Pose, self.go_to_pose_callback_isaac, queue_size=1)
rospy.Subscriber("/cartesian_path_command_isaac", PoseArray, self.go_to_cartesian_path_callback_isaac, queue_size=10)
# Rviz Control
def joint_states_callback(self, message):
rospy.loginfo("Rviz message: %s", message)
joint_commands = JointState()
joint_commands.header = message.header
for i, name in enumerate(message.name):
# Storing arm joint names and positions
self.joints_dict[name] = message.position[i]
# if name == "joint_left":
# # Adding additional panda_finger_joint2 state info (extra joint used in isaac sim)
# # panda_finger_joint2 mirrors panda_finger_joint1
# joints_dict["joint_right"] = message.position[i]
joint_commands.name = self.joints_dict.keys()
joint_commands.position = self.joints_dict.values()
# Publishing combined message containing all arm and finger joints
self.pub.publish(joint_commands)
self.pub_test.publish(joint_commands)
rospy.loginfo("joint commands Rviz: %s", joint_commands)
return
def select_move_group(self, message):
rospy.loginfo("Robot joints: %s", self.robot_joints)
self.group_name = message.data
self.move_group = moveit_commander.MoveGroupCommander(self.group_name)
self.eef_link = self.move_group.get_end_effector_link()
if self.eef_link == "":
eef_name = self.group_name.split('_')[0] + '_' + self.group_name.split('_')[1]
self.eef_link = eef_name + "_link_6"
rospy.loginfo("End effector link: %s", self.eef_link)
# params = { 'Robot_IP' : '192.168.1.1'}
# config = self.client.update_configuration(params)
rospy.loginfo("Selected move group: %s", self.group_name)
return
def go_to_joint_states_callback_isaac(self, message):
rospy.loginfo("Message topic: %s", message)
# Get current joint positions
joint_goal = self.move_group.get_current_joint_values()
rospy.loginfo("Joint goal1: %s", joint_goal)
# Get requested joint positions
joint_goal = message.position
rospy.loginfo("Joint goal2: %s", joint_goal)
# Go to requested joint positions
self.move_group.go(joint_goal, wait=True)
rospy.loginfo("Joint goal3: %s", joint_goal)
# self.move_group.stop()
# for i, name in enumerate(message.name):
# # Storing arm joint names and positions
# self.joints_dict[name] = joint_goal[i]
# rospy.loginfo("Joint joints dict: %s", self.joints_dict)
# # Creating joint command message
# joint_commands = JointState()
# joint_commands.header = message.header
# joint_commands.name = self.joints_dict.keys()
# joint_commands.position = self.joints_dict.values()
# # Publishing combined message containing all arm and finger joints
# self.pub.publish(joint_commands)
# Clearing joint dictionary
#self.joints_dict = {}
# Variable to test if joint positions are within tolerance
current_joints = self.move_group.get_current_joint_values()
return self.all_close(joint_goal, current_joints, 0.01)
def go_to_cartesian_path_callback_isaac(self, message):
# Set a list of waypoints for the Cartesian path
waypoints = message.poses
rospy.loginfo("Cartesian path waypoints: %s", waypoints)
# Set the start state to the current state
self.move_group.set_start_state_to_current_state()
# Compute the Cartesian path
(plan, fraction) = self.move_group.compute_cartesian_path(waypoints, # waypoint poses
0.02, # eef_step
0.0) # jump_threshold
# Execute the plan
self.move_group.execute(plan, wait=True)
def go_to_pose_callback_isaac(self, message):
target_pose = Pose()
target_pose = message
rospy.loginfo("Target pose: %s", target_pose)
self.move_group.set_pose_target(target_pose, self.eef_link) # Reference from end-effector link (gripper base_link)
# [0,0,0,1] Sets the orientation of the end-effector link to robot base_link (world)
        # Pose orientation is a geometry_msgs Quaternion with fields in the order (x, y, z, w)
self.move_group.go(target_pose, wait=True)
current_joints = self.move_group.get_current_joint_values()
return #self.all_close(target_pose, current_joints, 0.01)
def all_close(self, goal, actual, tolerance):
"""
Convenience method for testing if the values in two lists are within a tolerance of each other.
For Pose and PoseStamped inputs, the angle between the two quaternions is compared (the angle
between the identical orientations q and -q is calculated correctly).
@param: goal A list of floats, a Pose or a PoseStamped
@param: actual A list of floats, a Pose or a PoseStamped
@param: tolerance A float
@returns: bool
"""
if type(goal) is list:
for index in range(len(goal)):
if abs(actual[index] - goal[index]) > tolerance:
return False
elif type(goal) is geometry_msgs.msg.PoseStamped:
return self.all_close(goal.pose, actual.pose, tolerance)
elif type(goal) is geometry_msgs.msg.Pose:
x0, y0, z0, qx0, qy0, qz0, qw0 = pose_to_list(actual)
x1, y1, z1, qx1, qy1, qz1, qw1 = pose_to_list(goal)
# Euclidean distance
d = dist((x1, y1, z1), (x0, y0, z0))
# phi = angle between orientations
cos_phi_half = fabs(qx0 * qx1 + qy0 * qy1 + qz0 * qz1 + qw0 * qw1)
return d <= tolerance and cos_phi_half >= cos(tolerance / 2.0)
return True
if __name__ == "__main__":
kuka_combined_joints_publisher()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
""" What I send:
header:
seq: 170
stamp:
secs: 3010
nsecs: 716823688
frame_id: "base_link"
name:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
position: [0.686481519975468, -2.3677175964755364, 2.5781044455248914, 2.145352880856928, 1.9185556285919494, -5.372349182452595]
velocity: []
effort: []
"""
""" What I get:
header:
seq: 45 <-- Several messages, not only one
stamp:
secs: 315
nsecs: 450016452
frame_id: "world"
name:
- kr3_1_joint_a1
- kr3_1_joint_a2
- kr3_1_joint_a3
- kr3_1_joint_a4
- kr3_1_joint_a5
- kr3_1_joint_a6
- kr3_1_schunk_joint_left
- kr3_1_schunk_joint_right
- kr3_2_joint_a1
- kr3_2_joint_a2
- kr3_2_joint_a3
- kr3_2_joint_a4
- kr3_2_joint_a5
- kr3_2_joint_a6
- kr3_2_schunk_joint_left
- kr3_2_schunk_joint_right
- kr3_3_joint_a1
- kr3_3_joint_a2
- kr3_3_joint_a3
- kr3_3_joint_a4
- kr3_3_joint_a5
- kr3_3_joint_a6
- kr3_4_joint_a1
- kr3_4_joint_a2
- kr3_4_joint_a3
- kr3_4_joint_a4
- kr3_4_joint_a5
- kr3_4_joint_a6
- kr4_5_joint_a1
- kr4_5_joint_a2
- kr4_5_joint_a3
- kr4_5_joint_a4
- kr4_5_joint_a5
- kr4_5_joint_a6
- kr4_5_schunk_joint_left
- kr4_5_schunk_joint_right
position: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.8726288771072731, -1.0000287031046116, -0.785433093176108, -1.5708897833434887, 8.03748164791614e-05, 1.0472336069120263, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
velocity: []
effort: []
"""
"""
position:
x: 0.6
y: 0.49601638140176374
z: 0.3
orientation:
x: 0.0
y: 0.0
z: 0.707
w: 0.707
"""
| 10,945 | Python | 30.635838 | 289 | 0.617725 |
AndreiVoica/P10-MAP/src/kuka_hw_cart/README.md | The kuka_hw_cart package allows controlling the Kuka robot in Cartesian space.
| 71 | Markdown | 34.999983 | 70 | 0.830986 |
AndreiVoica/P10-MAP/src/kr3_config_gripper/config/fake_controllers.yaml | controller_list:
- name: fake_hand_controller
type: $(arg fake_execution_type)
joints:
- joint_left
- joint_right
- name: fake_arm_controller
type: $(arg fake_execution_type)
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
initial: # Define initial robot poses per group
- group: hand
pose: open
- group: arm
pose: down | 426 | YAML | 20.349999 | 48 | 0.584507 |
AndreiVoica/P10-MAP/src/kr3_config_gripper/config/ros_controllers.yaml | hand_controller:
type: velocity_controllers/JointTrajectoryController
joints:
- joint_left
- joint_right
gains:
joint_left:
p: 100
d: 1
i: 1
i_clamp: 1
joint_right:
p: 100
d: 1
i: 1
i_clamp: 1
arm_controller:
type: velocity_controllers/JointTrajectoryController
joints:
- joint_a1
- joint_a2
- joint_a3
- joint_a4
- joint_a5
- joint_a6
gains:
joint_a1:
p: 20000
d: 1
i: 1
i_clamp: 1
joint_a2:
p: 20000
d: 1
i: 1
i_clamp: 1
joint_a3:
p: 20000
d: 1
i: 1
i_clamp: 1
joint_a4:
p: 20000
d: 1
i: 1
i_clamp: 1
joint_a5:
p: 20000
d: 1
i: 1
i_clamp: 1
joint_a6:
p: 20000
d: 1
i: 1
i_clamp: 1 | 850 | YAML | 14.196428 | 54 | 0.469412 |
AndreiVoica/P10-MAP/maps/maps_extension.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.maps import MAPs
import asyncio
import omni.ui as ui
from omni.isaac.ui.ui_utils import btn_builder
class MAPsExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="",
submenu_name="",
name="MAPs",
title="Material Acceleration Platform AAU",
doc_link="https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_core_hello_world.html",
overview="This Example introduces the user on how to do cool stuff with Isaac Sim through scripting in asynchronous mode.",
sample=MAPs(),
file_path=os.path.abspath(__file__),
number_of_extra_frames=3,
)
self.task_ui_elements = {}
frame = self.get_frame(index=0)
self.build_simulation_controls_ui(frame)
frame = self.get_frame(index=1)
self.build_real_controls_ui(frame)
frame = self.get_frame(index=2)
self.build_experiment_controls_ui(frame)
return
def _on_sim_control_button_event(self):
asyncio.ensure_future(self.sample._on_sim_control_event_async())
self.task_ui_elements["Simulation Control"].enabled = False
self.task_ui_elements["Real Setup Control"].enabled = True
self.task_ui_elements["Connect PMC"].enabled = True
self.task_ui_elements["Start Experiment"].enabled = True
return
def _on_real_control_button_event(self):
asyncio.ensure_future(self.sample._on_real_control_event_async())
self.task_ui_elements["Real Setup Control"].enabled = False
self.task_ui_elements["Simulation Control"].enabled = True
self.task_ui_elements["Connect PMC"].enabled = False
self.task_ui_elements["Start Experiment"].enabled = True
return
def _on_connect_pmc_button_event(self):
self.sample._connect_pmc()
self.task_ui_elements["Real Setup Control"].enabled = True
self.task_ui_elements["Simulation Control"].enabled = True
self.task_ui_elements["Connect PMC"].enabled = False
self.task_ui_elements["Start Experiment"].enabled = True
return
def _on_start_experiment_button_event(self):
asyncio.ensure_future(self.sample._on_start_experiment_event_async())
self.task_ui_elements["Real Setup Control"].enabled = False
self.task_ui_elements["Simulation Control"].enabled = False
self.task_ui_elements["Connect PMC"].enabled = False
self.task_ui_elements["Start Experiment"].enabled = True
return
def post_reset_button_event(self):
self.task_ui_elements["Simulation Control"].enabled = True
self.task_ui_elements["Real Setup Control"].enabled = True
self.task_ui_elements["Connect PMC"].enabled = True
self.task_ui_elements["Start Experiment"].enabled = True
return
def post_load_button_event(self):
self.task_ui_elements["Simulation Control"].enabled = True
self.task_ui_elements["Real Setup Control"].enabled = False
self.task_ui_elements["Connect PMC"].enabled = True
self.task_ui_elements["Start Experiment"].enabled = True
return
def post_clear_button_event(self):
self.task_ui_elements["Simulation Control"].enabled = False
self.task_ui_elements["Real Setup Control"].enabled = False
self.task_ui_elements["Connect PMC"].enabled = False
self.task_ui_elements["Start Experiment"].enabled = False
return
def build_simulation_controls_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = "Simulation"
frame.visible = True
dict = {
"label": "Simulation Control",
"type": "button",
"text": "Start Simulation",
"tooltip": "Simulation Control",
"on_clicked_fn": self._on_sim_control_button_event,
}
self.task_ui_elements["Simulation Control"] = btn_builder(**dict)
self.task_ui_elements["Simulation Control"].enabled = False
def build_real_controls_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = "Real Setup"
frame.visible = True
dict = {
"label": "Connect PMC",
"type": "button",
"text": "Connect",
"tooltip": "Connect PMC",
"on_clicked_fn": self._on_connect_pmc_button_event,
}
self.task_ui_elements["Connect PMC"] = btn_builder(**dict)
self.task_ui_elements["Connect PMC"].enabled = False
dict = {
"label": "Real Setup Control",
"type": "button",
"text": "Start Real Setup",
"tooltip": "Real Setup Control",
"on_clicked_fn": self._on_real_control_button_event,
}
self.task_ui_elements["Real Setup Control"] = btn_builder(**dict)
self.task_ui_elements["Real Setup Control"].enabled = False
def build_experiment_controls_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = "Experiment Control"
frame.visible = True
dict = {
"label": "Start Experiment",
"type": "button",
"text": "Start",
"tooltip": "Start Experiment",
"on_clicked_fn": self._on_start_experiment_button_event,
}
self.task_ui_elements["Start Experiment"] = btn_builder(**dict)
self.task_ui_elements["Start Experiment"].enabled = False
# dict = {
# "label": "Real Setup Control",
# "type": "button",
# "text": "Start Real Setup",
# "tooltip": "Real Setup Control",
# "on_clicked_fn": self._on_real_control_button_event,
# }
# self.task_ui_elements["Real Setup Control"] = btn_builder(**dict)
# self.task_ui_elements["Real Setup Control"].enabled = False
| 7,136 | Python | 40.982353 | 135 | 0.579036 |
AndreiVoica/P10-MAP/maps/maps_reader.py | import yaml
class ControllerJointsLoader:
"""
This class loads the controller joints data from the simple_moveit_controllers.yaml file
"""
def __init__(self, filename):
self.filename = filename
self.controller_data = {}
self.load_controller_data()
def load_controller_data(self):
with open(self.filename, 'r') as file:
data = yaml.safe_load(file)
arm_count = 1
hand_count = 1
for controller in data['controller_list']:
# Remove "_controller" from the name
name = controller['name'].replace('_controller', '')
# Extract the base name for eef_link
eef_name = name.split('_')[0] + '_' + name.split('_')[1]
# Create robot_arm or robot_hand key
if 'arm' in name:
planning_group = 'robot_arm_' + str(arm_count)
arm_count += 1
elif 'hand' in name:
robot_number = name.split('_')[1]
planning_group = 'robot_hand_' + robot_number
hand_count += 1
else:
continue
# Store the planning_group, its corresponding joints, controller name and eef_link in the dictionary
self.controller_data[planning_group] = {
'planning_group': name,
'joints': controller['joints'],
'eef_link': eef_name + '_link_6'
}
def get_controller_data(self):
return self.controller_data
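    # Illustrative shape of get_controller_data() output. The controller and joint names are
    # assumptions based on the kr3_*-prefixed naming used elsewhere in the project:
    #   {
    #       'robot_arm_1':  {'planning_group': 'kr3_1_arm',  'joints': ['kr3_1_joint_a1', ...], 'eef_link': 'kr3_1_link_6'},
    #       'robot_hand_1': {'planning_group': 'kr3_1_hand', 'joints': ['kr3_1_schunk_joint_left', ...], 'eef_link': 'kr3_1_link_6'},
    #   }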
class RecipeLoader:
def __init__(self, actions_file):
self.actions_file = actions_file
def read_instructions_from_yaml(self):
with open(self.actions_file, 'r') as file:
instructions_list = yaml.safe_load(file)
# for instruction in instructions_list:
# instruction_name = instruction['name']
# parameters = instruction['parameters']
return instructions_list
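# Illustrative recipe entry (YAML) matching the 'action'/'parameters' keys consumed by
# MAPs.execute_actions(); the concrete values below are assumptions, not taken from recipe_v2.yaml:
#   - action: MOVE_SHUTTLE_TO_TARGET
#     parameters:
#       xbot_id: 1
#       target_x: 2
#       target_y: 3
#   - action: GRIPPER_CONTROL
#     parameters:
#       planning_group: robot_hand_1
#       state: close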
# Using the class
# filename = '/home/robotlab/Documents/Github/P10-MAP/src/kuka_config_multiple/config/simple_moveit_controllers.yaml' # Path to the yaml file
# loader = ControllerJointsLoader(filename)
# controller_data = loader.get_controller_data()
# print(controller_data) | 2,216 | Python | 35.344262 | 141 | 0.575361 |
AndreiVoica/P10-MAP/maps/maps.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# Author: Daniel Moreno París ([email protected]) & Andrei Voica ([email protected])
from omni.isaac.examples.maps.maps_reader import ControllerJointsLoader
from omni.isaac.examples.maps.maps_reader import RecipeLoader
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core import World
from omni.isaac.core.prims import GeometryPrim, XFormPrim, RigidPrim
import omni.kit.commands
from pxr import Sdf, Gf, UsdPhysics
from omni.isaac.core.utils.rotations import euler_angles_to_quat, quat_to_euler_angles, euler_to_rot_matrix
import numpy as np
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.articulations import Articulation, ArticulationSubset
from omni.isaac.core.physics_context import PhysicsContext
import carb
# PMC Library Imports
from pmclib import system_commands as sys # PMC System related commands
from pmclib import xbot_commands as bot # PMC Mover related commands
from pmclib import pmc_types # PMC API Types
import time
import random
import functools
from omni.isaac.core.utils import viewports, extensions
from omni.isaac.core.utils.prims import set_targets
import asyncio
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import String, Header
from geometry_msgs.msg import Pose, PoseStamped, PoseArray, Point, Quaternion
import math
# import sys
# Action graph imports
import omni.graph.core as og
import rosgraph
#########################################################################################################
class MAPs(BaseSample):
def __init__(self) -> None:
super().__init__()
# # Positions are relative to parents, so set them with reversed values
# SCENE GEOMETRY
# env (group) spacing:
self._env_spacing = 2.0
self.last_update_time = time.time()
# Lab Setup:
self._lab_setup_position = np.array([0.0, 0.0, 0.0]) # Gf.Vec3f(0.5, 0.0, 0.0)
self._lab_setup_orientation = np.array([0, 0, 0, 1])
self._lab_setup_scale = 1.0
# Shuttles Grid:
self._grid_position = np.array([1.2877, -1.0415, 0.0])
shuttle_orientation = np.pi/2
self._grid_orientation = np.array([np.cos(shuttle_orientation/2), 0, 0, np.sin(shuttle_orientation/2)]) #Rotates 90 degrees around z-axis
# Shuttles:
self._number_shuttles = 4
# self._shuttle_position = np.array([1.2277, -0.9815, 1.07])
self._shuttle_position = np.array([0.06, 0.06, 1.07]) #([0.06, 0.06, 1.07])
self._platform_limits = np.array([0.0, 0.0, 0.832, 0.596]) # x_min, y_min, x_max, y_max
self._shuttle_scale = 0.01
# self.xbot_ids = [1, 2, 3, 4, 5, 6, 7, 8]
self.xbot_ids = [i for i in range(1, self._number_shuttles + 1)]
self.targets_x = []
self.targets_y = []
# Trays
self._number_tray_vial = 1
self._tray_vial_position = np.array([0.35992, -0.15884, 1.063]) #([0.06, 0.06, 1.10])
# self._tray_vial_position = np.array([1.2277, -1.2, 1])
self._tray_vial_scale = 0.0098
self._number_tray_beaker = 1
# self._tray_beaker_position = np.array([0.30, 0.06, 1.090])
self._tray_beaker_position = np.array([0.57916, -0.15884, 1.063])
self._tray_beaker_scale = 0.0099
# Flyways:
# DEFINE FLYWAYS MATRIX
self.flyways_matrix = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
# Flyway offsets
self._flyway_position = np.array([1.165, -0.92398, 0.99302])
self._flyway_orientation = np.array([0, 0, 0, 1])
self._flyway_scale = 0.01
# Grid for BFS Algorithm
self.grid = []
for row in self.flyways_matrix:
grid_row = []
for cell in row:
grid_row.extend([cell]*2) # each cell in flyways_matrix is 2 cells in grid
for _ in range(2): # each cell in flyways_matrix is 2 cells in grid
self.grid.append(grid_row)
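        # With the 4x3 flyways_matrix above this yields an 8x6 grid of 1s: each flyway tile
        # is expanded into a 2x2 block of walkable cells used by the BFS planner further below.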
# Magnetic Stirrer
self._stirrer_position = np.array([-0.15554, 0.71716, 1.0049])
self._stirrer_orientation = np.array(euler_angles_to_quat([np.pi/2, 0, 0]))
self._stirrer_scale = 1.0
# HPLC
#self._hplc_position = np.array([1.05725, -0.10274, 0.0369])
self._hplc_position = np.array([-1.03033, 0.15418, 1.0049])
self._hplc_orientation = np.array(euler_angles_to_quat([0, 0, 0]))
self._hplc_scale = 0.01
# Loading station
#self._loading_station_position = np.array([1.42345, -0.53634, 0.0369])
self._loading_station_position = np.array([-1.47442, 0.58184, 1.0049])
self._loading_station_orientation = np.array(euler_angles_to_quat([0, 0, np.pi/2]))
self._loading_station_scale = 0.01
# Removing station
#self._removing_station_position = np.array([1.15523, -1.15596, 0.0369])
self._removing_station_position = np.array([-1.04649, 1.23841, 1.0049])
self._removing_station_orientation = np.array(euler_angles_to_quat([0, 0, 0]))
self._removing_station_scale = 0.01
# Kuka Multiple Arms:
self._kuka_arms_position = np.array([0.0, 0.0, 1.0])
self._kuka_arms_orientation = np.array(euler_angles_to_quat([0, 0, 0]))
self._kuka_arms_scale = 1.0
# Repository path:
# self.repo_folder = "/home/andrei/P10-MAP/"
self.repo_folder = "/home/robotlab/Documents/Github/P10-MAP/"
# USD asset paths:
# self.asset_folder = "omniverse://localhost/Projects/MAPs-AAU/Assets/"
self.asset_folder = self.repo_folder + "assets/"
self.asset_paths = {
#"kr3": self.asset_folder + "kr3r540/kr3r540_v3/kr3r540_v3.usd",
#"kr3": self.asset_folder + "kr3r540/kr3r540_v4/kr3r540_v4.usd", # Schunk Kr3
"kr3": self.asset_folder + "kr3r540_v4/kr3r540_v4g.usd", # Schunk Kr3
"kr4": self.asset_folder + "kr4r600/kr4r600_v2.usd",
"kuka_multiple": self.asset_folder + "kuka_multiple_arms/kuka_multiple_arms_5.usd",
"franka": "omniverse://localhost/NVIDIA/Assets/Isaac/2022.2.1/Isaac/Robots/Franka/franka_alt_fingers.usd",
"flyway": self.asset_folder + "flyways/flyway_segment.usd",
# "shuttle": self.asset_folder + "120x120x10/acopos_shuttle_120.usd", # Basic shuttle
"shuttle": self.asset_folder + "120x120x10/shuttle_wh.usd",
"tray_vial" : self.asset_folder + "Trays/Tray_vial_w.usd",
"tray_flask" : self.asset_folder + "Trays/Tray_beaker_w.usd",
"vial" : self.asset_folder + "vials/vial.usd",
"stirrer" : self.asset_folder + "Magnetic_stirrer/Magnetic_stirrer.usd",
#"lab_setup": self.asset_folder + "Lab_setup_v2.usd" # Lab Setup with robots
#"lab_setup": self.asset_folder + "Lab_setup_v1.usd" # Lab Setup without robots
"lab_setup": self.asset_folder + "Lab_setup_v0.usd", # Lab Setup without robots or Acopos Matrix
"hplc": self.asset_folder + "Loading_station/Loading_station.usd",
"loading_station": self.asset_folder + "Loading_station/Loading_station.usd",
"removing_station": self.asset_folder + "Loading_station/Loading_station.usd"
}
# Prim paths Dictionaries:
self.shuttles_prim_dict = {} # Dictionary to store shuttle prim paths
self.items_prim_dict = {} # Dictionary to store tray vial prim paths
self.eef_link_prim_dict = {} # Dictionary to store eef link prim paths for each robot
self.gripper_prim_dict = {} # Dictionary to store gripper prim paths for each robot
self.current_pos_dict = {} # Dictionary to store shuttle current positions
# Get dictionary with planning group, joints and eef link for each robot
self.filename = self.repo_folder + "src/kuka_config_multiple/config/simple_moveit_controllers.yaml"
self.joints_loader = ControllerJointsLoader(self.filename)
self.robot_joints_data = self.joints_loader.get_controller_data()
print(self.robot_joints_data)
# Load recipe with experiment instructions
self.actions_file = self.repo_folder + "recipe/recipe_v2.yaml"
self.actions_loader = RecipeLoader(self.actions_file)
self.recipe = self.actions_loader.read_instructions_from_yaml()
# Ros topics messages
self.planning_group = String() # ROS topic name for move group
self.joint_state_request = JointState()
self.pose_request = Pose()
self.cartesian_path_request = PoseArray()
self.action_completed = False
self.start_time = 0
self.action_times = {}
self.control_switch = 0 # 0: Sim, 1: PMC
return
# This function is called to setup the assets in the scene for the first time
# Class variables should not be assigned here, since this function is not called
    # after a hot-reload; it's only called to load the world starting from an EMPTY stage
def setup_scene(self):
# Check if ROS master is running
if not rosgraph.is_master_online():
carb.log_error("Please run roscore before executing this script")
# A world is defined in the BaseSample, can be accessed everywhere EXCEPT __init__
world = self.get_world()
world = World.instance()
# stage.SetDefaultPrim(world)
world.scene.add_default_ground_plane() # adds a default ground plane to the scene
# Add physics context
physx_context = PhysicsContext()
# Enable GPU dynamics
physx_context.enable_gpu_dynamics(True)
# Reload recipe
self.recipe = self.actions_loader.read_instructions_from_yaml()
# Add Xform reference for the shuttles
world.scene.add(XFormPrim(prim_path="/World/LabSetup", name=f"LabSetup"))
# Add Xform reference for the shuttles
world.scene.add(XFormPrim(prim_path="/World/LabSetup/Grid", name=f"Grid"))
# Add Xform reference for the flyways
for i in range(len(self.flyways_matrix)):
for j in range(len(self.flyways_matrix[i])):
if self.flyways_matrix[i][j] == 1:
add_reference_to_stage(usd_path=self.asset_paths["flyway"],
prim_path="/World/LabSetup/Grid/flyway_{}{}".format((i+1),(j+1)))
world.scene.add(GeometryPrim(prim_path="/World/LabSetup/Grid/flyway_{}{}".format((i+1),(j+1)),
name="flyway_{}{}_ref_geom".format(i+1, j+1), collision=True))
# Add shuttles references
for i in range(self._number_shuttles):
add_reference_to_stage(usd_path=self.asset_paths["shuttle"], prim_path="/World/LabSetup/Grid/shuttle_{}".format(i+1))
world.scene.add(RigidPrim(prim_path="/World/LabSetup/Grid/shuttle_{}".format(i+1),
name="shuttle_{}_ref_geom".format(i+1),
position= self._shuttle_position + np.array([0.12 *i, 0, 0]),
scale = np.full((3,), self._shuttle_scale),
mass = 0.30))
self.targets_x = np.append(self.targets_x, self._shuttle_position[0] + np.array([0.12 *i]))
self.targets_y = np.append(self.targets_y, self._shuttle_position[1])
# Add Trays
for i in range(self._number_tray_vial):
add_reference_to_stage(usd_path=self.asset_paths["tray_vial"], prim_path="/World/LabSetup/Grid/tray_vial_{}".format(i+1))
world.scene.add(RigidPrim(prim_path="/World/LabSetup/Grid/tray_vial_{}".format(i+1),
name="tray_vial_{}_ref_geom".format(i+1),
position= self._tray_vial_position + np.array([0.12 *i, 0, 0]),
scale = np.full((3,), self._tray_vial_scale),
mass = 0.15))
for i in range(self._number_tray_beaker):
add_reference_to_stage(usd_path=self.asset_paths["tray_flask"], prim_path="/World/LabSetup/Grid/tray_beaker_{}".format(i+1))
world.scene.add(RigidPrim(prim_path="/World/LabSetup/Grid/tray_beaker_{}".format(i+1),
name="tray_beaker_{}_ref_geom".format(i+1),
position= self._tray_beaker_position + np.array([0.12 *i, 0, 0]),
scale = np.full((3,), self._tray_beaker_scale),
mass = 0.15))
# Add Magnetic Stirrer
add_reference_to_stage(usd_path=self.asset_paths["stirrer"],
prim_path="/World/LabSetup/Stirrer")
world.scene.add(RigidPrim(prim_path ="/World/LabSetup/Stirrer",
name="magnetic_stirrer",
position = self._stirrer_position,
orientation = self._stirrer_orientation,
mass = 3))
# Add HPLC
add_reference_to_stage(usd_path=self.asset_paths["hplc"],
prim_path="/World/LabSetup/hplc")
world.scene.add(RigidPrim(prim_path ="/World/LabSetup/hplc",
name="hplc",
position = self._hplc_position,
orientation = self._hplc_orientation,
mass = 20))
# Add Loading station
add_reference_to_stage(usd_path=self.asset_paths["loading_station"],
prim_path="/World/LabSetup/loading_station")
world.scene.add(RigidPrim(prim_path ="/World/LabSetup/loading_station",
name="loading_station",
position = self._loading_station_position,
orientation = self._loading_station_orientation,
mass = 20))
# Add Remove station
add_reference_to_stage(usd_path=self.asset_paths["removing_station"],
prim_path="/World/LabSetup/removing_station")
world.scene.add(RigidPrim(prim_path ="/World/LabSetup/removing_station",
name="removing_station",
position = self._removing_station_position,
orientation = self._removing_station_orientation,
mass = 20))
# Add Robots references
add_reference_to_stage(usd_path=self.asset_paths["kuka_multiple"],
prim_path="/World/Kuka_Multiple_Arms")
self.kukas = world.scene.add(Articulation(prim_path ="/World/Kuka_Multiple_Arms",
name="Kuka_Multiple_Arms",
position = self._kuka_arms_position,
orientation = self._kuka_arms_orientation))
return
    # Here we assign the class's variables. This function is called after the load button is
    # pressed, regardless of whether we start from an empty stage or not. It runs after
    # setup_scene and after one physics time step, so that the physics handles needed to
    # retrieve the physical properties of the different objects have been propagated.
async def setup_post_load(self):
# Load World and Assets --CHECK
self._world = self.get_world()
self._world.scene.enable_bounding_boxes_computations()
# Camera Initial Viewport
viewports.set_camera_view(eye=np.array([3.3, -0.7, 2.2]), target=np.array([0.8, -0.7, 1.05]))
# Add USD Assets
await self._add_lab_setup()
await self._add_shuttles_grid()
for i in range(len(self.flyways_matrix)):
for j in range(len(self.flyways_matrix[i])):
if self.flyways_matrix[i][j] == 1:
await self._add_flyway(i, j)
# Shuttles Prim Dictionary
stage = omni.usd.get_context().get_stage()
for shuttle_number in range(self._number_shuttles):
shuttle_path = "/World/LabSetup/Grid/shuttle_{}".format(shuttle_number + 1)
prim = stage.GetPrimAtPath(shuttle_path)
if prim:
key_name = "prim_{}".format(shuttle_number + 1)
self.shuttles_prim_dict[key_name] = prim
else:
print("Error: shuttle prim not found at path {}".format(shuttle_path))
# Items Prim Dictionary
for tray_vial in range(self._number_tray_vial):
tray_vial_path = "/World/LabSetup/Grid/tray_vial_{}".format(tray_vial + 1)
prim = stage.GetPrimAtPath(tray_vial_path)
if prim:
key_name = "prim_tray_vial_{}".format(tray_vial + 1)
self.items_prim_dict[key_name] = prim
for tray_beaker in range(self._number_tray_beaker):
tray_beaker_path = "/World/LabSetup/Grid/tray_beaker_{}".format(tray_beaker + 1)
prim = stage.GetPrimAtPath(tray_beaker_path)
if prim:
key_name = "prim_tray_beaker_{}".format(tray_beaker + 1)
while key_name in self.items_prim_dict: # Check if the key already exists in the dictionary
tray_beaker += self._number_tray_vial # Increment the index by the number of vials
tray_beaker_path = "/World/LabSetup/Grid/tray_beaker_{}".format(tray_beaker + 1)
prim = stage.GetPrimAtPath(tray_beaker_path)
key_name = "prim_{}_beaker".format(tray_beaker + 1)
self.items_prim_dict[key_name] = prim
print("ITEMS DICT: ", self.items_prim_dict)
# Iterate over each robot
for robot_name, robot_data in self.robot_joints_data.items():
eef_link = robot_data['eef_link']
eef_link_path = "/World/Kuka_Multiple_Arms/{}".format(eef_link)
prim = stage.GetPrimAtPath(eef_link_path)
if prim:
self.eef_link_prim_dict[robot_name] = prim
else:
print("Error: eef link prim not found at path {}".format(eef_link_path))
print("EEF DICT: ", self.eef_link_prim_dict)
# Create rospy node to publish requested joint positions
rospy.init_node('isaac_joint_request_publisher')
self.pub_group = rospy.Publisher('/joint_move_group_isaac', String, queue_size=10)
self.pub_joints = rospy.Publisher('/joint_command_isaac', JointState, queue_size=10)
self.pub_pose = rospy.Publisher('/pose_command_isaac', Pose, queue_size=10)
self.pub_cartesian_path = rospy.Publisher('/cartesian_path_command_isaac', PoseArray, queue_size=10)
# Creating a action graph with ROS component nodes
try:
og.Controller.edit(
{"graph_path": "/World/Kuka_Multiple_Arms/ActionGraph", "evaluator_name": "execution"},
{
og.Controller.Keys.CREATE_NODES: [
("OnImpulseEvent", "omni.graph.action.OnImpulseEvent"),
("ReadSimTime", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
("PublishJointState", "omni.isaac.ros_bridge.ROS1PublishJointState"),
("SubscribeJointState", "omni.isaac.ros_bridge.ROS1SubscribeJointState"),
("ArticulationController", "omni.isaac.core_nodes.IsaacArticulationController"),
("PublishTF", "omni.isaac.ros_bridge.ROS1PublishTransformTree"),
("PublishClock", "omni.isaac.ros_bridge.ROS1PublishClock"),
],
og.Controller.Keys.CONNECT: [
("OnImpulseEvent.outputs:execOut", "PublishJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "SubscribeJointState.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishTF.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "PublishClock.inputs:execIn"),
("OnImpulseEvent.outputs:execOut", "ArticulationController.inputs:execIn"),
("ReadSimTime.outputs:simulationTime", "PublishJointState.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishClock.inputs:timeStamp"),
("ReadSimTime.outputs:simulationTime", "PublishTF.inputs:timeStamp"),
("SubscribeJointState.outputs:jointNames", "ArticulationController.inputs:jointNames"),
("SubscribeJointState.outputs:positionCommand", "ArticulationController.inputs:positionCommand"),
("SubscribeJointState.outputs:velocityCommand", "ArticulationController.inputs:velocityCommand"),
("SubscribeJointState.outputs:effortCommand", "ArticulationController.inputs:effortCommand"),
],
og.Controller.Keys.SET_VALUES: [
# Setting the /Kuka target prim to Articulation Controller node
("SubscribeJointState.inputs:topicName", "joint_command"),
("ArticulationController.inputs:usePath", False),
("ArticulationController.inputs:robotPath", ""),
],
},
)
except Exception as e:
print(e)
# Setting the /Kuka target prim to Publish JointState node
set_targets(
prim = stage.GetPrimAtPath("/World/Kuka_Multiple_Arms/ActionGraph/PublishJointState"),
attribute="inputs:targetPrim",
target_prim_paths=["/World/Kuka_Multiple_Arms"]
)
# Setting the /Kuka target prim to Articulation Controller node
set_targets(
prim = stage.GetPrimAtPath("/World/Kuka_Multiple_Arms/ActionGraph/ArticulationController"),
attribute="inputs:targetPrim",
target_prim_paths=["/World/Kuka_Multiple_Arms"]
)
# Setting the /Kuka target prim to Publish Transform Tree node
set_targets(
prim = stage.GetPrimAtPath("/World/Kuka_Multiple_Arms/ActionGraph/PublishTF"),
attribute = "inputs:targetPrims",
target_prim_paths=["/World/Kuka_Multiple_Arms"]
)
# Control Switch
if self.control_switch == 0:
# self._world.add_physics_callback("sim_step_shuttles", callback_fn=self.sim_xbots_movement_2)
# rospy.init_node('isaac_test', anonymous=True)
# self.pub_joints = rospy.Publisher("/joint_command_desired", queue_size=1)
#self.on_impulse_event()
self._world.add_physics_callback("sim_step_impulse", callback_fn=self.on_impulse_event)
# self._world.add_physics_callback("sim_step_shuttles", self.sim_xbots_movement)
elif self.control_switch == 1:
self._connect_pmc() # Connect to PMC
self._world.add_physics_callback("sim_step_read_acopos", callback_fn=self.read_xbots_positions) #callback names have to be unique
self._world.add_physics_callback("sim_step_move_acopos", callback_fn=self.send_xbots_positions)
return
# Add Lab Setup reference
async def _add_lab_setup(self):
self._lab_setup_ref_geom = self._world.scene.get_object(f"LabSetup")
# self._lab_setup_ref_geom.set_local_scale(np.array([self._lab_setup_scale]))
self._lab_setup_ref_geom.set_world_pose(position=self._lab_setup_position,
orientation=self._lab_setup_orientation)
self._lab_setup_ref_geom.set_default_state(position=self._lab_setup_position,
orientation=self._lab_setup_orientation)
# self._lab_setup_ref_geom.set_collision_approximation("none")
# self._convexIncludeRel.AddTarget(self._table_ref_geom.prim_path)
# Add flyways to the scene
async def _add_flyway(self, x, y):
self._flyway_ref_geom = self._world.scene.get_object(f"flyway_{x+1}{y+1}_ref_geom")
self._flyway_ref_geom.set_local_scale(np.array([self._flyway_scale]))
self._flyway_ref_geom.set_world_pose(position = self._flyway_position + (-0.24 * (x), +0.24 * (y), 0))
self._flyway_ref_geom.set_default_state(position = self._flyway_position)
self._flyway_ref_geom.set_collision_approximation("none")
# Add xForm shuttles reference
async def _add_shuttles_grid(self):
self._shuttles_grid_ref_geom = self._world.scene.get_object(f"Grid")
self._shuttles_grid_ref_geom.set_world_pose(position=self._grid_position,
orientation=self._grid_orientation)
self._shuttles_grid_ref_geom.set_default_state(position=self._grid_position,
orientation=self._grid_orientation)
## Interface Functions:
async def _on_sim_control_event_async(self):
world = self.get_world()
self.targets_x, self.targets_y = self.create_random_coordinates(self._number_shuttles)
if world.physics_callback_exists("sim_step_read_acopos"):
world.remove_physics_callback("sim_step_read_acopos")
# world.add_physics_callback("sim_step_shuttles", self.sim_xbots_movement)
await world.play_async()
return
async def _on_real_control_event_async(self):
world = self.get_world()
if world.physics_callback_exists("sim_step_shuttles"):
world.remove_physics_callback("sim_step_shuttles")
self._world.add_physics_callback("sim_step_read_acopos", callback_fn=self.read_xbots_positions) #callback names have to be unique
#self._world.add_physics_callback("sim_step_move_acopos", callback_fn=self.send_xbots_positions) ## Random positions right now
await world.play_async()
return
## CONTROL FUNCTIONS
# Function to move selected robot to desired joints position
def move_to_joint_state(self, planning_group, joint_state_request):
moveit_planning_group = self.robot_joints_data[planning_group]["planning_group"]
self.planning_group = planning_group
self.joint_state_request.position = joint_state_request
self.pub_group.publish(moveit_planning_group)
self.pub_joints.publish(self.joint_state_request)
if len(joint_state_request) == 6:
# Add a physics callback to check when the action has been completed
callback_fn = functools.partial(self.on_sim_step_check, planning_group, joint_state_request)
self._world.add_physics_callback("sim_step_check", callback_fn)
return
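    # Example (illustrative joint values, in radians): send arm 1 to a 6-DOF joint target.
    #   self.move_to_joint_state("robot_arm_1", [0.0, -1.57, 1.57, 0.0, 1.57, 0.0])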
# Function to move selected robot to desired pose
def move_to_pose(self, planning_group, position, orientation = [0.0, np.pi/2, 0.0]):
moveit_planning_group = self.robot_joints_data[planning_group]["planning_group"]
self.planning_group = planning_group
        quaternion = euler_angles_to_quat(orientation)  # returns (w, x, y, z); create_pose_msg reorders it into the ROS x, y, z, w fields
self.pose_request = self.create_pose_msg(position, quaternion)
self.pub_group.publish(moveit_planning_group)
self.pub_pose.publish(self.pose_request)
# Add a physics callback to check when the action has been completed
callback_fn = functools.partial(self.on_sim_step_check, planning_group, position)
self._world.add_physics_callback("sim_step_check", callback_fn)
return
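    # Example (illustrative, coordinates are placeholders in the MoveIt frame): move arm 1 so its
    # EEF reaches a point above the platform with the default tool-down orientation [0, pi/2, 0].
    #   self.move_to_pose("robot_arm_1", [1.215, -0.98, 0.15])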
# Function to move selected robot to desired pose using cartesian path
def move_along_cartesian_path(self, planning_group, waypoints):
moveit_planning_group = self.robot_joints_data[planning_group]["planning_group"]
self.planning_group = planning_group
        self.cartesian_path_request = PoseArray()  # reset so waypoints do not accumulate across calls
        self.cartesian_path_request.header.stamp = rospy.Time.now()
        self.cartesian_path_request.header.frame_id = 'world'  # or whatever frame_id you are using
for waypoint in waypoints:
position, orientation = waypoint
quaternion = euler_angles_to_quat(orientation) # convert euler to quaternion
pose = self.create_pose_msg(position, quaternion)
self.cartesian_path_request.poses.append(pose)
print("Cartesian path request: ", self.cartesian_path_request)
self.pub_group.publish(moveit_planning_group)
self.pub_cartesian_path.publish(self.cartesian_path_request)
# Add a physics callback to check when the action has been completed
self._world.add_physics_callback("sim_step_check", lambda arg: self.on_sim_step_check(planning_group, position))
return
# Function to move selected selected shuttle to desired position
def move_shuttle_to_target(self, xbot_id: int , target_x, target_y):
# Check if the xbot_id exists in xbot_ids
if xbot_id in self.xbot_ids:
# Update the corresponding target_x and target_y values
self.targets_x[xbot_id - 1] = target_x
self.targets_y[xbot_id - 1] = target_y
else:
# If the xbot_id doesn't exist in xbot_ids, raise an exception
raise ValueError(f"xbot_id {xbot_id} not found in xbot_ids")
desired_position = [target_x, target_y]
print("xbot_id_function: ", xbot_id)
print("desired_position: ", desired_position)
# Add physics callback to move the selected shuttle to the desired position
# and to check when the action has been completed
self._world.add_physics_callback("sim_step_shuttles", self.sim_xbots_movement)
callback_fn = functools.partial(self.on_sim_step_check, xbot_id, desired_position)
self._world.add_physics_callback("sim_step_check", callback_fn)
# self._world.add_physics_callback("sim_step_check", lambda xbot_id=xbot_id, desired_position=desired_position: self.on_sim_step_check(xbot_id, desired_position))
def attach_object(self, planning_group, state, item):
# Attach the shuttle to the robot arm
callback_fn = functools.partial(self.on_sim_attach_object, planning_group, state, item)
self._world.add_physics_callback("sim_attach_object", callback_fn)
def on_sim_attach_object(self, planning_group, state, item, step_size = 0.01):
# Get prim of item
prim_item = self.items_prim_dict['prim_{}'.format(item)]
# Get prim of robot arm
offset = 0.02
if isinstance(planning_group, int):
if state == True:
shuttle_pos = self.get_shuttle_position(planning_group)
prim_item.GetAttribute('xformOp:translate').Set(( shuttle_pos[0], shuttle_pos[1] , shuttle_pos[2] + offset ))
elif isinstance(planning_group, str):
# eef_pos, eef_orient = self.get_eef_link_position(planning_group)
print("Not working")
# # Set the position of the item
# item_pos = prim_item.GetAttribute('xformOp:translate').Get()
# prim_item.GetAttribute('xformOp:translate').Set(( eef_pos[1] + 1.03443 , -eef_pos[0] +1.46063 -0.18, eef_pos[2] + 1 ))
# # CHECK Maybe create a fake point in between both grippers to attach the item to the robot arm
# # Convert orientation from GfQuatd to GfQuatf
# # eef_orient_f = Gf.Quatf(eef_orient)
# # print(eef_orient_f)
# # prim_item.GetAttribute('xformOp:orient').Set(eef_orient_f)
# # Transform orientation from euler angles to quaternion
# quat_prim = euler_angles_to_quat([0.0, 0.0, 0.0])
# quat = Gf.Quatf(*quat_prim)
# # Set Orientation of item
# prim_item.GetAttribute('xformOp:orient').Set(quat)
# Function to open and close the gripper
def gripper_control(self, planning_group, state):
if state == "open":
self.move_to_joint_state(planning_group, [0.0 , 0.0])
elif state == "close":
self.move_to_joint_state(planning_group, [-0.0030 , -0.0030])
else:
raise ValueError(f"state {state} not found in gripper_control")
desired_position = state
callback_fn = functools.partial(self.on_sim_step_check, planning_group, desired_position)
self._world.add_physics_callback("sim_step_check", callback_fn)
## GET DATA FUNCTIONS
def get_eef_link_position(self, robot_arm):
# Collect end effector position
prim_eef_link = self.eef_link_prim_dict[robot_arm] # Robot arm is a string (e.g. "robot_arm_1"")
eef_pos = prim_eef_link.GetAttribute('xformOp:translate').Get()
eef_orient = prim_eef_link.GetAttribute('xformOp:orient').Get()
return eef_pos[0], eef_pos[1], eef_pos[2], eef_orient
# return eef_pos, eef_orient
def get_shuttle_position(self, xbot_id):
# Retrieve the shuttle position
shuttle_prim = self.shuttles_prim_dict["prim_{}".format(xbot_id)]
shuttle_pos = shuttle_prim.GetAttribute('xformOp:translate').Get()
return shuttle_pos[0], shuttle_pos[1], shuttle_pos[2]
def get_gripper_joints_position(self, robot_hand):
""" Get the current joint positions of the robot gripper """
# Create an ArticulationSubset instance
articulation_subset = ArticulationSubset(articulation=self.kukas, joint_names=self.robot_joints_data[robot_hand]['joints'])
# Get the joint positions
joint_pos_left, joint_pos_right = articulation_subset.get_joint_positions()
return joint_pos_left, joint_pos_right
def get_joints_position(self, robot_arm):
""" Get the current joint positions of the robot arm """
articulation_subset = ArticulationSubset(articulation=self.kukas, joint_names=self.robot_joints_data[robot_arm]['joints'])
current_joint_values = articulation_subset.get_joint_positions()
# # Debugging:
# current_joint_states = self.get_joints_position("robot_arm_1")
# carb.log_warn("Current joint states {}: {}".format("robot_arm_1" ,repr(current_joint_states)))
# current_joint_states = self.get_joints_position("robot_arm_2")
# carb.log_warn("Current joint states {}: {}".format("robot_arm_2" ,repr(current_joint_states)))
# current_joint_states = self.get_joints_position("robot_arm_3")
# carb.log_warn("Current joint states {}: {}".format("robot_arm_3" ,repr(current_joint_states)))
# current_joint_states = self.get_joints_position("robot_arm_4")
# carb.log_warn("Current joint states {}: {}".format("robot_arm_4" ,repr(current_joint_states)))
# current_joint_states = self.get_joints_position("robot_arm_5")
# carb.log_warn("Current joint states {}: {}".format("robot_arm_5" ,repr(current_joint_states)))
return current_joint_values
def has_reached_position(self, planning_group, desired_position, tolerance=0.01):
"""
Check if the robot/shuttle has reached the desired position
planning_group: string (Robot Arm) or integer (Shuttle)
desired_position: list of 3 floats [x, y, z] in Isaac Sim coordinates or string ("open" or "close") for gripper
"""
if isinstance(desired_position, str) and desired_position in ["open", "close"]:
# Get the gripper joint positions
joint_pos_left, joint_pos_right = self.get_gripper_joints_position(planning_group)
print("Joint positions: ", joint_pos_left, joint_pos_right)
# Check if the gripper is open or closed
if (desired_position == "close" and (joint_pos_left < -0.0010 or joint_pos_right < -0.0010)) or \
(desired_position == "open" and (joint_pos_left > -0.0001 and joint_pos_right > -0.0001)):
elapsed_time = time.time() - self.start_time
action_name = "{} gripper {}".format(planning_group, desired_position)
self.action_times.setdefault(action_name, []).append(elapsed_time)
carb.log_warn("{} completed in {:.3f} seconds".format(action_name, elapsed_time))
self.action_completed = True # Set the action_completed flag to True
return True
else:
print("Current gripper position: ", joint_pos_left, joint_pos_right)
return False
elif isinstance(planning_group, str): # Get current position of the robot
if len(desired_position) == 3: # Move to Pose checking (eef position)
current_position = self.get_eef_link_position(planning_group)
# Compute the distance between the current position and the desired position
distance = math.sqrt((current_position[0] - desired_position[0])**2 +
(current_position[1] - desired_position[1])**2 +
(current_position[2] - desired_position[2])**2)
# Check if the distance is within the tolerance
if distance <= tolerance:
elapsed_time = time.time() - self.start_time
action_name = "{} moved to target".format(planning_group)
self.action_times.setdefault(action_name, []).append(elapsed_time)
carb.log_warn("{} completed in {:.3f} seconds".format(action_name, elapsed_time))
self.action_completed = True # Set the action_completed flag to True
current_joint_states = self.get_joints_position(planning_group)
carb.log_warn("Current joint states: {}".format(repr(current_joint_states)))
return True
else:
print("Current position: ", current_position)
print("Distance: ", distance)
return False
elif len(desired_position) == 6: # Move to Joint States checking (joint positions)
current_joint_states = self.get_joints_position(planning_group)
# Compute the distance between the current position and the desired position
distance = math.sqrt((current_joint_states[0] - desired_position[0])**2 +
(current_joint_states[1] - desired_position[1])**2 +
(current_joint_states[2] - desired_position[2])**2 +
(current_joint_states[3] - desired_position[3])**2 +
(current_joint_states[4] - desired_position[4])**2 +
(current_joint_states[5] - desired_position[5])**2)
# Check if the distance is within the tolerance
if distance <= tolerance:
elapsed_time = time.time() - self.start_time
action_name = "{} moved to joint states".format(planning_group)
self.action_times.setdefault(action_name, []).append(elapsed_time)
carb.log_warn("{} completed in {:.3f} seconds".format(action_name, elapsed_time))
                    self.action_completed = True
                    return True
else:
print("Current position: ", current_joint_states)
print("Distance: ", distance)
return False
else:
print("Invalid desired position")
return False
if isinstance(planning_group, int):
# Get current position of the shuttle
current_position = self.get_shuttle_position(planning_group)
# Compute the distance between the current position and the desired position
distance = math.sqrt((current_position[0] - desired_position[0])**2 +
(current_position[1] - desired_position[1])**2)
# Check if the distance is within the tolerance
if distance <= tolerance:
elapsed_time = time.time() - self.start_time
action_name = "Shuttle {} moved to target".format(planning_group)
self.action_times.setdefault(action_name, []).append(elapsed_time)
carb.log_warn("{} completed in {:.3f} seconds".format(action_name, elapsed_time))
self.action_completed = True # Set the action_completed flag to True
return True
else:
print("Current position: ", current_position)
print("Distance: ", distance)
return False
else:
print("Invalid planning group type. Must be a string for a robot arm or integer for a shuttle.")
return False
def print_action_times_summary(self):
# Print the sum of times for each action, ordered by the number of actions
for action, times in sorted(self.action_times.items(), key=lambda item: len(item[1])):
carb.log_warn("Action '{}': completed {} times, total time {:.3f} seconds".format(action, len(times), sum(times)))
def execute_actions(self):
if len(self.recipe) > 0:
if self.action_completed:
action = self.recipe.pop(0) # Retrieve the first action in the list
action_name = action['action']
parameters = action['parameters']
self.start_time = time.time() # Record the start time for each action
# Print the action to be executed with its parameters
carb.log_warn("Executing action: " + action_name + ": " + str(parameters))
world= self.get_world()
if world.physics_callback_exists("sim_step_check"):
world.remove_physics_callback("sim_step_check")
if world.physics_callback_exists("sim_step_shuttles"):
world.remove_physics_callback("sim_step_shuttles")
self.action_completed = False # Set the action_completed flag to False
if action_name == 'MOVE_TO_JOINT_STATE':
self.move_to_joint_state(**parameters) # The ** operator is used to unpack the dictionary into keyword arguments
elif action_name == 'MOVE_TO_POSE_MOVEIT':
self.move_to_pose(**parameters) # The ** operator is used to unpack the dictionary into keyword arguments
elif action_name == 'MOVE_TO_POSE_IN_PLATFORM':
position_xy = self.platform_pos_to_coordinates(parameters['position'][0],parameters['position'][1], moveit_offset = True)
self.move_to_pose(parameters['planning_group'], [position_xy[0],position_xy[1],parameters['position'][2]], parameters['orientation'])
elif action_name == 'MOVE_ALONG_CARTESIAN_PATH':
self.move_along_cartesian_path(**parameters)
elif action_name == 'MOVE_SHUTTLE_TO_TARGET':
position_xy = self.platform_pos_to_coordinates(parameters['target_x'],parameters['target_y'], moveit_offset = False)
xbot_id = int(parameters['xbot_id'])
self.move_shuttle_to_target(xbot_id, position_xy[0], position_xy[1])
elif action_name == 'GRIPPER_CONTROL':
self.gripper_control(**parameters)
elif action_name == 'ATTACH_OBJECT':
if parameters['state'] == True:
self.attach_object(**parameters)
elif parameters['state'] == False:
world= self.get_world()
if world.physics_callback_exists("sim_attach_object"):
world.remove_physics_callback("sim_attach_object") # Remove the physics callback
self.action_completed = True
else:
print("Invalid action name: ", action_name)
else:
world= self.get_world()
if world.physics_callback_exists("sim_step_auto_play"):
world.remove_physics_callback("sim_step_auto_play")
if world.physics_callback_exists("sim_step_check"):
world.remove_physics_callback("sim_step_check")
if world.physics_callback_exists("sim_step_shuttles"):
world.remove_physics_callback("sim_step_shuttles")
carb.log_warn("Recipe completed!")
self.print_action_times_summary()
async def _on_start_experiment_event_async(self):
self.action_completed = True
self._world.add_physics_callback("sim_step_auto_play", callback_fn=self.on_automatic_execution)
#self.execute_actions()
return
def on_impulse_event(self, step_size):
# Tick the Publish/Subscribe JointState, Publish TF and Publish Clock nodes each frame
og.Controller.set(og.Controller.attribute("/World/Kuka_Multiple_Arms/ActionGraph/OnImpulseEvent.state:enableImpulse"), True)
def on_sim_step_check(self, planning_group, desired_position, step_size=1):
# Check if the robot has reached the desired position
self.has_reached_position(planning_group, desired_position)
def on_automatic_execution(self, step_size=1):
# Execute the actions in the recipe
self.execute_actions()
async def setup_pre_reset(self):
# world = self.get_world()
# if world.physics_callback_exists("sim_step"):
# world.remove_physics_callback("sim_step")
return
async def setup_post_reset(self):
# await self._world.play_async()
return
def world_cleanup(self):
return
# Move xbots in simulation (No collision detection)
def sim_xbots_movement(self, step_size):
max_speed = 1.0 # m/s
move_increment = step_size * max_speed
for shuttle_number in range(self._number_shuttles):
prim = self.shuttles_prim_dict["prim_{}".format(shuttle_number + 1)]
current_pos = prim.GetAttribute('xformOp:translate').Get()
#Move shuttle up
if (self.targets_y[shuttle_number]) > current_pos[1]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], current_pos[1] + move_increment, current_pos[2]))
if (current_pos[1] + move_increment) > self.targets_y[shuttle_number]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], self.targets_y[shuttle_number], current_pos[2]))
# Move shuttle down
elif (self.targets_y[shuttle_number]) < current_pos[1]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], current_pos[1] - move_increment, current_pos[2]))
if (current_pos[1] - move_increment) < self.targets_y[shuttle_number]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], self.targets_y[shuttle_number], current_pos[2]))
# check if we reached the target in y axis, then start moving in x direction.
if abs(current_pos[1] - self.targets_y[shuttle_number]) < move_increment:
# Move shuttle right
if (self.targets_x[shuttle_number]) > current_pos[0]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0] + move_increment, current_pos[1], current_pos[2]))
if (current_pos[0] + move_increment) > self.targets_x[shuttle_number]:
prim.GetAttribute('xformOp:translate').Set((self.targets_x[shuttle_number], current_pos[1], current_pos[2]))
# Move shuttle left
elif (self.targets_x[shuttle_number]) < current_pos[0]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0] - move_increment, current_pos[1], current_pos[2]))
if (current_pos[0] - move_increment) < self.targets_x[shuttle_number]:
prim.GetAttribute('xformOp:translate').Set((self.targets_x[shuttle_number], current_pos[1], current_pos[2]))
# Move xbots in simulation (Checking other shuttles in the path)
def sim_xbots_movement_collision(self, step_size):
max_speed = 1.0 # m/s
move_increment = step_size * max_speed
for xbot in range(self._number_shuttles):
prim = self.shuttles_prim_dict["prim_{}".format(xbot + 1)]
current_pos = prim.GetAttribute('xformOp:translate').Get()
x_pos_control = []
y_pos_control = []
# Collect all shuttles positions
for shuttle_number in range(self._number_shuttles):
prim_others = self.shuttles_prim_dict["prim_{}".format(shuttle_number + 1)]
shuttles_pos = prim_others.GetAttribute('xformOp:translate').Get()
x_pos_control.append(shuttles_pos[0])
y_pos_control.append(shuttles_pos[1])
# Decide which direction to move in
dx = self.targets_x[xbot] - current_pos[0]
dy = self.targets_y[xbot] - current_pos[1]
for shuttle in range(self._number_shuttles):
continue_flag = False
if xbot != shuttle and ((current_pos[1] > (y_pos_control[shuttle] - 0.0602 - move_increment) and current_pos[1] < (y_pos_control[shuttle] + 0.0602 + move_increment)) or (current_pos[0] > (x_pos_control[shuttle] - 0.0602 - move_increment) and current_pos[0] < (x_pos_control[shuttle] + 0.0602 + move_increment))):
continue_flag = True
if continue_flag:
continue
# if moving right is safe
if dx > 0:
prim.GetAttribute('xformOp:translate').Set((current_pos) + (move_increment, 0.0, 0.0))
if (current_pos[0] + move_increment) > self.targets_x[xbot]:
prim.GetAttribute('xformOp:translate').Set((self.targets_x[xbot], current_pos[1], current_pos[2]))
break
# if moving left is safe
elif dx < 0:
prim.GetAttribute('xformOp:translate').Set((current_pos) - (move_increment, 0.0 , 0.0))
if (current_pos[0] - move_increment) < self.targets_x[xbot]:
prim.GetAttribute('xformOp:translate').Set((self.targets_x[xbot], current_pos[1], current_pos[2]))
break
# if moving up is safe
if dy > 0:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], current_pos[1] + move_increment, current_pos[2]))
if (current_pos[1] + move_increment) > self.targets_y[xbot]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], self.targets_y[xbot], current_pos[2]))
break
# if moving down is safe
elif dy < 0:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], current_pos[1] - move_increment, current_pos[2]))
if (current_pos[1] - move_increment) < self.targets_y[xbot]:
prim.GetAttribute('xformOp:translate').Set((current_pos[0], self.targets_y[xbot], current_pos[2]))
break
# Read shuttles position and orientation from physical setup
def read_xbots_positions(self, step_size):
# Read info for every shuttle
xbot_list = bot.get_all_xbot_info(1)
xbot_positions = [(xbot.x_pos, xbot.y_pos, xbot.z_pos,
xbot.rx_pos, xbot.ry_pos, xbot.rz_pos,
xbot.xbot_state) for xbot in xbot_list]
# Set position and orientation of shuttles
for shuttle_number in range(self._number_shuttles):
prim = self.shuttles_prim_dict["prim_{}".format(shuttle_number + 1)]
# Set position of shuttle
prim.GetAttribute('xformOp:translate').Set((xbot_positions[shuttle_number][0],
xbot_positions[shuttle_number][1] ,
xbot_positions[shuttle_number][2] + 1.06))
# Transform orientation from euler angles to quaternion
quat_prim = (euler_angles_to_quat([xbot_positions[shuttle_number][3],
xbot_positions[shuttle_number][4],
xbot_positions[shuttle_number][5]]))
# quat = Gf.Quatd(*quat_prim)
quat = Gf.Quatf(*quat_prim)
# Set Orientation of shuttle
prim.GetAttribute('xformOp:orient').Set(quat)
def send_xbots_positions(self, step_size):
"""Send commands to the Xbots to move them to the next target position.
Planning is done using PMC algorithm."""
        # Only send new commands if at least 0.5 s has passed since the last update
if time.time() - self.last_update_time >= 0.5:
#print(bot.get_xbot_status(xbot_id=xid).xbot_state)
xbot_list = bot.get_all_xbot_info(1)
xbot_positions = [(xbot.x_pos, xbot.y_pos, xbot.z_pos,
xbot.rx_pos, xbot.ry_pos, xbot.rz_pos,
xbot.xbot_state) for xbot in xbot_list]
# Don't send commands while the xbots are moving
if all(xbot_state[6] == pmc_types.XbotState.XBOT_IDLE for xbot_state in xbot_positions): #xbot_state[6] --> xbot_state
# Get random unique targets for each shuttle
targets_x, targets_y = self.create_random_coordinates(self._number_shuttles)
print("target_x ", targets_x)
print("target_y ", targets_y)
bot.auto_driving_motion_si(self._number_shuttles, xbot_ids=self.xbot_ids, targets_x=targets_x, targets_y=targets_y)
else:
print("Xbots are moving")
self.last_update_time = time.time()
# if bot.get_xbot_status(xbot_id=xid).xbot_state is pmc_types.XbotState.XBOT_IDLE:
# #self.sample_motions(input_id=xid)
# bot.auto_driving_motion_si(8, xbot_ids=xbot_ids, targets_x=targets_x, targets_y=targets_y)
# # Recover Disabled xbot
# elif bot.get_xbot_status(xbot_id=xid).xbot_state is pmc_types.XbotState.XBOT_DISABLED:
# bot.recover_accident_xbot(xbot_id=xid)
# print("Recovering xbot: ", xid)
def create_random_coordinates(self, num_shuttles):
"""Create random coordinates for each shuttle in num_shuttles"""
x_coords = []
y_coords = []
coords_dict = {}
for i in range(num_shuttles):
while True:
x = random.randint(0, 5) * 0.12 + 0.06
y = random.randint(0, 7) * 0.12 + 0.06
if (x, y) not in coords_dict:
coords_dict[(x, y)] = 1
break
x_coords.append(x)
y_coords.append(y)
return x_coords, y_coords
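    # Example (illustrative): create_random_coordinates(2) might return ([0.30, 0.66], [0.18, 0.54]),
    # i.e. unique (x, y) cell centres on the 6x8 ACOPOS grid, spaced 0.12 m apart.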
def sample_motions(self, input_id):
max_speed = 1.0
max_accel = 10.0
bot.linear_motion_si(xbot_id=input_id, target_x=0.18, target_y=0.06,
max_speed=max_speed, max_accel=max_accel)
bot.linear_motion_si(xbot_id=input_id, target_x=0.18, target_y=0.90,
max_speed=max_speed, max_accel=max_accel)
bot.linear_motion_si(xbot_id=input_id, target_x=0.66, target_y=0.90,
max_speed=max_speed, max_accel=max_accel)
bot.linear_motion_si(xbot_id=input_id, target_x=0.66, target_y=0.06,
max_speed=max_speed, max_accel=max_accel)
# bot.rotary_motion_timed_spin(xbot_id=input_id,rot_time=3, target_rz=3.14,
# max_speed=3.0, max_accel=max_accel)
# bot.linear_motion_si(xbot_id=input_id, target_x=0.60, target_y=0.36,
# max_speed=max_speed, max_accel=max_accel)
# bot.rotary_motion_timed_spin(xbot_id=input_id,rot_time=3, target_rz=3.14,
# max_speed=3.0, max_accel=max_accel)
    def wait_for_xbot_done(self, xid):
while bot.get_xbot_status(xbot_id=xid).xbot_state is not pmc_types.XbotState.XBOT_IDLE:
time.sleep(0.5)
def _connect_pmc(self):
"""Connect to PMC and gain mastership"""
# Connect to PMC
sys.auto_connect_to_pmc()
if not sys.auto_connect_to_pmc():
sys.connect_to_pmc("192.168.10.100") #sys.auto_connect_to_pmc()
carb.log_warn("Connected: " + str(sys.auto_connect_to_pmc()))
carb.log_warn("Status: " + str(sys.get_pmc_status()))
# Gain mastership
if not sys.is_master():
sys.gain_mastership()
carb.log_warn("Master: " + str(sys.is_master()))
# Activate xBots
bot.activate_xbots()
def create_pose_msg(self, position, orientation, frame_id=''):
"""Create a ROS Pose message object from a position and orientation"""
# Create a new Pose object
pose = Pose()
        # Build a Header in case a PoseStamped is needed; note that this function returns a plain Pose
header = Header()
header.frame_id = frame_id
header.stamp = rospy.Time.now()
# Set the position field of the Pose object
position_obj = Point()
position_obj.x = position[0]
position_obj.y = position[1]
position_obj.z = position[2]
pose.position = position_obj
# Set the orientation field of the Pose object (rxyz)
orientation_obj = Quaternion()
orientation_obj.x = orientation[1]
orientation_obj.y = orientation[2]
orientation_obj.z = orientation[3]
orientation_obj.w = orientation[0]
pose.orientation = orientation_obj
        pose_stamped = PoseStamped(header=header, pose=pose)  # assembled for completeness, but not returned
        return pose
def platform_pos_to_coordinates(self, x_pos, y_pos, moveit_offset=False, robot_arm=None):
"""
Description: Converts the acopos platform positions to the platform coordinates.
Origin of the matrix is at the bottom left corner (0,0)
Limits of the acopos matrix are (5,7)
Use moveit_offset to convert the coordinates to the moveit frame --> Robot arms EEF Poses.
"""
## TBD - Add robot end effector offsets
if robot_arm is None:
robot_arm = self.planning_group #if self.planning_group is not None else None
lim_x = self.flyways_matrix.shape[1] * 2
lim_y = self.flyways_matrix.shape[0] * 2
# Moveit offset
offset = [1.275, -1.04, 0.0]
if moveit_offset:
x_coord = -(y_pos * 0.12 + 0.06) + offset[0]
y_coord = (x_pos * 0.12 + 0.06) + offset[1]
else:
x_coord = x_pos * 0.12 + 0.06
y_coord = y_pos * 0.12 + 0.06
if x_pos > (lim_x - 1):
raise ValueError("x_pos exceeds the size of the platform.")
if y_pos > (lim_y - 1):
raise ValueError("y_pos exceeds the size of the platform.")
return x_coord, y_coord
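    # Worked example for platform_pos_to_coordinates (hypothetical inputs):
    #   platform_pos_to_coordinates(2, 3) -> (2 * 0.12 + 0.06, 3 * 0.12 + 0.06) = (0.30, 0.42) m
    #   platform_pos_to_coordinates(2, 3, moveit_offset=True) -> (-(0.42) + 1.275, 0.30 - 1.04) = (0.855, -0.74) m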
##############################################################################################################
    ########################################## BFS Algorithm #####################################################
##############################################################################################################
def sim_xbots_movement_bfs(self, step_size):
max_speed = 1.0 # m/s
move_increment = step_size * max_speed
for xbot in range(self._number_shuttles):
prim = self.shuttles_prim_dict["prim_{}".format(xbot + 1)]
current_pos = prim.GetAttribute('xformOp:translate').Get()
# Convert current_pos and target to grid coordinates
start = (int((current_pos[0] + 0.06)/0.12), len(self.grid) - 1 - int((current_pos[1] + 0.06)/0.12))
end = (int((self.targets_x[xbot] + (0.06 if self.targets_x[xbot] % 0.12 != 0 else 0))/0.12),
len(self.grid) - 1 - int((self.targets_y[xbot] + (0.06 if self.targets_y[xbot] % 0.12 != 0 else 0))/0.12))
path = self.bfs(self.grid, start, end)
if path is not None and len(path) > 1:
next_step = path[1] # Take the second step in the path (first is current position)
# Convert next_step from grid coordinates back to simulation's coordinates
next_step_sim = ((next_step[0]*0.12) + (0.06 if next_step[0] % 1 != 0 else 0),
((len(self.grid) - 1 - next_step[1])*0.12) + (0.06 if (len(self.grid) - 1 - next_step[1]) % 1 != 0 else 0))
prim.GetAttribute('xformOp:translate').Set((next_step_sim[0], next_step_sim[1], current_pos[2]))
print("Moving xbot {} to {}".format(xbot + 1, next_step_sim))
# BFS algorithm
def bfs(self, grid, start, end):
queue = []
queue.append([start]) # Wrap the start tuple in a list
        visited = {start}  # Set of visited nodes, seeded with the start cell
while queue:
path = queue.pop(0)
node = path[-1] # Get the last node in this path
if node == end:
return path
for direction in [(0, 1), (0, -1), (1, 0), (-1, 0)]: # Right, Left, Down, Up
new_node = (node[0] + direction[0], node[1] + direction[1])
if (new_node[0] >= 0 and new_node[0] < len(grid) and # Check grid boundaries
new_node[1] >= 0 and new_node[1] < len(grid[0]) and
grid[new_node[0]][new_node[1]] == 1 and # Check if new_node is walkable
new_node not in visited): # Check if the node has not been visited
new_path = list(path)
new_path.append(new_node)
queue.append(new_path)
visited.add(new_node) # Add the new node to the visited set
print("No valid path found.")
return None | 63,928 | Python | 49.417192 | 344 | 0.580184 |
AndreiVoica/P10-MAP/user_examples/acopos.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.core import World
from omni.isaac.core.prims import GeometryPrim, XFormPrim
import omni.kit.commands
from pxr import Sdf, Gf, UsdPhysics
from omni.isaac.core.utils.rotations import euler_angles_to_quat
import numpy as np
from omni.isaac.core.utils.nucleus import get_assets_root_path, get_server_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.robots import Robot
from omni.isaac.core.utils.types import ArticulationAction
import carb
from pmclib import system_commands as sys # PMC System related commands
from pmclib import xbot_commands as bot # PMC Mover related commands
from pmclib import pmc_types # PMC API Types
import time
import random
class Acopos(BaseSample):
def __init__(self) -> None:
super().__init__()
# SCENE GEOMETRY
# env (group) spacing:
self._env_spacing = 2.0
self.last_update_time = time.time()
# Lab Setup:
self._lab_setup_position = np.array([0.0, 0.0, 0.0]) # Gf.Vec3f(0.5, 0.0, 0.0)
self._lab_setup_orientation = np.array([0, 0, 0, 1])
self._lab_setup_scale = 1.0
# Shuttles:
self._number_shuttles = 8
self._shuttle_position = np.array([1.2277, -0.9815, 1.07]) # Gf.Vec3f(0.5, 0.0, 0.0)
self._platform_limits = np.array([0.0, 0.0, 0.832, 0.596]) # x_min, y_min, x_max, y_max
self._target = np.array([0.8, 0.52])
self._shuttle_scale = 0.01
# Shuttles Grid:
self._grid_position = np.array([1.2877, -1.0415, 0.0]) # Gf.Vec3f(0.5, 0.0, 0.0)
shuttle_orientation = np.pi/2
self._grid_orientation = np.array([np.cos(shuttle_orientation/2), 0, 0, np.sin(shuttle_orientation/2)]) #Rotates 90 degrees around z-axis
# USD asset paths:
self.asset_folder = "omniverse://localhost/Projects/MAPs-AAU/Assets/"
self.asset_paths = {
"shuttle": self.asset_folder + "120x120x10/acopos_shuttle_120.usd",
"lab_setup": self.asset_folder + "Lab_setup_v2.usd"
}
self.prim_dict = {}
self.control_switch = 1 # 0: Sim, 1: PMC
return
    # This function is called to set up the assets in the scene for the first time.
    # Class variables should not be assigned here, since this function is not called
    # after a hot-reload; it is only called to load the world starting from an EMPTY stage.
def setup_scene(self):
# A world is defined in the BaseSample, can be accessed everywhere EXCEPT __init__
world = self.get_world()
world = World.instance()
world.scene.add_default_ground_plane() # adds a default ground plane to the scene
# Add Lab Setup Reference
add_reference_to_stage(usd_path=self.asset_paths["lab_setup"], prim_path="/World")
world.scene.add(GeometryPrim(prim_path="/World", name=f"lab_setup_ref_geom", collision=True))
# Add Xform reference for the shuttles
world.scene.add(XFormPrim(prim_path="/World/LabSetup/Grid", name=f"Grid"))
# Add shuttles references
for i in range(self._number_shuttles):
add_reference_to_stage(usd_path=self.asset_paths["shuttle"], prim_path="/World/LabSetup/Grid/shuttle_{}".format(i+1))
world.scene.add(GeometryPrim(prim_path="/World/LabSetup/Grid/shuttle_{}".format(i+1),
name="shuttle_{}_ref_geom".format(i+1), collision=True))
# world.scene.add(Robot(prim_path="/World/LabSetup/Grid/shuttle_{}".format(i+1),
# name="shuttle_{}_ref_geom".format(i+1)))
return
    # Here we assign the class's variables. This function is called after the load button is pressed,
    # regardless of whether we start from an empty stage or not. It is called after setup_scene and
    # after one physics time step, which propagates the physics handles needed to retrieve
    # many physical properties of the different objects.
async def setup_post_load(self):
# Load World and Assets
self._world = self.get_world()
self._world.scene.enable_bounding_boxes_computations()
# Add USD Assets
await self._add_lab_setup()
await self._add_shuttles_grid()
for i in range(self._number_shuttles):
await self._add_shuttle(i)
# Shuttles Prim Dictionary
stage = omni.usd.get_context().get_stage()
for shuttle_number in range(self._number_shuttles):
shuttle_path = "/World/LabSetup/Grid/shuttle_{}".format(shuttle_number + 1)
prim = stage.GetPrimAtPath(shuttle_path)
if prim:
key_name = "prim_{}".format(shuttle_number + 1)
self.prim_dict[key_name] = prim
else:
print("Error: shuttle prim not found at path {}".format(shuttle_path))
# Control Switch
if self.control_switch == 0:
self._world.add_physics_callback("sim_step", callback_fn=self.sim_xbots_movement)
elif self.control_switch == 1:
self.connect_pmc() # Connect to PMC
self._world.add_physics_callback("sim_step", callback_fn=self.read_xbots_positions) #callback names have to be unique
self._world.add_physics_callback("sim_step_move", callback_fn=self.send_xbots_positions)
return
# Add Lab Setup reference
async def _add_lab_setup(self):
self._lab_setup_ref_geom = self._world.scene.get_object(f"lab_setup_ref_geom")
self._lab_setup_ref_geom.set_local_scale(np.array([self._lab_setup_scale]))
self._lab_setup_ref_geom.set_world_pose(position=self._lab_setup_position,
orientation=self._lab_setup_orientation)
self._lab_setup_ref_geom.set_default_state(position=self._lab_setup_position,
orientation=self._lab_setup_orientation)
# lb = self._world.scene.compute_object_AABB(name=f"lab_setup_ref_geom")
# zmin = lb[0][2]
# zmax = lb[1][2]
# self._lab_setup_position[2] = -zmin
# self._lab_setup_height = zmax
self._lab_setup_ref_geom.set_collision_approximation("none")
#self._convexIncludeRel.AddTarget(self._table_ref_geom.prim_path)
# Add xForm shuttles reference
async def _add_shuttles_grid(self):
self._shuttles_grid_ref_geom = self._world.scene.get_object(f"Grid")
self._shuttles_grid_ref_geom.set_world_pose(position=self._grid_position,
orientation=self._grid_orientation)
self._shuttles_grid_ref_geom.set_default_state(position=self._grid_position,
orientation=self._grid_orientation)
# Add shuttles to the scene
async def _add_shuttle(self, shuttle_number):
self._shuttle_ref_geom = self._world.scene.get_object(f"shuttle_{shuttle_number+1}_ref_geom")
self._shuttle_ref_geom.set_local_scale(np.array([self._shuttle_scale]))
self._shuttle_ref_geom.set_world_pose(position= self._shuttle_position + (-0.121 * (shuttle_number), 0, 0))
self._shuttle_ref_geom.set_default_state(position=self._shuttle_position)
self._shuttle_ref_geom.set_collision_approximation("none")
#self._shuttle_articulation_controller = self._shuttle.get_articulation_controller()
async def on_sim_control_event_async(self):
world = self.get_world()
world.add_physics_callback("sim_step", self.sim_xbots_movement)
await world.play_async()
return
# def on_pmc_connection_event(self):
# self.connect_pmc()
# return
async def setup_pre_reset(self):
return
async def setup_post_reset(self):
return
def world_cleanup(self):
return
def sim_xbots_movement(self, step_size):
#print("step_size: ", step_size)
#print(self.translate)
#stage = omni.usd.get_context().get_stage()
max_speed = 3.0 # m/s
max_accel = 10.0 # m/s^2
for shuttle_number in range(1):
# for shuttle_number in range(self._number_shuttles):
prim = self.prim_dict["prim_{}".format(shuttle_number + 1)]
current_pos = prim.GetAttribute('xformOp:translate').Get()
# Move cube to the right
if (self._target[1] + 0.1) < current_pos[0]:
prim.GetAttribute('xformOp:translate').Set((current_pos)-(step_size*max_speed, 0.0, 0.0))
continue
# Move cube to the left
elif (self._target[1] - 0.1) > current_pos[0]:
prim.GetAttribute('xformOp:translate').Set((current_pos)+(step_size*max_speed, 0.0, 0.0))
continue
#current_pos = prim.GetAttribute('xformOp:translate').Get()
# Move cube up
if (self._target[0] + 0.1) > current_pos[1]:
prim.GetAttribute('xformOp:translate').Set((current_pos)+(0.0, step_size*max_speed, 0.0))
continue
# Move cube down
elif (self._target[0] - 0.1) < current_pos[1]:
prim.GetAttribute('xformOp:translate').Set((current_pos)-(0.0, step_size*max_speed, 0.0))
continue
# Read shuttles position and orientation from physical setup
def read_xbots_positions(self, step_size):
# Read info for every shuttle
xbot_list = bot.get_all_xbot_info(1)
xbot_positions = [(xbot.x_pos, xbot.y_pos, xbot.z_pos,
xbot.rx_pos, xbot.ry_pos, xbot.rz_pos,
xbot.xbot_state) for xbot in xbot_list]
# Set position and orientation of shuttles
for shuttle_number in range(self._number_shuttles):
prim = self.prim_dict["prim_{}".format(shuttle_number + 1)]
# Set position of shuttle
prim.GetAttribute('xformOp:translate').Set((xbot_positions[shuttle_number][0],
xbot_positions[shuttle_number][1] ,
xbot_positions[shuttle_number][2] + 1.06))
# Transform orientation from euler angles to quaternion
quat_prim = (euler_angles_to_quat([xbot_positions[shuttle_number][3],
xbot_positions[shuttle_number][4],
xbot_positions[shuttle_number][5]]))
quat = Gf.Quatd(*quat_prim)
# Set Orientation of shuttle
prim.GetAttribute('xformOp:orient').Set(quat)
def send_xbots_positions(self, step_size):
xbot_ids = [1, 2, 3, 4, 5, 6, 7, 8]
# Only update the Xbots if at least 2 seconds have passed since the last update
if time.time() - self.last_update_time >= 2:
#print(bot.get_xbot_status(xbot_id=xid).xbot_state)
xbot_list = bot.get_all_xbot_info(1)
xbot_positions = [(xbot.x_pos, xbot.y_pos, xbot.z_pos,
xbot.rx_pos, xbot.ry_pos, xbot.rz_pos,
xbot.xbot_state) for xbot in xbot_list]
# Don't send commands while the xbots are moving
if all(xbot_state[6] == pmc_types.XbotState.XBOT_IDLE for xbot_state in xbot_positions):
# Get random unique targets for each shuttle
targets_x, targets_y = self.create_random_coordinates(self._number_shuttles)
print("target_x ", targets_x)
print("target_y ", targets_y)
bot.auto_driving_motion_si(8, xbot_ids=xbot_ids, targets_x=targets_x, targets_y=targets_y)
else:
print("Xbots are moving")
self.last_update_time = time.time()
# if bot.get_xbot_status(xbot_id=xid).xbot_state is pmc_types.XbotState.XBOT_IDLE:
# #self.sample_motions(input_id=xid)
# bot.auto_driving_motion_si(8, xbot_ids=xbot_ids, targets_x=targets_x, targets_y=targets_y)
# # Recover Disabled xbot
# elif bot.get_xbot_status(xbot_id=xid).xbot_state is pmc_types.XbotState.XBOT_DISABLED:
# bot.recover_accident_xbot(xbot_id=xid)
# print("Recovering xbot: ", xid)
def create_random_coordinates(self, num_shuttles):
x_coords = []
y_coords = []
coords_dict = {}
for i in range(num_shuttles):
while True:
x = random.randint(0, 6) * 0.12 + 0.06
y = random.randint(0, 8) * 0.12 + 0.06
if (x, y) not in coords_dict:
coords_dict[(x, y)] = 1
break
x_coords.append(x)
y_coords.append(y)
return x_coords, y_coords
def sample_motions(self, input_id):
max_speed = 1.0
max_accel = 10.0
bot.linear_motion_si(xbot_id=input_id, target_x=0.18, target_y=0.06,
max_speed=max_speed, max_accel=max_accel)
bot.linear_motion_si(xbot_id=input_id, target_x=0.18, target_y=0.90,
max_speed=max_speed, max_accel=max_accel)
bot.linear_motion_si(xbot_id=input_id, target_x=0.66, target_y=0.90,
max_speed=max_speed, max_accel=max_accel)
bot.linear_motion_si(xbot_id=input_id, target_x=0.66, target_y=0.06,
max_speed=max_speed, max_accel=max_accel)
# bot.rotary_motion_timed_spin(xbot_id=input_id,rot_time=3, target_rz=3.14,
# max_speed=3.0, max_accel=max_accel)
# bot.linear_motion_si(xbot_id=input_id, target_x=0.60, target_y=0.36,
# max_speed=max_speed, max_accel=max_accel)
# bot.rotary_motion_timed_spin(xbot_id=input_id,rot_time=3, target_rz=3.14,
# max_speed=3.0, max_accel=max_accel)
    def wait_for_xbot_done(self, xid):
while bot.get_xbot_status(xbot_id=xid).xbot_state is not pmc_types.XbotState.XBOT_IDLE:
time.sleep(0.5)
def connect_pmc(self):
# Connect to PMC
if not sys.auto_connect_to_pmc():
sys.connect_to_pmc("192.168.10.100") #sys.auto_connect_to_pmc()
print("Connected: ", sys.auto_connect_to_pmc())
print("Status: ", sys.get_pmc_status())
# Gain mastership
if not sys.is_master():
sys.gain_mastership()
print("Master: ", sys.is_master())
# Activate xBots
bot.activate_xbots()
############################################################################################################
def print_cube_info(self, step_size):
position, orientation = self._cube.get_world_pose()
linear_velocity = self._cube.get_linear_velocity()
# will be shown on terminal
print("Cube position is : " + str(position))
print("Cube's orientation is : " + str(orientation))
print("Cube's linear velocity is : " + str(linear_velocity))
def _change_property(self, prim_path: str, attribute_name:str, value:float):
usd_path = Sdf.Path(prim_path + "." + attribute_name)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=usd_path,
value=value,
prev=self._get_property(prim_path, attribute_name),
)
def _get_property(self, prim_path: str, attribute: str):
prim = self.stage.GetPrimAtPath(prim_path)
prim_property = prim.GetAttribute(attribute)
return prim_property.Get() | 16,317 | Python | 40.734015 | 145 | 0.585953 |
AndreiVoica/P10-MAP/scripts/TestScripts/avoid_obstacles.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from omni.isaac.motion_generation.lula import RmpFlow
from omni.isaac.motion_generation import ArticulationMotionPolicy
from omni.isaac.core.robots import Robot
from omni.isaac.core.objects import cuboid
from omni.isaac.core import World
from omni.isaac.core.utils.stage import open_stage
import numpy as np
import os
import argparse
#TODO: Fill in tutorial directory with absolute path to this file
TUTORIAL_DIRECTORY = "/home/robotlab/Documents/Github/P10-MAP/"
rmp_config_dir = os.path.join(TUTORIAL_DIRECTORY,"Assets/kr3r540/")
parser = argparse.ArgumentParser()
parser.add_argument("--urdf_path",type=str,default="kr3r540.urdf")
parser.add_argument("--rmpflow_config_path",type=str,default="kr3r540_rmpflow_config.yaml")
parser.add_argument("--end_effector_frame_name",type=str,default="gripper_base_link")
args = parser.parse_args()
open_stage(usd_path=os.path.join(rmp_config_dir,"kr3r540.usd"))
my_world = World(stage_units_in_meters=1.0)
robot = my_world.scene.add(Robot(prim_path="/kuka_kr3r540", name="Kr3r540"))
#Initialize an RmpFlow object
rmpflow = RmpFlow(
robot_description_path = os.path.join(rmp_config_dir,"kr3_description_v1.0.yaml"),
urdf_path = os.path.join(rmp_config_dir,args.urdf_path),
rmpflow_config_path = os.path.join(rmp_config_dir,args.rmpflow_config_path),
end_effector_frame_name = args.end_effector_frame_name, #This frame name must be present in the URDF
maximum_substep_size = .0034
)
physics_dt = 1/60.
articulation_rmpflow = ArticulationMotionPolicy(robot,rmpflow,physics_dt)
articulation_controller = robot.get_articulation_controller()
#Make a target to follow
target_cube = cuboid.VisualCuboid("/World/target",position = np.array([.5,0,.5]),color = np.array([1.,0,0]),size = .1)
#Make an obstacle to avoid
obstacle = cuboid.VisualCuboid("/World/obstacle",position = np.array([.8,0,.5]),color = np.array([0,1.,0]), size = .1)
rmpflow.add_obstacle(obstacle)
my_world.reset()
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
#Set rmpflow target to be the current position of the target cube.
rmpflow.set_end_effector_target(
target_position=target_cube.get_world_pose()[0],
#target_orientation=target_cube.get_world_pose()[1]
)
#Query the current obstacle position
rmpflow.update_world()
actions = articulation_rmpflow.get_next_articulation_action()
articulation_controller.apply_action(actions)
simulation_app.close()
| 3,123 | Python | 37.567901 | 118 | 0.741274 |
AndreiVoica/P10-MAP/scripts/TestScripts/test.py | from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
from omni.isaac.core.utils.types import ArticulationAction
import numpy as np
from omni.isaac.dynamic_control import _dynamic_control
dc = _dynamic_control.acquire_dynamic_control_interface()
articulation1 = dc.get_articulation("/World/kr3_01")
# Call this each frame of simulation step if the state of the articulation is changing.
dc.wake_up_articulation(articulation1)
joint_angles1 = [np.random.rand(9) * 2 - 1]
dc.set_articulation_dof_position_targets(articulation1, joint_angles1)
dc = _dynamic_control.acquire_dynamic_control_interface()
articulation2 = dc.get_articulation("/World/kr3_02")
# Call this each frame of simulation step if the state of the articulation is changing.
dc.wake_up_articulation(articulation2)
joint_angles2 = [np.random.rand(9) * 2 - 1]
dc.set_articulation_dof_position_targets(articulation2, joint_angles2)
dc = _dynamic_control.acquire_dynamic_control_interface()
articulation3 = dc.get_articulation("/World/kr3_03")
# Call this each frame of simulation step if the state of the articulation is changing.
dc.wake_up_articulation(articulation3)
joint_angles3 = [np.random.rand(9) * 2 - 1]
dc.set_articulation_dof_position_targets(articulation3, joint_angles3)
dc = _dynamic_control.acquire_dynamic_control_interface()
articulation4 = dc.get_articulation("/World/kr3_04")
# Call this each frame of simulation step if the state of the articulation is changing.
dc.wake_up_articulation(articulation4)
joint_angles4 = [np.random.rand(9) * 2 - 1]
dc.set_articulation_dof_position_targets(articulation4, joint_angles4)
dc = _dynamic_control.acquire_dynamic_control_interface()
articulation5 = dc.get_articulation("/World/kr4")
# Call this each frame of simulation step if the state of the articulation is changing.
dc.wake_up_articulation(articulation5)
joint_angles5 = [np.random.rand(9) * 2 - 1]
dc.set_articulation_dof_position_targets(articulation5, joint_angles5)
'''
dc = _dynamic_control.acquire_dynamic_control_interface()
articulationGrip = dc.get_articulation("/World/kr3_03/tool0")
# Call this each frame of simulation step if the state of the articulation is changing.
dc.wake_up_articulation(articulationGrip)
joint_anglesGrip = [np.random.rand(9) * 2 - 1]
dc.set_articulation_dof_position_targets(articulationGrip, joint_anglesGrip)
'''
'''
dc = _dynamic_control.acquire_dynamic_control_interface()
articulation1 = dc.get_articulation("/World/kr3_01")
dc.wake_up_articulation(articulation1)
dof_ptr1 = dc.find_articulation_dof(articulation1, "joint_a2")
dc.set_dof_position_target(dof_ptr1, -1)
articulation2 = dc.get_articulation("/World/kr3_02")
dc.wake_up_articulation(articulation2)
dof_ptr2 = dc.find_articulation_dof(articulation2, "joint_a2")
dc.set_dof_position_target(dof_ptr2, -1)
'''
| 2,857 | Python | 45.852458 | 87 | 0.782989 |
AndreiVoica/P10-MAP/scripts/TestScripts/planar_motors_basic.py |
# NOTE: these helpers are written as class methods (they use `self` and expect
# `self.stage` to hold the current USD stage); the imports below are needed for them to run.
import omni.kit.commands
from pxr import Sdf


def _change_property(self, prim_path: str, attribute_name: str, value: float):
usd_path = Sdf.Path(prim_path + "." + attribute_name)
omni.kit.commands.execute(
"ChangeProperty",
prop_path=usd_path,
value=value,
prev=self._get_property(prim_path, attribute_name),
)
def _get_property(self, prim_path: str, attribute: str):
    prim = self.stage.GetPrimAtPath(prim_path)
prim_property = prim.GetAttribute(attribute)
return prim_property.Get() | 492 | Python | 31.866665 | 76 | 0.664634 |
AndreiVoica/P10-MAP/scripts/TestScripts/pipettetest.py | from omni.isaac.manipulators import SingleManipulator
from omni.isaac.manipulators.grippers import ParallelGripper
from omni.isaac.core.utils.types import ArticulationAction
import numpy as np
from omni.isaac.dynamic_control import _dynamic_control
def home():
    # Send all six KR3 joints back to zero.
    dc = _dynamic_control.acquire_dynamic_control_interface()
    art = dc.get_articulation("/World/kr3")
    dc.wake_up_articulation(art)
    for joint_name in ("joint_a1", "joint_a2", "joint_a3", "joint_a4", "joint_a5", "joint_a6"):
        dof_ptr = dc.find_articulation_dof(art, joint_name)
        dc.set_dof_position_target(dof_ptr, 0)
def position1():
    # Move the KR3 to a predefined pose: joints a2 and a3 bent, the rest at zero.
    dc = _dynamic_control.acquire_dynamic_control_interface()
    art = dc.get_articulation("/World/kr3")
    dc.wake_up_articulation(art)
    targets = {"joint_a1": 0, "joint_a2": -0.33, "joint_a3": 0.342,
               "joint_a4": 0, "joint_a5": 0, "joint_a6": 0}
    for joint_name, target in targets.items():
        dof_ptr = dc.find_articulation_dof(art, joint_name)
        dc.set_dof_position_target(dof_ptr, target)
home()
position1() | 2,686 | Python | 35.808219 | 61 | 0.699926 |
AndreiVoica/P10-MAP/docs/installation/planar_motor_control_API/basic_example.py | """
Planar Motor Python API Basic Example
(c) Planar Motor Inc 2022
"""
from pmclib import system_commands as sys # PMC System related commands
from pmclib import xbot_commands as bot # PMC Mover related commands
from pmclib import pmc_types # PMC API Types
import time
# %% Connect to the PMC
# To start sending commands to the system, the library must
# first connect to the Planar Motor Controller (PMC) via TCP/IP.
# this can be done by explicitly connecting to a known PMC IP
# address, or by scanning the network using an auto-connect
# command. By default, the PMC IP address is 192.168.10.100.
sys.connect_to_pmc("192.168.10.100")
# or
# sys.auto_connect_to_pmc()
# %% Activating the system
# On bootup, all the movers within the system will be in a
# "Deactivated" state, which means that they are not actively
# position controlled. To start controlling the system, the
# "activate" command must be sent.
bot.activate_xbots()
# Now we wait for the movers to be levitated and fully controlled.
# This can be done by periodically polling for the PMC status.
maxTime = time.time() + 60 # Set timeout of 60s
while sys.get_pmc_status() is not pmc_types.PmcStatus.PMC_FULLCTRL:
time.sleep(0.5)
if time.time() > maxTime:
raise TimeoutError("PMC Activation timeout")
# %% Basic mover commands
# Now that the movers are levitated, they are ready to receive
# motion commands.
bot.linear_motion_si(xbot_id=1, target_x=0.06, target_y=0.06,
max_speed=1.0, max_accel=10.0)
# The commands will return as soon the PMC receives and acknowledges
# that the command is valid/invalid. Multiple motion commands can be
# buffered to the same mover, and they will execute continuously in the
# order that the command was sent. Let's define a simple sample motion:
def sample_motions(input_id):
bot.linear_motion_si(xbot_id=input_id, target_x=0.18, target_y=0.06,
max_speed=1.0, max_accel=10.0)
bot.linear_motion_si(xbot_id=input_id, target_x=0.18, target_y=0.18,
max_speed=1.0, max_accel=10.0)
bot.linear_motion_si(xbot_id=input_id, target_x=0.06, target_y=0.18,
max_speed=1.0, max_accel=10.0)
bot.linear_motion_si(xbot_id=input_id, target_x=0.06, target_y=0.06,
max_speed=1.0, max_accel=10.0)
# Sending all commands, will buffer into the movers "Motion Buffer"
sample_motions(input_id=1)
# To check if all buffered motions are complete, we poll for
# the xbot information, and check that it's state is IDLE.
# Let's define a helper function for this:
def wait_for_xbot_done(xid):
while bot.get_xbot_status(xbot_id=xid).xbot_state is not pmc_types.XbotState.XBOT_IDLE:
time.sleep(0.5)
# Now we can wait for all motions buffered to a mover to be
# complete
wait_for_xbot_done(xid=1)
# %% Macros
# We can also save a series of motion commands as a "macro", which
# can be re-used for different movers. Macros are programmed by sending
# commands to mover ID 128 - 191
# First we clear the macro
bot.edit_motion_macro(
option=pmc_types.MotionMacroOptions.CLEAR_MACRO, macro_id=128)
# Now the commands can be programmed into the macro
sample_motions(input_id=128)
# Then the macro can be saved, and run
bot.edit_motion_macro(
option=pmc_types.MotionMacroOptions.SAVE_MACRO, macro_id=128)
bot.run_motion_macro(macro_id=128, xbot_id=1)
wait_for_xbot_done(xid=1)
# Macros can be infinitely looped/chained together by sending
# run_motion_macro commands to macro IDs. i.e. to run macro id
# 128 in a loop:
# First clear the macro so we can edit it again
bot.edit_motion_macro(
option=pmc_types.MotionMacroOptions.CLEAR_MACRO, macro_id=128)
# Send some motion commands
sample_motions(input_id=128)
# Then send run_macro 128 to macro ID 128
bot.run_motion_macro(macro_id=128, xbot_id=128)
bot.edit_motion_macro(
option=pmc_types.MotionMacroOptions.SAVE_MACRO, macro_id=128)
# Now when we run, the macro will infinitely loop
bot.run_motion_macro(macro_id=128, xbot_id=1)
time.sleep(5)
# To stop the motion, we can send the stop motion command
bot.stop_motion(xbot_id=1)
| 4,179 | Python | 34.423729 | 91 | 0.714286 |
AndreiVoica/P10-MAP/docs/installation/planar_motor_control_API/README.md | # PMI_Python_Lib
Python API for interfacing with the Planar Motor Controller
It is highly recommended to create a conda virtual environment when using the API with Isaac Sim. To see how to create a conda environment for Isaac Sim, check [Isaac Sim Python Environment](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html)
## Installing the library on Windows
First install wheel and pythonnet.
```
# install pythonnet
pip install wheel
pip install pythonnet==2.5.2
```
The library can be installed using `pip install /path/to/.whl/file/`
## Installing the library on Ubuntu
Installing the library on Linux is a little trickier, since .NET (Mono) and some required libraries need to be installed before installing pythonnet. If these libraries aren't installed beforehand, the installation of pythonnet will fail. Run the following commands:
Replace VERSION with the Ubuntu version codename (e.g. bionic for 18.04 or focal for 20.04).
```
sudo apt-get update
# install mono (.NET implementation)
sudo apt install gnupg ca-certificates
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
echo "deb https://download.mono-project.com/repo/ubuntu stable-VERSION main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list
sudo apt update
sudo apt install mono-devel
```
Before installing the library itself, install the needed Python packages:
```
# install pythonnet
pip install wheel
pip install pythonnet==2.5.2
```
If building the wheel for pythonnet fails, try installing the following libraries
```
# install libraries needed to build pythonnet
sudo apt-get install clang
sudo apt-get install libglib2.0-dev
```
Finally, the library can be installed using `pip install /path/to/.whl/file/`
## Using the library
Once installed, it can be used like any other python library, i.e:
```
from pmclib import system_commands as sys
from pmclib import xbot_commands as bot
if not sys.is_master():
sys.gain_mastership()
sys.auto_connect_to_pmc()
bot.activate_xbots()
bot.linear_motion_si(1, 0.18, 0.18, 0.5, 10)
```
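Motion commands return as soon as the PMC acknowledges them, so in practice you also poll the PMC and mover states before and after sending motions. A minimal sketch using the same API as the bundled `basic_example.py` (the 60 s timeout and xbot id 1 here are arbitrary choices, not requirements):
```
import time
from pmclib import system_commands as sys
from pmclib import xbot_commands as bot
from pmclib import pmc_types

sys.auto_connect_to_pmc()
bot.activate_xbots()

# Wait until the PMC reports full control of the movers
timeout = time.time() + 60
while sys.get_pmc_status() is not pmc_types.PmcStatus.PMC_FULLCTRL:
    time.sleep(0.5)
    if time.time() > timeout:
        raise TimeoutError("PMC activation timeout")

bot.linear_motion_si(xbot_id=1, target_x=0.18, target_y=0.18, max_speed=0.5, max_accel=10.0)

# Block until the mover has executed everything in its motion buffer
while bot.get_xbot_status(xbot_id=1).xbot_state is not pmc_types.XbotState.XBOT_IDLE:
    time.sleep(0.5)
```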
## VS Code Security settings
```
"terminal.integrated.profiles.windows": {
"PowerShell": {
"source": "PowerShell",
"icon": "terminal-powershell",
"args": ["-ExecutionPolicy", "Bypass"]
}
}
```
| 2,302 | Markdown | 29.706666 | 268 | 0.752824 |
AndreiVoica/P10-MAP/docs/installation/Nvidia Isaac Sim ROS2/README.md | # Nvidia Isaac Sim
Nvidia Isaac Sim is simulation software developed by Nvidia. The project was initially intended to be developed using ROS 2, so these steps may be useful for an implementation with ROS 2.
## Installing Nvidia Isaac Sim on Ubuntu 20.04
Follow the [Workstation Installation](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html) tutorial.
# ROS 2 Foxy
Nvidia Isaac Sim is currently compatible with ROS 2 Foxy (Ubuntu 20.04) due to the ROS 2 bridge.
Follow the [ROS2 Foxy installation method](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html).
# ROS 2 Humble in Docker
ROS 2 Humble is used only for the Moveit2 setup assistant, since it is not available on Foxy.
1. Install Docker following [this tutorial](https://docs.docker.com/engine/install/ubuntu/);
2. Open a terminal and run
```docker pull ubuntu:jammy```
3. In the terminal run ```docker images``` and copy the IMAGE ID
4. Run the docker image using ```docker run -it IMAGE ID```
5. Start installing ROS 2 Humble following [this tutorial](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html)
6. Install moveit2 setup assistant ```sudo apt install ros-humble-moveit```
7. Create a colcon workspace ```source /opt/ros/humble/setup.bash```
```
mkdir -p ~/ros2_ws/src
cd ~/ros2_ws/src
```
8. Clone the repo (TBD)
9.
```
cd ..
rosdep install -i --from-path src --rosdistro humble -y
```
# Converting ROS1 packages to ROS2
To be able to build using colcon, the packages have to be converted to support it.
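For a pure-Python ROS 1 package this mainly means switching from catkin to ament: a format-3 `package.xml` that depends on `ament_python`, plus a plain `setup.py` instead of the catkin build files. The sketch below is only an illustration for a hypothetical package called `map_demo` (the package name, node and entry point are placeholders, not taken from this repo):
```
from setuptools import setup

package_name = 'map_demo'  # placeholder package name

setup(
    name=package_name,
    version='0.0.1',
    packages=[package_name],
    data_files=[
        # register the package with the ament index and install its package.xml
        ('share/ament_index/resource_index/packages', ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    entry_points={
        'console_scripts': [
            'demo_node = map_demo.demo_node:main',  # placeholder node
        ],
    },
)
```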
# Using Moveit2
# Connecting Moveit2 to Isaac using a custom robot configuration
# Troubleshooting
## QT unable to find display while trying to launch a GUI application (such as rviz2 or moveit setup assistant)
If you encounter an error related to QT5 not being able to find a display (this will come up if you use any application with a GUI in Docker), run ```xhost local:root``` in the terminal, then run the docker image using the command below and skip step 4. It is recommended to add ```xhost local:root``` to your .bashrc.
```
docker run -it --rm \
--network host \
-e DISPLAY=$DISPLAY \
-v /tmp/.X11-unix/:/tmp/.X11-unix \
IMAGE ID
```
## URDF files require doubles
If you have, for example, velocity = 2.0, try velocity = 2.00001
| 2,327 | Markdown | 39.842105 | 333 | 0.743017 |
AndreiVoica/P10-MAP/docs/installation/MAPs_Extension/README.md | ## How to install MAPs Extension
1. Copy the *maps* folder into the Isaac Sim extensions folder. The default path in Ubuntu (Isaac Sim 2022.2.1) is:
```
/home/$USER$/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.examples/omni/isaac/examples/maps
```
2. In the *extensions.toml* file, add the following lines:
```
PATH:
/home/$USER$/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.examples/config/extensions.toml
TO ADD:
[[python.module]]
name = "omni.isaac.examples.maps"
```
3. Once the extension is installed, it is necessary to launch the ROS master in a terminal using `roscore` before opening the Isaac Sim GUI.
4. Then you can run the MAPs extension from the Isaac Examples tab in the Isaac Sim GUI:
![MAPs Extension](/docs/imgs/MAPs_extension_menu.jpg)
### Known Issues
The first time you load the Isaac Sim GUI, the MAPs extension doesn't appear. To fix this, open the *maps.py* file and save it again; this reloads the files and allows you to launch the extension from the GUI.
```
/home/$USER$/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.examples/omni/isaac/examples/maps/maps.py
```
| 1,123 | Markdown | 34.124999 | 204 | 0.738201 |
Ngochuy2137/omni_isaac_examples/path_planning/path_planning_controller.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from typing import Optional
import carb
import numpy as np
import omni.isaac.core.objects
import omni.isaac.motion_generation.interface_config_loader as interface_config_loader
from omni.isaac.core.articulations import Articulation
from omni.isaac.core.controllers.base_controller import BaseController
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.motion_generation import ArticulationTrajectory
from omni.isaac.motion_generation.lula import RRT
from omni.isaac.motion_generation.lula.trajectory_generator import LulaCSpaceTrajectoryGenerator
from omni.isaac.motion_generation.path_planner_visualizer import PathPlannerVisualizer
from omni.isaac.motion_generation.path_planning_interface import PathPlanner
class PathPlannerController(BaseController):
def __init__(
self,
name: str,
path_planner_visualizer: PathPlannerVisualizer,
cspace_trajectory_generator: LulaCSpaceTrajectoryGenerator,
physics_dt=1 / 60.0,
rrt_interpolation_max_dist=0.01,
):
BaseController.__init__(self, name)
self._robot = path_planner_visualizer.get_robot_articulation()
self._cspace_trajectory_generator = cspace_trajectory_generator
self._path_planner = path_planner_visualizer.get_path_planner()
self._path_planner_visualizer = path_planner_visualizer
self._last_solution = None
self._action_sequence = None
self._physics_dt = physics_dt
self._rrt_interpolation_max_dist = rrt_interpolation_max_dist
def _convert_rrt_plan_to_trajectory(self, rrt_plan):
# This example uses the LulaCSpaceTrajectoryGenerator to convert RRT waypoints to a cspace trajectory.
# In general this is not theoretically guaranteed to work since the trajectory generator uses spline-based
# interpolation and RRT only guarantees that the cspace position of the robot can be linearly interpolated between
# waypoints. For this example, we verified experimentally that a dense interpolation of cspace waypoints with a maximum
# l2 norm of .01 between waypoints leads to a good enough approximation of the RRT path by the trajectory generator.
interpolated_path = self._path_planner_visualizer.interpolate_path(rrt_plan, self._rrt_interpolation_max_dist)
trajectory = self._cspace_trajectory_generator.compute_c_space_trajectory(interpolated_path)
art_trajectory = ArticulationTrajectory(self._robot, trajectory, self._physics_dt)
return art_trajectory.get_action_sequence()
def _make_new_plan(
self, target_end_effector_position: np.ndarray, target_end_effector_orientation: Optional[np.ndarray] = None
) -> None:
self._path_planner.set_end_effector_target(target_end_effector_position, target_end_effector_orientation)
self._path_planner.update_world()
path_planner_visualizer = PathPlannerVisualizer(self._robot, self._path_planner)
active_joints = path_planner_visualizer.get_active_joints_subset()
if self._last_solution is None:
start_pos = active_joints.get_joint_positions()
else:
start_pos = self._last_solution
self._rrt_plan = self._path_planner.compute_path(start_pos, np.array([]))
if self._rrt_plan is None or self._rrt_plan == []:
carb.log_warn("No plan could be generated to target pose: " + str(target_end_effector_position))
self._action_sequence = []
return
self._action_sequence = self._convert_rrt_plan_to_trajectory(self._rrt_plan)
self._last_solution = self._action_sequence[-1].joint_positions
def forward(
self, target_end_effector_position: np.ndarray, target_end_effector_orientation: Optional[np.ndarray] = None
) -> ArticulationAction:
if self._action_sequence is None:
# This will only happen the first time the forward function is used
self._make_new_plan(target_end_effector_position, target_end_effector_orientation)
if len(self._action_sequence) == 0:
# The plan is completed; return null action to remain in place
return ArticulationAction()
if len(self._action_sequence) == 1:
final_positions = self._action_sequence[0].joint_positions
# print("Steady State Error: ", np.linalg.norm(self._robot.get_joint_positions()[:7]-final_positions[:7]))
return ArticulationAction(
final_positions, np.zeros_like(final_positions), joint_indices=self._action_sequence[0].joint_indices
)
return self._action_sequence.pop(0)
def add_obstacle(self, obstacle: omni.isaac.core.objects, static: bool = False) -> None:
self._path_planner.add_obstacle(obstacle, static)
def remove_obstacle(self, obstacle: omni.isaac.core.objects) -> None:
self._path_planner.remove_obstacle(obstacle)
def reset(self) -> None:
# PathPlannerController will make one plan per reset
self._path_planner.reset()
self._action_sequence = None
self._last_solution = None
def get_path_planner(self) -> PathPlanner:
return self._path_planner
class FrankaRrtController(PathPlannerController):
def __init__(
self,
name,
robot_articulation: Articulation,
):
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_id = ext_manager.get_enabled_extension_id("omni.isaac.examples")
examples_extension_path = ext_manager.get_extension_path(ext_id)
# Load default RRT config files stored in the omni.isaac.motion_generation extension
rrt_config = interface_config_loader.load_supported_path_planner_config("Franka", "RRT")
# Replace the default robot description file with a copy that has inflated collision spheres
rrt_config["robot_description_path"] = os.path.join(
examples_extension_path,
"omni",
"isaac",
"examples",
"path_planning",
"path_planning_example_assets",
"franka_conservative_spheres_robot_description.yaml",
)
rrt = RRT(**rrt_config)
# Create a trajectory generator to convert RRT cspace waypoints to trajectories
cspace_trajectory_generator = LulaCSpaceTrajectoryGenerator(
rrt_config["robot_description_path"], rrt_config["urdf_path"]
)
# It is important that the Robot Description File includes optional Jerk and Acceleration limits so that the generated trajectory
# can be followed closely by the simulated robot Articulation
assert cspace_trajectory_generator._lula_kinematics.has_c_space_acceleration_limits()
assert cspace_trajectory_generator._lula_kinematics.has_c_space_jerk_limits()
visualizer = PathPlannerVisualizer(robot_articulation, rrt)
PathPlannerController.__init__(self, name, visualizer, cspace_trajectory_generator)
| 7,463 | Python | 45.943396 | 137 | 0.7028 |
Ngochuy2137/omni_isaac_examples/franka_nut_and_bolt/franka_nut_and_bolt_extension.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.franka_nut_and_bolt import FrankaNutAndBolt
class FrankaNutAndBoltExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="Manipulation",
submenu_name="",
name="Franka Nut and Bolt",
title="Franka Nut and Bolt",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_sdf_nut_and_bolt.html#franka-nut-and-bolt-tutorial",
overview="Franka robot arms picking and screwing nuts onto bolts",
file_path=os.path.abspath(__file__),
sample=FrankaNutAndBolt(),
)
return
| 1,241 | Python | 40.399999 | 161 | 0.713135 |
Ngochuy2137/omni_isaac_examples/user_examples/__init__.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# NOTE: Import here your extension examples to be propagated to ISAAC SIM Extensions startup
from omni.isaac.examples.user_examples.hello_world import HelloWorld
from omni.isaac.examples.user_examples.hello_world_extension import HelloWorldExtension
| 684 | Python | 44.666664 | 92 | 0.821637 |
Ngochuy2137/omni_isaac_examples/user_examples/hello_world_extension.py | import os
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.user_examples import HelloWorld
class HelloWorldExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="",
submenu_name="",
name="My Awesome Example",
title="My Awesome Example",
doc_link="https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_core_hello_world.html",
overview="This Example introduces the user on how to do cool stuff with Isaac Sim through scripting in asynchronous mode.",
file_path=os.path.abspath(__file__),
sample=HelloWorld(),
)
return | 779 | Python | 40.052629 | 135 | 0.650834 |
Ngochuy2137/omni_isaac_examples/robo_factory/robo_factory.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import numpy as np
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.franka.controllers.stacking_controller import StackingController
from omni.isaac.franka.tasks import Stacking
class RoboFactory(BaseSample):
def __init__(self) -> None:
super().__init__()
self._tasks = []
self._controllers = []
self._articulation_controllers = []
self._robots = []
self._num_of_tasks = 4
return
def setup_scene(self):
world = self.get_world()
for i in range(self._num_of_tasks):
task = Stacking(name="task" + str(i), offset=np.array([0, (i * 2) - 3, 0]))
world.add_task(task)
return
async def setup_post_load(self):
for i in range(self._num_of_tasks):
self._tasks.append(self._world.get_task(name="task" + str(i)))
for i in range(self._num_of_tasks):
self._robots.append(self._world.scene.get_object(self._tasks[i].get_params()["robot_name"]["value"]))
self._controllers.append(
StackingController(
name="stacking_controller",
gripper=self._robots[i].gripper,
robot_articulation=self._robots[i],
picking_order_cube_names=self._tasks[i].get_cube_names(),
robot_observation_name=self._robots[i].name,
)
)
for i in range(self._num_of_tasks):
self._articulation_controllers.append(self._robots[i].get_articulation_controller())
return
def _on_start_factory_physics_step(self, step_size):
observations = self._world.get_observations()
for i in range(self._num_of_tasks):
actions = self._controllers[i].forward(observations=observations, end_effector_offset=np.array([0, 0, 0]))
self._articulation_controllers[i].apply_action(actions)
return
async def _on_start_stacking_event_async(self):
world = self.get_world()
world.add_physics_callback("sim_step", self._on_start_factory_physics_step)
await world.play_async()
return
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("sim_step"):
world.remove_physics_callback("sim_step")
for i in range(len(self._controllers)):
self._controllers[i].reset()
return
def world_cleanup(self):
self._tasks = []
self._controllers = []
self._articulation_controllers = []
self._robots = []
return
| 3,052 | Python | 38.141025 | 118 | 0.616972 |
Ngochuy2137/omni_isaac_examples/robo_party/robo_party.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import numpy as np
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.dofbot.controllers import PickPlaceController
from omni.isaac.dofbot.tasks import PickPlace
from omni.isaac.examples.base_sample import BaseSample
from omni.isaac.franka.controllers.stacking_controller import StackingController as FrankaStackingController
from omni.isaac.franka.tasks import Stacking as FrankaStacking
from omni.isaac.universal_robots.controllers import StackingController as UR10StackingController
from omni.isaac.universal_robots.tasks import Stacking as UR10Stacking
from omni.isaac.wheeled_robots.controllers.differential_controller import DifferentialController
from omni.isaac.wheeled_robots.controllers.holonomic_controller import HolonomicController
from omni.isaac.wheeled_robots.robots import WheeledRobot
from omni.isaac.wheeled_robots.robots.holonomic_robot_usd_setup import HolonomicRobotUsdSetup
class RoboParty(BaseSample):
def __init__(self) -> None:
super().__init__()
self._tasks = []
self._controllers = []
self._articulation_controllers = []
self._pick_place_task_params = None
self._robots = []
return
def setup_scene(self):
world = self.get_world()
self._tasks.append(FrankaStacking(name="task_0", offset=np.array([0, -2, 0])))
world.add_task(self._tasks[-1])
self._tasks.append(UR10Stacking(name="task_1", offset=np.array([0.5, 0.5, 0])))
world.add_task(self._tasks[-1])
self._tasks.append(PickPlace(name="task_2", offset=np.array([0, -1, 0])))
world.add_task(self._tasks[-1])
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
return
kaya_asset_path = assets_root_path + "/Isaac/Robots/Kaya/kaya.usd"
world.scene.add(
WheeledRobot(
prim_path="/World/Kaya",
name="my_kaya",
wheel_dof_names=["axle_0_joint", "axle_1_joint", "axle_2_joint"],
create_robot=True,
usd_path=kaya_asset_path,
position=np.array([-1, 0, 0]),
)
)
jetbot_asset_path = assets_root_path + "/Isaac/Robots/Jetbot/jetbot.usd"
world.scene.add(
WheeledRobot(
prim_path="/World/Jetbot",
name="my_jetbot",
wheel_dof_names=["left_wheel_joint", "right_wheel_joint"],
create_robot=True,
usd_path=jetbot_asset_path,
position=np.array([-1.5, -1.5, 0]),
)
)
return
async def setup_post_load(self):
self._tasks = [
self._world.get_task(name="task_0"),
self._world.get_task(name="task_1"),
self._world.get_task(name="task_2"),
]
for i in range(3):
self._robots.append(self._world.scene.get_object(self._tasks[i].get_params()["robot_name"]["value"]))
self._robots.append(self._world.scene.get_object("my_kaya"))
self._robots.append(self._world.scene.get_object("my_jetbot"))
self._pick_place_task_params = self._tasks[2].get_params()
self._controllers.append(
FrankaStackingController(
name="stacking_controller",
gripper=self._robots[0].gripper,
robot_articulation=self._robots[0],
picking_order_cube_names=self._tasks[0].get_cube_names(),
robot_observation_name=self._robots[0].name,
)
)
self._controllers.append(
UR10StackingController(
name="pick_place_controller",
gripper=self._robots[1].gripper,
robot_articulation=self._robots[1],
picking_order_cube_names=self._tasks[1].get_cube_names(),
robot_observation_name=self._robots[1].name,
)
)
self._controllers.append(
PickPlaceController(
name="pick_place_controller", gripper=self._robots[2].gripper, robot_articulation=self._robots[2]
)
)
kaya_setup = HolonomicRobotUsdSetup(
robot_prim_path="/World/Kaya", com_prim_path="/World/Kaya/base_link/control_offset"
)
(
wheel_radius,
wheel_positions,
wheel_orientations,
mecanum_angles,
wheel_axis,
up_axis,
) = kaya_setup.get_holonomic_controller_params()
self._controllers.append(
HolonomicController(
name="holonomic_controller",
wheel_radius=wheel_radius,
wheel_positions=wheel_positions,
wheel_orientations=wheel_orientations,
mecanum_angles=mecanum_angles,
wheel_axis=wheel_axis,
up_axis=up_axis,
)
)
self._controllers.append(DifferentialController(name="simple_control", wheel_radius=0.03, wheel_base=0.1125))
for i in range(5):
self._articulation_controllers.append(self._robots[i].get_articulation_controller())
return
def _on_start_party_physics_step(self, step_size):
observations = self._world.get_observations()
actions = self._controllers[0].forward(observations=observations, end_effector_offset=np.array([0, 0, 0]))
self._articulation_controllers[0].apply_action(actions)
actions = self._controllers[1].forward(observations=observations, end_effector_offset=np.array([0, 0, 0.02]))
self._articulation_controllers[1].apply_action(actions)
actions = self._controllers[2].forward(
picking_position=observations[self._pick_place_task_params["cube_name"]["value"]]["position"],
placing_position=observations[self._pick_place_task_params["cube_name"]["value"]]["target_position"],
current_joint_positions=observations[self._pick_place_task_params["robot_name"]["value"]][
"joint_positions"
],
end_effector_offset=np.array([0, -0.06, 0]),
)
self._articulation_controllers[2].apply_action(actions)
if self._world.current_time_step_index >= 0 and self._world.current_time_step_index < 500:
self._robots[3].apply_wheel_actions(self._controllers[3].forward(command=[0.2, 0.0, 0.0]))
self._robots[4].apply_wheel_actions(self._controllers[4].forward(command=[0.1, 0]))
elif self._world.current_time_step_index >= 500 and self._world.current_time_step_index < 1000:
self._robots[3].apply_wheel_actions(self._controllers[3].forward(command=[0, 0.2, 0.0]))
self._robots[4].apply_wheel_actions(self._controllers[4].forward(command=[0.0, np.pi / 10]))
elif self._world.current_time_step_index >= 1000 and self._world.current_time_step_index < 1500:
self._robots[3].apply_wheel_actions(self._controllers[3].forward(command=[0, 0.0, 0.06]))
self._robots[4].apply_wheel_actions(self._controllers[4].forward(command=[0.1, 0]))
return
async def _on_start_party_event_async(self):
world = self.get_world()
world.add_physics_callback("sim_step", self._on_start_party_physics_step)
await world.play_async()
return
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("sim_step"):
world.remove_physics_callback("sim_step")
for i in range(len(self._controllers)):
self._controllers[i].reset()
return
def world_cleanup(self):
self._tasks = []
self._controllers = []
self._articulation_controllers = []
self._pick_place_task_params = None
self._robots = []
return
| 8,421 | Python | 44.27957 | 117 | 0.620116 |
Ngochuy2137/omni_isaac_examples/ur10_palletizing/ur10_palletizing.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import random
import numpy as np
import omni
import omni.isaac.cortex.math_util as math_util
from omni.isaac.core.objects.capsule import VisualCapsule
from omni.isaac.core.objects.sphere import VisualSphere
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.cortex.cortex_rigid_prim import CortexRigidPrim
from omni.isaac.cortex.cortex_utils import get_assets_root_path
from omni.isaac.cortex.robot import CortexUr10
from omni.isaac.cortex.sample_behaviors.ur10 import bin_stacking_behavior as behavior
from omni.isaac.examples.cortex.cortex_base import CortexBase
class Ur10Assets:
def __init__(self):
self.assets_root_path = get_assets_root_path()
self.ur10_table_usd = (
self.assets_root_path + "/Isaac/Samples/Leonardo/Stage/ur10_bin_stacking_short_suction.usd"
)
self.small_klt_usd = self.assets_root_path + "/Isaac/Props/KLT_Bin/small_KLT.usd"
self.background_usd = self.assets_root_path + "/Isaac/Environments/Simple_Warehouse/warehouse.usd"
self.rubiks_cube_usd = self.assets_root_path + "/Isaac/Props/Rubiks_Cube/rubiks_cube.usd"
def random_bin_spawn_transform():
x = random.uniform(-0.15, 0.15)
y = 1.5
z = -0.15
position = np.array([x, y, z])
z = random.random() * 0.02 - 0.01
w = random.random() * 0.02 - 0.01
norm = np.sqrt(z**2 + w**2)
quat = math_util.Quaternion([w / norm, 0, 0, z / norm])
if random.random() > 0.5:
print("<flip>")
# flip the bin so it's upside down
quat = quat * math_util.Quaternion([0, 0, 1, 0])
else:
print("<no flip>")
return position, quat.vals
class BinStackingTask(BaseTask):
def __init__(self, env_path, assets) -> None:
super().__init__("bin_stacking")
self.assets = assets
self.env_path = env_path
self.bins = []
self.stashed_bins = []
self.on_conveyor = None
def _spawn_bin(self, rigid_bin):
x, q = random_bin_spawn_transform()
rigid_bin.set_world_pose(position=x, orientation=q)
rigid_bin.set_linear_velocity(np.array([0, -0.30, 0]))
rigid_bin.set_visibility(True)
def post_reset(self) -> None:
if len(self.bins) > 0:
for rigid_bin in self.bins:
self.scene.remove_object(rigid_bin.name)
self.bins.clear()
self.on_conveyor = None
def pre_step(self, time_step_index, simulation_time) -> None:
"""Spawn a new randomly oriented bin if the previous bin has been placed."""
spawn_new = False
if self.on_conveyor is None:
spawn_new = True
else:
(x, y, z), _ = self.on_conveyor.get_world_pose()
is_on_conveyor = y > 0.0 and -0.4 < x and x < 0.4
if not is_on_conveyor:
spawn_new = True
if spawn_new:
name = "bin_{}".format(len(self.bins))
prim_path = self.env_path + "/bins/{}".format(name)
add_reference_to_stage(usd_path=self.assets.small_klt_usd, prim_path=prim_path)
self.on_conveyor = self.scene.add(CortexRigidPrim(name=name, prim_path=prim_path))
self._spawn_bin(self.on_conveyor)
self.bins.append(self.on_conveyor)
def world_cleanup(self):
self.bins = []
self.stashed_bins = []
self.on_conveyor = None
return
class BinStacking(CortexBase):
def __init__(self, monitor_fn=None):
super().__init__()
self._monitor_fn = monitor_fn
self.robot = None
def setup_scene(self):
world = self.get_world()
env_path = "/World/Ur10Table"
ur10_assets = Ur10Assets()
add_reference_to_stage(usd_path=ur10_assets.ur10_table_usd, prim_path=env_path)
add_reference_to_stage(usd_path=ur10_assets.background_usd, prim_path="/World/Background")
background_prim = XFormPrim(
"/World/Background", position=[10.00, 2.00, -1.18180], orientation=[0.7071, 0, 0, 0.7071]
)
self.robot = world.add_robot(CortexUr10(name="robot", prim_path="{}/ur10".format(env_path)))
obs = world.scene.add(
VisualSphere(
"/World/Ur10Table/Obstacles/FlipStationSphere",
name="flip_station_sphere",
position=np.array([0.73, 0.76, -0.13]),
radius=0.2,
visible=False,
)
)
self.robot.register_obstacle(obs)
obs = world.scene.add(
VisualSphere(
"/World/Ur10Table/Obstacles/NavigationDome",
name="navigation_dome_obs",
position=[-0.031, -0.018, -1.086],
radius=1.1,
visible=False,
)
)
self.robot.register_obstacle(obs)
az = np.array([1.0, 0.0, -0.3])
ax = np.array([0.0, 1.0, 0.0])
ay = np.cross(az, ax)
R = math_util.pack_R(ax, ay, az)
quat = math_util.matrix_to_quat(R)
obs = world.scene.add(
VisualCapsule(
"/World/Ur10Table/Obstacles/NavigationBarrier",
name="navigation_barrier_obs",
position=[0.471, 0.276, -0.463 - 0.1],
orientation=quat,
radius=0.5,
height=0.9,
visible=False,
)
)
self.robot.register_obstacle(obs)
obs = world.scene.add(
VisualCapsule(
"/World/Ur10Table/Obstacles/NavigationFlipStation",
name="navigation_flip_station_obs",
position=np.array([0.766, 0.755, -0.5]),
radius=0.5,
height=0.5,
visible=False,
)
)
self.robot.register_obstacle(obs)
async def setup_post_load(self):
world = self.get_world()
env_path = "/World/Ur10Table"
ur10_assets = Ur10Assets()
if not self.robot:
self.robot = world._robots["robot"]
world._current_tasks.clear()
world._behaviors.clear()
world._logical_state_monitors.clear()
self.task = BinStackingTask(env_path, ur10_assets)
print(world.scene)
self.task.set_up_scene(world.scene)
world.add_task(self.task)
self.decider_network = behavior.make_decider_network(self.robot, self._on_monitor_update)
world.add_decider_network(self.decider_network)
return
def _on_monitor_update(self, diagnostics):
decision_stack = ""
if self.decider_network._decider_state.stack:
decision_stack = "\n".join(
[
"{0}{1}".format(" " * i, element)
for i, element in enumerate(str(i) for i in self.decider_network._decider_state.stack)
]
)
if self._monitor_fn:
self._monitor_fn(diagnostics, decision_stack)
def _on_physics_step(self, step_size):
world = self.get_world()
world.step(False, False)
return
async def on_event_async(self):
world = self.get_world()
await omni.kit.app.get_app().next_update_async()
world.reset_cortex()
world.add_physics_callback("sim_step", self._on_physics_step)
await world.play_async()
return
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("sim_step"):
world.remove_physics_callback("sim_step")
return
def world_cleanup(self):
return
| 8,211 | Python | 34.860262 | 106 | 0.590427 |
Ngochuy2137/omni_isaac_examples/hello_world/hello_world_extension.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import os
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.hello_world import HelloWorld
class HelloWorldExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
super().start_extension(
menu_name="",
submenu_name="",
name="Hello World",
title="Hello World Example",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/core_api_tutorials/tutorial_core_hello_world.html",
overview="This Example introduces the user on how to do cool stuff with Isaac Sim through scripting in asynchronous mode.",
file_path=os.path.abspath(__file__),
sample=HelloWorld(),
)
return
| 1,214 | Python | 39.499999 | 135 | 0.706755 |
Ngochuy2137/omni_isaac_examples/cortex/cortex_base.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import gc
from abc import abstractmethod
from omni.isaac.core import World
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.tasks.base_task import BaseTask
from omni.isaac.core.utils.stage import create_new_stage_async, update_stage_async
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.examples import base_sample
class CortexBase(base_sample.BaseSample):
async def load_world_async(self):
"""
Function called when clicking the load button.
The difference between this class and Base Sample is that we initialize a CortexWorld specialization.
"""
if CortexWorld.instance() is None:
await create_new_stage_async()
self._world = CortexWorld(**self._world_settings)
await self._world.initialize_simulation_context_async()
self.setup_scene()
else:
self._world = CortexWorld.instance()
self._current_tasks = self._world.get_current_tasks()
await self._world.reset_async()
await self._world.pause_async()
await self.setup_post_load()
if len(self._current_tasks) > 0:
self._world.add_physics_callback("tasks_step", self._world.step_async)
return
| 1,685 | Python | 41.149999 | 109 | 0.71454 |
Ngochuy2137/omni_isaac_examples/franka_cortex/franka_cortex.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import carb
import numpy as np
import omni
from omni.isaac.core.objects import DynamicCuboid, VisualCuboid
from omni.isaac.cortex.cortex_utils import load_behavior_module
from omni.isaac.cortex.cortex_world import Behavior, CortexWorld, LogicalStateMonitor
from omni.isaac.cortex.dfb import DfDiagnosticsMonitor
from omni.isaac.cortex.robot import CortexFranka, add_franka_to_stage
from omni.isaac.cortex.tools import SteadyRate
from omni.isaac.examples.cortex.cortex_base import CortexBase
class CubeSpec:
def __init__(self, name, color):
self.name = name
self.color = np.array(color)
class ContextStateMonitor(DfDiagnosticsMonitor):
"""
State monitor to read the context and pass it to the UI.
For these behaviors, the context has a `diagnostics_message` that contains the text to be displayed, and each
behavior implements its own monitor to update that.
"""
def __init__(self, print_dt, diagnostic_fn=None):
super().__init__(print_dt=print_dt)
self.diagnostic_fn = diagnostic_fn
def print_diagnostics(self, context):
if self.diagnostic_fn:
self.diagnostic_fn(context)
class FrankaCortex(CortexBase):
def __init__(self, monitor_fn=None):
super().__init__()
self._monitor_fn = monitor_fn
self.behavior = None
self.robot = None
self.context_monitor = ContextStateMonitor(print_dt=0.25, diagnostic_fn=self._on_monitor_update)
def setup_scene(self):
world = self.get_world()
self.robot = world.add_robot(add_franka_to_stage(name="franka", prim_path="/World/Franka"))
obs_specs = [
CubeSpec("RedCube", [0.7, 0.0, 0.0]),
CubeSpec("BlueCube", [0.0, 0.0, 0.7]),
CubeSpec("YellowCube", [0.7, 0.7, 0.0]),
CubeSpec("GreenCube", [0.0, 0.7, 0.0]),
]
width = 0.0515
for i, (x, spec) in enumerate(zip(np.linspace(0.3, 0.7, len(obs_specs)), obs_specs)):
obj = world.scene.add(
DynamicCuboid(
prim_path="/World/Obs/{}".format(spec.name),
name=spec.name,
size=width,
color=spec.color,
position=np.array([x, -0.4, width / 2]),
)
)
self.robot.register_obstacle(obj)
world.scene.add_default_ground_plane()
async def load_behavior(self, behavior):
world = self.get_world()
self.behavior = behavior
self.decider_network = load_behavior_module(self.behavior).make_decider_network(self.robot)
self.decider_network.context.add_monitor(self.context_monitor.monitor)
world.add_decider_network(self.decider_network)
def clear_behavior(self):
world = self.get_world()
world._logical_state_monitors.clear()
world._behaviors.clear()
async def setup_post_load(self, soft=False):
world = self.get_world()
prim_path = "/World/Franka"
if not self.robot:
self.robot = world._robots["franka"]
self.decider_network = load_behavior_module(self.behavior).make_decider_network(self.robot)
self.decider_network.context.add_monitor(self.context_monitor.monitor)
world.add_decider_network(self.decider_network)
await omni.kit.app.get_app().next_update_async()
def _on_monitor_update(self, context):
diagnostic = ""
decision_stack = ""
if hasattr(context, "diagnostics_message"):
diagnostic = context.diagnostics_message
if self.decider_network._decider_state.stack:
decision_stack = "\n".join(
[
"{0}{1}".format(" " * i, element)
for i, element in enumerate(str(i) for i in self.decider_network._decider_state.stack)
]
)
if self._monitor_fn:
self._monitor_fn(diagnostic, decision_stack)
def _on_physics_step(self, step_size):
world = self.get_world()
world.step(False, False)
async def on_event_async(self):
world = self.get_world()
await omni.kit.app.get_app().next_update_async()
world.reset_cortex()
world.add_physics_callback("sim_step", self._on_physics_step)
await world.play_async()
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("sim_step"):
world.remove_physics_callback("sim_step")
def world_cleanup(self):
pass
| 4,998 | Python | 36.029629 | 112 | 0.631253 |
Ngochuy2137/omni_isaac_examples/franka_cortex/franka_cortex_extension.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# from omni.isaac.examples.ur10_palletizing.ur10_palletizing import BinStacking
import asyncio
import os
import omni
import omni.ui as ui
from omni.isaac.cortex.cortex_world import CortexWorld
from omni.isaac.examples.base_sample import BaseSampleExtension
from omni.isaac.examples.franka_cortex.franka_cortex import FrankaCortex
from omni.isaac.ui.ui_utils import btn_builder, cb_builder, dropdown_builder, get_style, str_builder
class FrankaCortexExtension(BaseSampleExtension):
def on_startup(self, ext_id: str):
super().on_startup(ext_id)
ext_manager = omni.kit.app.get_app().get_extension_manager()
sample_behaviors_id = ext_manager.get_enabled_extension_id("omni.isaac.cortex.sample_behaviors")
behavior_path = (
omni.kit.app.get_app().get_extension_manager().get_extension_path(sample_behaviors_id)
+ "/omni/isaac/cortex/sample_behaviors/franka"
)
self.behavior_map = {
"Block Stacking": f"{behavior_path}/block_stacking_behavior.py",
"Simple State Machine": f"{behavior_path}/simple/simple_state_machine.py",
"Simple Decider Network": f"{behavior_path}/simple/simple_decider_network.py",
"Peck State Machine": f"{behavior_path}/peck_state_machine.py",
"Peck Decider Network": f"{behavior_path}/peck_decider_network.py",
"Peck Game": f"{behavior_path}/peck_game.py",
}
self.selected_behavior = "Block Stacking"
super().start_extension(
menu_name="Cortex",
submenu_name="",
name="Franka Cortex Examples",
title="Franka Cortex Examples",
doc_link="https://docs.omniverse.nvidia.com/isaacsim/latest/cortex_tutorials/tutorial_cortex_4_franka_block_stacking.html#isaac-sim-app-tutorial-cortex-4-franka-block-stacking",
overview="This Example shows how to Use Cortex for multiple behaviors robot and Cortex behaviors in Isaac Sim.\n\nPress the 'Open in IDE' button to view the source code.",
sample=FrankaCortex(self.on_diagnostics),
file_path=os.path.abspath(__file__),
number_of_extra_frames=2,
)
self.task_ui_elements = {}
frame = self.get_frame(index=0)
self.build_task_controls_ui(frame)
self.loaded = False
return
def _on_load_world(self):
self._sample.behavior = self.get_behavior()
self.loaded = True
super()._on_load_world()
def on_diagnostics(self, diagnostic, decision_stack):
if diagnostic:
self.diagostic_model.set_value(diagnostic)
self.state_model.set_value(decision_stack)
self.diagnostics_panel.visible = bool(diagnostic)
def get_world(self):
return CortexWorld.instance()
def get_behavior(self):
return self.behavior_map[self.selected_behavior]
def _on_start_button_event(self):
asyncio.ensure_future(self.sample.on_event_async())
self.task_ui_elements["Start"].enabled = False
return
def post_reset_button_event(self):
self.task_ui_elements["Start"].enabled = True
return
def post_load_button_event(self):
self.task_ui_elements["Start"].enabled = True
return
def post_clear_button_event(self):
self.task_ui_elements["Start"].enabled = False
return
def __on_selected_behavior_changed(self, selected_index):
self.selected_behavior = selected_index
if self.loaded:
asyncio.ensure_future(self._sample.load_behavior(self.get_behavior()))
self.on_diagnostics("", "")
def build_task_controls_ui(self, frame):
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
self.task_ui_elements["Selected Behavior"] = dropdown_builder(
"Selected Behavior",
items=[
"Block Stacking",
"Simple State Machine",
"Simple Decider Network",
"Peck State Machine",
"Peck Decider Network",
"Peck Game",
],
on_clicked_fn=self.__on_selected_behavior_changed,
)
dict = {
"label": "Load World",
"type": "button",
"text": "Load",
"tooltip": "Load World and Task",
"on_clicked_fn": self._on_load_world,
}
self._buttons["Load World"] = btn_builder(**dict)
self._buttons["Load World"].enabled = True
dict = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._buttons["Reset"] = btn_builder(**dict)
self._buttons["Reset"].enabled = False
with frame:
with ui.VStack(spacing=5):
# Update the Frame Title
frame.title = "Task Controls"
frame.visible = True
dict = {
"label": "Start",
"type": "button",
"text": "Start",
"tooltip": "Start",
"on_clicked_fn": self._on_start_button_event,
}
self.task_ui_elements["Start"] = btn_builder(**dict)
self.task_ui_elements["Start"].enabled = False
with self.get_frame(index=1):
self.get_frame(index=1).title = "Diagnostics"
self.get_frame(index=1).visible = True
self._diagnostics = ui.VStack(spacing=5)
# self._diagnostics.enabled = False
with self._diagnostics:
ui.Label("Decision Stack", height=20)
self.state_model = ui.SimpleStringModel()
ui.StringField(self.state_model, multiline=True, height=120)
self.diagnostics_panel = ui.VStack(spacing=5)
with self.diagnostics_panel:
ui.Label("Diagnostic message", height=20)
self.diagostic_model = ui.SimpleStringModel()
ui.StringField(self.diagostic_model, multiline=True, height=200)
| 6,913 | Python | 42.2125 | 189 | 0.581947 |
JJGIV2010/goBilda-extension/README.md | ## NVIDIA Omniverse GoBilda Extension
The NVIDIA Omniverse GoBilda Extension is a work-in-progress, open-source extension that aims to provide Universal Scene Description (USD) components for the GoBilda platform. The extension is designed to assist students, engineers, and educators in building prototypes of robots by offering a physics environment and useful metrics such as cost.
![Preview_Image](exts/goBilda/data/preview.png)
### Features
- Universal Scene Description components for the GoBilda platform
- Import GoBilda parts into your Omniverse scene
- Get information about the assembly and useful metrics such as cost
- Simulate the assembly using the physics environment in NVIDIA Omniverse
- Save time and money by ensuring part compatibility through simulation
### Usage
1. Enable the extension by following the instructions in the README.
2. Use the file menu bar to navigate to "goBilda > *part category* " and select a part to add it to the scene.
3. After you have set up your assembly, use the file menu bar to navigate to "goBilda > tools > stage info window" to view useful information about the assembly such as total cost or weight (these values can also be queried from a script; see the sketch after this list).
4. Explore the GoBilda menu to access additional information about the scene such as enabling viewport widgets.
5. Customize individual parts by adding physics, materials, and attributes that might be useful for your particular project.
6. Explore the Python classes included in the repo that are currently used to author variants and variant sets for the GoBilda parts.
7. Update or import a new STEP file for a part by using the import STEP file option in the GoBilda menu.
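For a quick scripted check of those metrics, here is a minimal sketch that could be run in the Omniverse Script Editor. It assumes parts have already been added under `/World/Components` and that each component prim carries `cost` and `weight` attributes (the same convention the extension's stage-info tooling reads); if your parts are authored differently, adjust the prim path and attribute names.

```python
# Minimal sketch for the Omniverse Script Editor (assumes the goBilda extension already
# added parts under /World/Components and that each prim has "cost" and "weight" attributes).
import omni.usd

stage = omni.usd.get_context().get_stage()
components_root = stage.GetPrimAtPath("/World/Components")

total_cost = 0.0
total_weight = 0.0
if components_root.IsValid():
    for prim in components_root.GetChildren():
        cost_attr = prim.GetAttribute("cost")      # assumed attribute name, matching the stage-info tooling
        weight_attr = prim.GetAttribute("weight")  # assumed attribute name
        if cost_attr and cost_attr.Get() is not None:
            total_cost += cost_attr.Get()
        if weight_attr and weight_attr.Get() is not None:
            total_weight += weight_attr.Get()

print(f"Total cost: {total_cost}, total weight: {total_weight}")
```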
### Requirements
- NVIDIA Omniverse
### Installation
The installation steps will be provided in the completed README once the extension is finished.
**Note:** This extension is currently a work-in-progress, and additional features and documentation will be added in future updates.
---
**Note to developers:** Please check out the repo to see the source code and examples, or if you would like to contribute.
| 2,038 | Markdown | 52.657893 | 339 | 0.795878 |
JJGIV2010/goBilda-extension/exts/goBilda/goBilda/extension.py | import omni.ext
import omni.ui as ui
import asyncio
import carb.input
import omni.kit.menu.utils
import omni.kit.undo
import omni.kit.commands
import omni.usd
from omni.kit.menu.utils import MenuItemDescription
from pxr import Sdf
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class GoBildaExtension(omni.ext.IExt):
def __init__(self):
self.extensionID = None
def on_startup(self, ext_id):
print("[goBilda] GoBilda startup")
# Register a menu item under the "Extensions" menu
self.extensionID = ext_id
# self.aboutWindow()
self.init_menu(ext_id)
self.stage = omni.usd.get_context().get_stage()
_menu_list = None
_sub_menu_list = None
# Menu name.
_menu_name = "goBilda"
def comingSoon(self):
self._window = ui.Window("goBilda Extension", width=500, textwrap=True)
with self._window.frame:
with ui.VStack():
####### Image : Omniverse logo ########
with ui.HStack():
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(self.extensionID)
img = ui.Image(alignment=ui.Alignment.CENTER)
img.source_url = ext_path + "/data/goBildaLogo.png"
ui.Label("""
Coming soon!
""", textwrap=True)
def aboutWindow(self):
self._window = ui.Window("goBilda Extension", width=500, textwrap=True)
with self._window.frame:
with ui.VStack():
####### Image : Omniverse logo ########
with ui.HStack():
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(self.extensionID)
img = ui.Image(alignment=ui.Alignment.CENTER)
img.source_url = ext_path + "/data/goBildaLogo.png"
ui.Label("""
Welcome to the unofficial extension for goBilda parts! If you're unfamiliar, goBilda is an open-source robotics prototyping platform perfect for designing robots, machines, and much more.
With our extension, you can seamlessly integrate goBilda parts into your Omniverse scene. This tool provides valuable insight into your assembly, aids in simulating your design, and even ensures part compatibility via simulation. The result? A smoother design process that saves you both time and money.
Check out the official goBilda website for more information: https://www.gobilda.com/
Ready to start building? Follow these simple steps:
Congratulations, you've already enabled the extension! If necessary, resetting the extension is as easy as disabling and enabling it again.
To select a part, navigate through the file menu bar: goBilda > parts > select a part.
Your chosen part will be incorporated into the scene.
For additional details about the scene, feel free to explore the goBilda menu.
Customize each part to fit your project needs by adding physics, materials, and attributes as needed.
To update or import a new step file for a fresh part, simply head over to the goBilda menu and choose the import step file option.
Thanks for checking out the extension. Now, let the fun begin. Happy building!
""", textwrap=True)
def stageInfoWindow(self):
# Use a separate attribute name so the window instance does not shadow this method on the instance.
self._stage_info_window = ui.Window("Stage Info", width=500, textwrap=True)
with self._stage_info_window.frame:
ui.Label("Stage Info")
def viewportOverlay(self):
print("viewport overlay place holder")
def addToStage(self, component):
"""
This function adds a component to the stage
:param component:
:return:
"""
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(self.extensionID)
path = f"{ext_path}/data/models/{component}/_"
primSourceLocation = path + component + "_allVariants.usda"
omni.kit.commands.execute("CreatePayload",
path_to=Sdf.Path(f"/World/Components/_{component}"),
# Prim path for where to create the reference
asset_path=primSourceLocation,
# The file path to reference. Relative paths are accepted too.
usd_context=omni.usd.get_context()
)
def getCost(self):
"""
This function gets the cost of the parts in the scene
:return:
"""
totalCost = 0
print("analyzing components in scene for cost")
# get all the components in the scene
components = self.stage.GetPrimAtPath("/World/Components").GetChildren()
# get the cost of each component
for component in components:
cost = component.GetAttribute("cost").Get()
totalCost = totalCost + cost
print("total cost of components in scene: " + str(totalCost))
return totalCost
def getWeight(self):
"""
This function gets the total weight of the parts in the scene
:return:
"""
totalWeight = 0
print("analyzing components in scene for weight")
# get all the components in the scene
components = self.stage.GetPrimAtPath("/World/Components").GetChildren()
# get the weight of each component
for component in components:
weight = component.GetAttribute("weight").Get()
totalWeight = totalWeight + weight
print("total weight of components in scene: " + str(totalWeight))
return totalWeight
def init_menu(self, ext_id):
async def _rebuild_menus():
await omni.kit.app.get_app().next_update_async()
omni.kit.menu.utils.rebuild_menus()
self.channelSubMenu = [
MenuItemDescription(name="UChannel", onclick_fn=lambda: self.addToStage("1120")),
MenuItemDescription(name="LowUChannel", onclick_fn=lambda: self.addToStage("1121")),
]
self.goRailSubMenu = [
MenuItemDescription(name="GoRailClosed", onclick_fn=lambda: self.addToStage("1109")),
MenuItemDescription(name="GoRailOpen", onclick_fn=lambda: self.addToStage("1118")),
]
self.beamsSubMenu = [
MenuItemDescription(name="U Beams", onclick_fn=lambda: self.addToStage("1101")),
MenuItemDescription(name="L Beams", onclick_fn=lambda: self.addToStage("1103")),
MenuItemDescription(name="Flat Beams", onclick_fn=lambda: self.addToStage("1102")),
MenuItemDescription(name="Square Beams", onclick_fn=lambda: self.addToStage("1106")),
MenuItemDescription(name="Shaft Beams", onclick_fn=lambda: self.addToStage("1119")),
]
self.shaftsAndTubingSubMenu = [
MenuItemDescription(name="Steel Round", onclick_fn=lambda: self.addToStage("2100")),
MenuItemDescription(name="Steel D", onclick_fn=lambda: self.addToStage("2101")),
MenuItemDescription(name="Steel Rex", onclick_fn=lambda: self.addToStage("2102")),
MenuItemDescription(name="Aluminum Rex", onclick_fn=lambda: self.addToStage("2104")),
MenuItemDescription(name="Hub Shafts", onclick_fn=lambda: self.addToStage("2110")),
MenuItemDescription(name="Aluminum Tubing", onclick_fn=lambda: self.addToStage("4100")),
MenuItemDescription(name="goTube", onclick_fn=lambda: self.addToStage("4103")),
MenuItemDescription(name="goRail", sub_menu=self.goRailSubMenu)
]
self.mountsSubMenu = [
MenuItemDescription(name="Block Mounts", onclick_fn=lambda: self.addToStage("1203")),
MenuItemDescription(name="Dual Block Mounts", onclick_fn=lambda: self.addToStage("1205")),
MenuItemDescription(name="One Side Two Post Pattern", onclick_fn=lambda: self.addToStage("1400")),
MenuItemDescription(name="Two Side Two Post Pattern", onclick_fn=lambda: self.addToStage("1401")),
MenuItemDescription(name="Gusseted Angle Pattern", onclick_fn=lambda: self.addToStage("1204"))
]
self.structureSubMenu = [
MenuItemDescription(name="Channel", sub_menu=self.channelSubMenu),
MenuItemDescription(name="goRail", sub_menu=self.goRailSubMenu),
MenuItemDescription(name="Beams", sub_menu=self.beamsSubMenu),
MenuItemDescription(name="Shafting & Tubing", sub_menu=self.shaftsAndTubingSubMenu),
MenuItemDescription(name="Mounts", sub_menu=self.mountsSubMenu)
]
self.motionSubMenu = [
MenuItemDescription(name="Servos", onclick_fn=lambda: self.addToStage("2000"))
]
self.electronicsSubMenu = [
# MenuItemDescription(name="Motor Controllers", onclick_fn=lambda: self.addToStage("motorControllers")),
# MenuItemDescription(name="Servo Electronics", onclick_fn=lambda: self.addToStage("servoElectronics")),
# MenuItemDescription(name="Signal Mixers", onclick_fn=lambda: self.addToStage("signalMixers")),
# MenuItemDescription(name="Batteries", onclick_fn=lambda: self.addToStage("batteries")),
# MenuItemDescription(name="Voltage Regulators", onclick_fn=lambda: self.addToStage("voltageRegulators")),
# MenuItemDescription(name="Power Distribution Boards", onclick_fn=lambda: self.addToStage("powerDistributionBoards")),
# MenuItemDescription(name="Wiring", onclick_fn=lambda: self.addToStage("wiring")),
# MenuItemDescription(name="Switches", onclick_fn=lambda: self.addToStage("switches")),
# MenuItemDescription(name="Lights", onclick_fn=lambda: self.addToStage("lights")),
]
self.hardwareSubMenu = [
# MenuItemDescription(name="Screws", onclick_fn=lambda: self.addToStage("screws")),
# MenuItemDescription(name="M4 Threaded Rods", onclick_fn=lambda: self.addToStage("threadedRods")),
# MenuItemDescription(name="Washers", onclick_fn=lambda: self.addToStage("washers")),
# MenuItemDescription(name="Shaft Spacers & Shims", onclick_fn=lambda: self.addToStage("sahftSpacersAndShims")),
# MenuItemDescription(name="Hole Reducers", onclick_fn=lambda: self.addToStage("holeReducers")),
# MenuItemDescription(name="Nuts", onclick_fn=lambda: self.addToStage("nuts")),
# MenuItemDescription(name="Springs", onclick_fn=lambda: self.addToStage("springs")),
# MenuItemDescription(name="Threaded Plates", onclick_fn=lambda: self.addToStage("threadedPlates")),
# MenuItemDescription(name="Standoffs & Spacers", onclick_fn=lambda: self.addToStage("standoffsAndSpacers")),
# MenuItemDescription(name="Collars", onclick_fn=lambda: self.addToStage("collars")),
# MenuItemDescription(name="Hinges", onclick_fn=lambda: self.addToStage("hinges")),
# MenuItemDescription(name="Tools", onclick_fn=lambda: self.addToStage("tools")),
# MenuItemDescription(name="Flexible Tubing", onclick_fn=lambda: self.addToStage("flexibleTubing")),
# MenuItemDescription(name="Cable", onclick_fn=lambda: self.addToStage("cable")),
# MenuItemDescription(name="Wire Management", onclick_fn=lambda: self.addToStage("wireManagement")),
# MenuItemDescription(name="Grommets", onclick_fn=lambda: self.addToStage("grommets")),
# MenuItemDescription(name="Rubber Feet", onclick_fn=lambda: self.addToStage("rubberFeet")),
# MenuItemDescription(name="Magnets", onclick_fn=lambda: self.addToStage("magnets"))
]
self.stageToolsSubMenu = [
MenuItemDescription(name="Stage Info Window", onclick_fn=lambda: self.stageInfoWindow()),
MenuItemDescription(name="Viewport Overlay", onclick_fn=lambda: self.viewportOverlay())
]
self._menu_list = [
MenuItemDescription(name="Tools", sub_menu=self.stageToolsSubMenu),
MenuItemDescription(),
MenuItemDescription(name="Structure", sub_menu=self.structureSubMenu),
MenuItemDescription(name="Motion", sub_menu=self.motionSubMenu),
MenuItemDescription(name="Electronics", sub_menu=self.electronicsSubMenu),
MenuItemDescription(name="Hardware", sub_menu=self.hardwareSubMenu),
MenuItemDescription(),
MenuItemDescription(name="About",
onclick_fn=lambda: self.aboutWindow()),
]
# Rebuild with additional menu items.
omni.kit.menu.utils.add_menu_items(self._menu_list, self._menu_name)
asyncio.ensure_future(_rebuild_menus())
def on_standards_option_select(self):
enabled = True
def on_standards_option_checked(self):
enabled = False
return enabled
def on_standards_normally_open_option_select(self):
enabled = False
def on_standards_normally_closed_option_checked(self):
enabled = True
return enabled
def term_menu(self):
async def _rebuild_menus():
await omni.kit.app.get_app().next_update_async()
omni.kit.menu.utils.rebuild_menus()
# Remove and rebuild the added menu items.
omni.kit.menu.utils.remove_menu_items(self._menu_list, self._menu_name)
asyncio.ensure_future(_rebuild_menus())
def on_shutdown(self):
print("[goBilda] GoBilda shutdown")
| 14,200 | Python | 51.018315 | 319 | 0.632887 |
JJGIV2010/goBilda-extension/exts/goBilda/config/extension.toml | [package]
# Semantic Versioning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "goBilda extension"
description="An extension to help students, artists, educators and engineers model, simulate and analyze goBilda assemblies using universal scene description and nvidia omniverse."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# URL of the extension source repository.
repository = "https://github.com/JJGIV2010/goBilda-extension.git"
# One of categories for UI.
category = "Simulation"
# Keywords for the extension
keywords = ["robotic", "goBilda", "motor", "servo", "simulation", "bracket", "wheel"]
# Icon to show in the extension manager
icon = "data/icon.png"
# Preview to show in the extension manager
preview_image = "data/preview.png"
# Use omni.ui to build simple UI
[dependencies]
"omni.kit.uiapp" = {}
# Main python module this extension provides; it will be publicly available as "import goBilda".
[[python.module]]
name = "goBilda"
| 1,109 | TOML | 31.647058 | 180 | 0.751127 |
leggedrobotics/viplanner/pyproject.toml | [build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "viplanner"
version = "0.1.0"
description = "Visual Imperative Planner for Legged Robots"
authors = [{name = "Pascal Roth", email = "[email protected]"}]
license = {file = "LICENSE.txt"}
readme = "README.md"
requires-python = ">=3.7"
keywords = ["robotics", "planning", "legged-robots"]
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
]
dependencies = [
"torch",
"torchvision",
"PyYAML==6.0",
"tqdm",
"matplotlib",
"networkx",
"scipy",
"open3d==0.17.0",
"wandb==0.14.0",
"opencv-python-headless",
]
[project.optional-dependencies]
inference = [
"mmcv==2.0.0",
"mmengine",
"mmdet",
]
standard = [
"pypose",
]
jetson = [
"torch==1.11",
]
[project.urls]
homepage = "https://github.com/pascal-roth/viplanner"
repository = "https://github.com/pascal-roth/viplanner.git"
[tool.setuptools.packages]
find = {}
| 1,203 | TOML | 21.296296 | 60 | 0.625935 |
leggedrobotics/viplanner/TRAINING.md | # Training and Evaluation
This document provides an overview of the steps involved in training the policy.
## Cost-Map Building
Cost-Map building is an essential step in guiding optimization and representing the environment.
Cost-Maps can be built from either depth and semantic images (i.e., data generated in simulation) or (semantically annotated) point clouds (i.e., real-world data).
If depth and semantic images from simulation are available, a 3D reconstruction has to be performed first, following the steps described in Point 1. If (semantically annotated) point clouds are already available, the cost map can be built directly from the point cloud, following the steps described in Point 2.
1. **Simulation: Depth Reconstruction** <br>
The reconstruction is executed in two steps, controlled by the config parameters defined in the [ReconstructionCfg class](viplanner/config/costmap_cfg.py):
1. Generate a colored point cloud by warping each semantic image onto the corresponding depth image (accounting for cameras in different frames)
2. Project the points into 3D space and voxelize them
The process expects the following data structure:
``` graphql
env_name
├── camera_extrinsic.txt            # format: x y z qx qy qz qw
├── intrinsics.txt                  # expects ROS CameraInfo format --> P-Matrix
├── depth                           # either png and/ or npy, if both npy is used
│   ├── xxxx.png                    # images saved with 4 digits, e.g. 0000.png
│   └── xxxx.npy                    # arrays saved with 4 digits, e.g. 0000.npy
└── semantics                       # optional
    └── xxxx.png                    # images saved with 4 digits, e.g. 0000.png
```
When both depth and semantic images are available, define `sem_suffix` and `depth_suffix` in `ReconstructionCfg` to differentiate between the two, using the following structure:
``` graphql
env_name
├── camera_extrinsic{depth_suffix}.txt   # format: x y z qx qy qz qw
├── camera_extrinsic{sem_suffix}.txt     # format: x y z qx qy qz qw
├── intrinsics.txt                       # P-Matrix for intrinsics of depth and semantic images
├── depth                                # either png and/ or npy, if both npy is used
│   ├── xxxx{depth_suffix}.png           # images saved with 4 digits, e.g. 0000.png
│   └── xxxx{depth_suffix}.npy           # arrays saved with 4 digits, e.g. 0000.npy
└── semantics                            # optional
    └── xxxx{sem_suffix}.png             # images saved with 4 digits, e.g. 0000.png
```
2. **Real-World: Open3D-Slam** <br>
To create an annotated 3D point cloud from real-world data (i.e., LiDAR scans and semantics generated from the RGB camera stream), use tools such as [Open3D Slam](https://github.com/leggedrobotics/open3d_slam).
3. **Cost-Building** <br>
Either a geometric or a semantic cost map can be generated fully automatically by running the following command:
```
python viplanner/cost_builder.py
```
The configs are set in [CostMapConfig](viplanner/config/costmap_cfg.py). We provide standard values; however, before running the script, please adjust the config to your needs and local environment paths. A sketch of how the resulting maps can be inspected is given after this list.
Cost-Maps will be saved within the environment folder, with the following structure:
``` graphql
maps
├── cloud
│   └── cost_{map_name}.txt             # 3d visualization of cost map
├── data
│   ├── cost_{map_name}_map.txt         # cost map
│   └── cost_{map_name}_ground.txt      # ground height estimated from pointcloud
└── params
    └── config_cost_{map_name}.yaml     # CostMapConfig used to generate cost map
```
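As a quick sanity check of the generated maps, the following is a minimal sketch for loading and visualizing them (paths relative to the environment folder). It assumes `cost_{map_name}_map.txt` and `cost_{map_name}_ground.txt` are plain whitespace-separated 2D arrays readable with `numpy.loadtxt`, and `warehouse` is a placeholder map name; adapt the loading step if the on-disk format differs.

``` python
# Minimal sketch: inspect a generated cost map (assumes whitespace-separated 2D text arrays).
import numpy as np
import matplotlib.pyplot as plt

map_name = "warehouse"  # placeholder environment/map name
cost_map = np.loadtxt(f"maps/data/cost_{map_name}_map.txt")    # per-cell traversability cost
ground = np.loadtxt(f"maps/data/cost_{map_name}_ground.txt")   # estimated ground height

fig, (ax_cost, ax_ground) = plt.subplots(1, 2, figsize=(10, 4))
ax_cost.imshow(cost_map, cmap="viridis")
ax_cost.set_title("cost map")
ax_ground.imshow(ground, cmap="terrain")
ax_ground.set_title("ground height")
plt.show()
```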
## Training
Configurations for the training are given in [TrainCfg](viplanner/config/learning_cfg.py). Training can be started using the example training script [train.py](viplanner/train.py).
``` bash
python viplanner/train.py
```
For the training, the following directory structure is expected/will be created:
``` graphql
file_path # TrainCfg.file_path or env variable EXPERIMENT_DIRECTORY
├── data
│   └── env_name                        # structure as defined in Cost-Map Building
├── models
│   └── model_name
│       ├── model.pth                   # trained model
│       └── model.yaml                  # TrainCfg used to train model
└── logs
    └── model_name
```
It is important that the model name is unique, otherwise the previous training will be overwritten.
Also, always copy both `model.pth` and `model.yaml`, because the config is necessary to reload the model.
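To illustrate why both files are needed, below is a minimal sketch of inspecting a saved run. The `models/my_model` directory name is a placeholder, and the sketch assumes `model.yaml` is a plain YAML mapping; if `TrainCfg` is serialized with Python-specific tags, use the loading utilities provided in the repository instead.

``` python
# Minimal sketch: inspect a saved training run ("models/my_model" is a placeholder path).
import torch
import yaml

model_dir = "models/my_model"

# model.yaml stores the TrainCfg used for the run; it is required to rebuild the network later.
with open(f"{model_dir}/model.yaml") as f:
    train_cfg = yaml.safe_load(f)
print("training config:", train_cfg)

# model.pth stores the learned weights.
checkpoint = torch.load(f"{model_dir}/model.pth", map_location="cpu")
print("checkpoint object type:", type(checkpoint))
```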
| 4,753 | Markdown | 46.54 | 315 | 0.634547 |