diff --git a/core/include/jiminy/core/utilities/random.h b/core/include/jiminy/core/utilities/random.h index 7092728a5..65d0a3c64 100644 --- a/core/include/jiminy/core/utilities/random.h +++ b/core/include/jiminy/core/utilities/random.h @@ -451,7 +451,8 @@ namespace jiminy VectorN shift_{}; - mutable VectorN cellIndex_{}; + mutable VectorN cellIndex_ = + VectorN::Constant(std::numeric_limits::max()); mutable std::array, (1U << N)> gradKnots_{}; }; diff --git a/core/include/jiminy/core/utilities/random.hxx b/core/include/jiminy/core/utilities/random.hxx index 1e2541c3e..a68e67d40 100644 --- a/core/include/jiminy/core/utilities/random.hxx +++ b/core/include/jiminy/core/utilities/random.hxx @@ -239,6 +239,9 @@ namespace jiminy { // Sample random cell shift shift_ = uniform(N, 1, g).cast(); + + // Clear cache index + cellIndex_.setConstant(std::numeric_limits::max()); } template class DerivedPerlinNoiseOctave, unsigned int N> diff --git a/core/unit/miscellaneous.cc b/core/unit/miscellaneous.cc index 770d153d7..6648517aa 100644 --- a/core/unit/miscellaneous.cc +++ b/core/unit/miscellaneous.cc @@ -1,4 +1,3 @@ -#include #include #include "jiminy/core/fwd.h" @@ -71,66 +70,3 @@ TEST(Miscellaneous, swapMatrixRows) matrixOut); } } - - -TEST(Miscellaneous, MatrixRandom) -{ - using generator_t = - std::independent_bits_engine::digits, uint32_t>; - - generator_t gen32{0}; - uniform_random_bit_generator_ref gen32_ref = gen32; - - float mean = 5.0; - float stddev = 2.0; - - auto mean_vec = Eigen::MatrixXf::Constant(1, 2, mean); - auto stddev_vec = Eigen::MatrixXf::Constant(1, 2, stddev); - float value1 = normal(gen32, mean, stddev); - float value2 = normal(gen32, mean, stddev); - - { - gen32.seed(0); - scalar_random_op op{ - [](auto & g, float _mean, float _stddev) -> float - { return normal(g, _mean, _stddev); }, - gen32, - mean_vec, - stddev_vec}; - ASSERT_FLOAT_EQ(op(0, 0), value1); - ASSERT_FLOAT_EQ(op(0, 1), value2); - ASSERT_THAT(op(0, 0), testing::Not(testing::FloatEq(value1))); - } - { - gen32.seed(0); - scalar_random_op &, float, float), - uniform_random_bit_generator_ref, - float, - float> - op{normal, gen32, mean, stddev}; - ASSERT_FLOAT_EQ(op(0, 0), value1); - ASSERT_FLOAT_EQ(op(10, 10), value2); - ASSERT_THAT(op(0, 0), testing::Not(testing::FloatEq(value1))); - } - { - gen32.seed(0); - auto mat_expr = normal(gen32_ref, mean_vec, stddev_vec); - ASSERT_FLOAT_EQ(mat_expr(0, 0), value1); - ASSERT_FLOAT_EQ(mat_expr(0, 1), value2); - ASSERT_THAT(mat_expr(0, 0), testing::Not(testing::FloatEq(value1))); - } - { - gen32.seed(0); - auto mat_expr = normal(1, 2, gen32, mean, stddev); - ASSERT_FLOAT_EQ(mat_expr(0, 0), value1); - ASSERT_FLOAT_EQ(mat_expr(0, 1), value2); - ASSERT_THAT(mat_expr(0, 0), testing::Not(testing::FloatEq(value1))); - } - { - gen32.seed(0); - auto mat_expr = normal(gen32_ref, mean, stddev_vec.transpose()); - ASSERT_FLOAT_EQ(mat_expr(0, 0), value1); - ASSERT_FLOAT_EQ(mat_expr(1, 0), value2); - ASSERT_THAT(mat_expr(0, 0), testing::Not(testing::FloatEq(value1))); - } -} diff --git a/core/unit/random_test.cc b/core/unit/random_test.cc index 8d5d244b9..ae9c7a82e 100644 --- a/core/unit/random_test.cc +++ b/core/unit/random_test.cc @@ -1,3 +1,4 @@ +#include // `testing::*` #include #include "jiminy/core/utilities/random.h" @@ -6,25 +7,90 @@ using namespace jiminy; static inline constexpr double DELTA{1e-6}; -static inline constexpr double TOL{1e-3}; +static inline constexpr double TOL{1e-4}; + + +TEST(Miscellaneous, MatrixRandom) +{ + using generator_t = + 
std::independent_bits_engine::digits, uint32_t>; + + generator_t gen32{0}; + uniform_random_bit_generator_ref gen32_ref = gen32; + + float mean = 5.0; + float stddev = 2.0; + + auto mean_vec = Eigen::MatrixXf::Constant(1, 2, mean); + auto stddev_vec = Eigen::MatrixXf::Constant(1, 2, stddev); + float value1 = normal(gen32, mean, stddev); + float value2 = normal(gen32, mean, stddev); + + { + gen32.seed(0); + scalar_random_op op{ + [](auto & g, float _mean, float _stddev) -> float + { return normal(g, _mean, _stddev); }, + gen32, + mean_vec, + stddev_vec}; + ASSERT_FLOAT_EQ(op(0, 0), value1); + ASSERT_FLOAT_EQ(op(0, 1), value2); + ASSERT_THAT(op(0, 0), testing::Not(testing::FloatEq(value1))); + } + { + gen32.seed(0); + scalar_random_op &, float, float), + uniform_random_bit_generator_ref, + float, + float> + op{normal, gen32, mean, stddev}; + ASSERT_FLOAT_EQ(op(0, 0), value1); + ASSERT_FLOAT_EQ(op(10, 10), value2); + ASSERT_THAT(op(0, 0), testing::Not(testing::FloatEq(value1))); + } + { + gen32.seed(0); + auto mat_expr = normal(gen32_ref, mean_vec, stddev_vec); + ASSERT_FLOAT_EQ(mat_expr(0, 0), value1); + ASSERT_FLOAT_EQ(mat_expr(0, 1), value2); + ASSERT_THAT(mat_expr(0, 0), testing::Not(testing::FloatEq(value1))); + } + { + gen32.seed(0); + auto mat_expr = normal(1, 2, gen32, mean, stddev); + ASSERT_FLOAT_EQ(mat_expr(0, 0), value1); + ASSERT_FLOAT_EQ(mat_expr(0, 1), value2); + ASSERT_THAT(mat_expr(0, 0), testing::Not(testing::FloatEq(value1))); + } + { + gen32.seed(0); + auto mat_expr = normal(gen32_ref, mean, stddev_vec.transpose()); + ASSERT_FLOAT_EQ(mat_expr(0, 0), value1); + ASSERT_FLOAT_EQ(mat_expr(1, 0), value2); + ASSERT_THAT(mat_expr(0, 0), testing::Not(testing::FloatEq(value1))); + } +} TEST(PerlinNoiseTest, RandomPerlinNoiseOctaveInitialization) { double wavelength = 10.0; - RandomPerlinNoiseOctave<1> randomOctave(wavelength); + RandomPerlinNoiseOctave<1> octave(wavelength); + octave.reset(PCG32{std::seed_seq{0}}); - EXPECT_DOUBLE_EQ(randomOctave.getWavelength(), wavelength); + EXPECT_DOUBLE_EQ(octave.getWavelength(), wavelength); } TEST(PerlinNoiseTest, PeriodicPerlinNoiseOctaveInitialization) { double wavelength = 10.0; double period = 20.0; - PeriodicPerlinNoiseOctave<1> periodicOctave(wavelength, period); + PeriodicPerlinNoiseOctave<1> octave(wavelength, period); + octave.reset(PCG32{std::seed_seq{0}}); - EXPECT_DOUBLE_EQ(periodicOctave.getWavelength(), wavelength); - EXPECT_DOUBLE_EQ(periodicOctave.getPeriod(), period); + EXPECT_DOUBLE_EQ(octave.getWavelength(), wavelength); + EXPECT_DOUBLE_EQ(octave.getPeriod(), period); } TEST(PerlinNoiseTest, RandomGradientCalculation1D) @@ -32,6 +98,7 @@ TEST(PerlinNoiseTest, RandomGradientCalculation1D) { double wavelength = 10.0; RandomPerlinNoiseOctave<1> octave(wavelength); + octave.reset(PCG32{std::seed_seq{0}}); Eigen::Array t{5.43}; Eigen::Matrix finiteDiffGrad{(octave(t + DELTA) - octave(t - DELTA)) / @@ -41,6 +108,7 @@ TEST(PerlinNoiseTest, RandomGradientCalculation1D) { double wavelength = 3.41; RandomPerlinNoiseOctave<1> octave(wavelength); + octave.reset(PCG32{std::seed_seq{0}}); Eigen::Array t{17.0}; Eigen::Matrix finiteDiffGrad{(octave(t + DELTA) - octave(t - DELTA)) / @@ -54,6 +122,7 @@ TEST(PerlinNoiseTest, PeriodicGradientCalculation1D) double wavelength = 10.0; double period = 30.0; PeriodicPerlinNoiseOctave<1> octave(wavelength, period); + octave.reset(PCG32{std::seed_seq{0}}); Eigen::Array t{5.43}; Eigen::Matrix finiteDiffGrad{(octave(t + DELTA) - octave(t - DELTA)) / @@ -66,6 +135,7 @@ TEST(PerlinNoiseTest, 
RandomGradientCalculation2D) { double wavelength = 10.0; RandomPerlinNoiseOctave<2> octave(wavelength); + octave.reset(PCG32{std::seed_seq{0}}); Eigen::Vector2d pos{5.43, 7.12}; Eigen::Vector2d finiteDiffGrad{(octave(pos + DELTA * Eigen::Vector2d::UnitX()) - @@ -83,6 +153,7 @@ TEST(PerlinNoiseTest, PeriodicGradientCalculation2D) double wavelength = 10.0; double period = 30.0; PeriodicPerlinNoiseOctave<2> octave(wavelength, period); + octave.reset(PCG32{std::seed_seq{0}}); Eigen::Vector2d pos{5.43, 7.12}; Eigen::Vector2d finiteDiffGrad{(octave(pos + DELTA * Eigen::Vector2d::UnitX()) - diff --git a/python/gym_jiminy/unit_py/data/cassie_standing_9.png b/python/gym_jiminy/unit_py/data/cassie_standing_9.png new file mode 100644 index 000000000..e0a3f0a20 Binary files /dev/null and b/python/gym_jiminy/unit_py/data/cassie_standing_9.png differ diff --git a/python/jiminy_py/examples/extra_cameras.py b/python/jiminy_py/examples/extra_cameras.py new file mode 100644 index 000000000..4c246d82a --- /dev/null +++ b/python/jiminy_py/examples/extra_cameras.py @@ -0,0 +1,74 @@ +import numpy as np +import matplotlib.pyplot as plt + +import gymnasium as gym + +from panda3d.core import VBase4, Point3, Vec3 +from jiminy_py.viewer import Viewer +import pinocchio as pin + +Viewer.close() +#Viewer.connect_backend("panda3d-sync") +env = gym.make("gym_jiminy.envs:atlas-pid", viewer_kwargs={"backend": "panda3d-sync"}) +env.reset(seed=0) +env.step(env.action) +#env.render() +env.simulator.render(return_rgb_array=True) + +env.viewer.add_marker("sphere", + shape="sphere", + pose=(np.array((1.7, 0.0, 1.5)), None), + color="red", + radius=0.1, + always_foreground=False) + +Viewer.add_camera("rgb", height=200, width=200, is_depthmap=False) +Viewer.add_camera("depth", height=128, width=128, is_depthmap=True) +Viewer.set_camera_transform( + position=[2.5, -1.4, 1.6], # [3.0, 0.0, 0.0], + rotation=[1.35, 0.0, 0.8], # [np.pi/2, 0.0, np.pi/2] + camera_name="depth") + +frame_index = env.robot.pinocchio_model.getFrameId("head") +frame_pose = env.robot.pinocchio_data.oMf[frame_index] +# Viewer._backend_obj.gui.set_camera_transform( +# pos=frame_pose.translation + np.array([0.0, 0.0, 0.0]), +# quat=pin.Quaternion(frame_pose.rotation @ pin.rpy.rpyToMatrix(0.0, 0.0, -np.pi/2)).coeffs(), +# camera_name="rgb") +Viewer.set_camera_transform( + position=frame_pose.translation + np.array([0.0, 0.0, 0.0]), + rotation=pin.rpy.matrixToRpy(frame_pose.rotation @ pin.rpy.rpyToMatrix(np.pi/2, 0.0, -np.pi/2)), + camera_name="rgb") + +lens = Viewer._backend_obj.render.find("user_camera_depth").node().get_lens() +# proj = lens.get_projection_mat_inv() +# buffer = Viewer._backend_obj._user_buffers["depth"] +# buffer.trigger_copy() +# Viewer._backend_obj.graphics_engine.render_frame() +# texture = buffer.get_texture() +# tex_peeker = texture.peek() +# pixel = VBase4() +# tex_peeker.lookup(pixel, 0.5, 0.5) # (y, x normalized coordinates, from top-left to bottom-right) +# depth_rel = 2.0 * pixel[0] - 1.0 # map range [0.0 (near), 1.0 (far)] to [-1.0, 1.0] +# point = Point3() +# #lens.extrude_depth(Point3(0.0, 0.0, depth_rel), point) +# # proj.xform_point_general(Point3(0.0, 0.0, pixel[0])) +# # depth = point[1] +# depth = 1.0 / (proj[2][3] * depth_rel + proj[3][3]) +# print(depth) + +rgb_array = Viewer.capture_frame(camera_name="rgb") +depth_array = Viewer.capture_frame(camera_name="depth") +# depth_normalized_array = lens.near / (lens.far - (lens.far - lens.near) * depth_array) +depth_true_array = lens.near / (1.0 - (1.0 - lens.near / lens.far) * 
depth_array) +fig = plt.figure(figsize=(10, 5)) +ax1 = fig.add_subplot(121) +ax1.imshow(rgb_array) +ax2 = fig.add_subplot(122) +ax2.imshow(depth_true_array, cmap=plt.cm.binary) +for ax in (ax1, ax2): + ax.axis('off') + ax.xaxis.set_visible(False) + ax.yaxis.set_visible(False) +fig.tight_layout(pad=1.0) +plt.show(block=False) diff --git a/python/jiminy_py/setup.py b/python/jiminy_py/setup.py index 2746ecbcd..336612d81 100644 --- a/python/jiminy_py/setup.py +++ b/python/jiminy_py/setup.py @@ -136,7 +136,7 @@ def finalize_options(self) -> None: "meshcat>=0.3.2", # Used to detect running Meshcat servers and avoid orphan child # processes. - "psutil", + "psutil>=6.0", # Low-level backend for Ipython powering Jupyter notebooks "ipykernel>=5.0,<7.0", # Used internally by Viewer to read/write Meshcat snapshots diff --git a/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_visualizer.py b/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_visualizer.py index 5fb6f334a..2d1548acd 100644 --- a/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_visualizer.py +++ b/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_visualizer.py @@ -34,8 +34,8 @@ from direct.gui.OnscreenImage import OnscreenImage from direct.gui.OnscreenText import OnscreenText from panda3d.core import ( # pylint: disable=no-name-in-module - NodePath, Point3, Vec3, Vec4, Mat4, Quat, LQuaternion, Geom, GeomEnums, - GeomNode, GeomTriangles, GeomVertexData, GeomVertexArrayFormat, + NodePath, PandaNode, Point3, Vec3, Vec4, Mat4, Quat, Geom, GeomEnums, + GeomNode, GeomTriangles, GeomVertexData, GeomVertexArrayFormat, BitMask32, GeomVertexFormat, GeomVertexWriter, PNMImage, PNMImageHeader, TextNode, OmniBoundingVolume, CompassEffect, BillboardEffect, InternalName, Filename, Material, Texture, TextureStage, TransparencyAttrib, PGTop, Camera, Lens, @@ -58,7 +58,7 @@ WINDOW_SIZE_DEFAULT = (600, 600) -CAMERA_POS_DEFAULT = [(4.0, -4.0, 1.5), (0, 0, 0.5)] +CAMERA_POSE_DEFAULT = [(4.0, -4.0, 1.5), (0, 0, 0.5)] SKY_TOP_COLOR = (0.53, 0.8, 0.98, 1.0) SKY_BOTTOM_COLOR = (0.1, 0.1, 0.43, 1.0) @@ -366,9 +366,39 @@ def make_torus(minor_radius: float = 0.2, num_segments: int = 16) -> Geom: return geom +def enable_pbr_shader(node: NodePath) -> None: + """Create physics-based shader. + + .. note:: + Lighting must be adapted accordingly to give the desired effect. + + .. warning:: + It slows down the rendering by about 30% on discrete NVIDIA GPU. + + :param node: Root node on which to apply shader, usually the camera itself. + """ + tempnode = NodePath(PandaNode("temp node")) + shader_options = {'ENABLE_SHADOWS': True} + pbr_shader = simplepbr.shaderutils.make_shader( + 'pbr', 'simplepbr.vert', 'simplepbr.frag', shader_options) + tempnode.set_attrib(ShaderAttrib.make(pbr_shader)) + env_map = simplepbr.EnvMap.create_empty() + tempnode.set_shader_input( + 'filtered_env_map', env_map.filtered_env_map) + tempnode.set_shader_input( + 'max_reflection_lod', + env_map.filtered_env_map.num_loadable_ram_mipmap_images) + tempnode.set_shader_input('sh_coeffs', env_map.sh_coefficients) + node.set_initial_state(tempnode.get_state()) + + class Panda3dApp(panda3d_viewer.viewer_app.ViewerApp): """A Panda3D based application. 
""" + UserRGBCameraMask = BitMask32(1 << 1) # 0x2 + UserDepthCameraMask = BitMask32(1 << 2) # 0x4 + UserCameraMask = UserRGBCameraMask | UserDepthCameraMask + def __init__(self, # pylint: disable=super-init-not-called config: Optional[ViewerConfig] = None) -> None: # Enforce viewer configuration @@ -376,7 +406,8 @@ def __init__(self, # pylint: disable=super-init-not-called config = ViewerConfig() config.set_window_size(*WINDOW_SIZE_DEFAULT) config.set_window_fixed(False) - config.enable_antialiasing(True, multisamples=4) + config.enable_antialiasing(False, multisamples=0) + # config.set_value('want-pstats', True) config.set_value('framebuffer-software', False) config.set_value('framebuffer-hardware', False) config.set_value('load-display', 'pandagl') @@ -388,13 +419,14 @@ def __init__(self, # pylint: disable=super-init-not-called config.set_value('sync-video', False) config.set_value('default-near', 0.1) config.set_value('gl-version', '3 1') + # config.set_value('gl-check-errors', '#t') config.set_value('notify-level', 'fatal') config.set_value('notify-level-x11display', 'fatal') config.set_value('notify-level-device', 'fatal') config.set_value('default-directnotify-level', 'error') loadPrcFileData('', str(config)) - # Define offscreen buffer + # Offscreen buffer self.buff: Optional[GraphicsOutput] = None # Initialize base implementation. @@ -429,28 +461,16 @@ def keyboardInterruptHandler( self._spotlight = self.config.GetBool('enable-spotlight', False) self._lights_mask = [True, True] - # Create physics-based shader and adapt lighting accordingly. - # It slows down the rendering by about 30% on discrete NVIDIA GPU. - shader_options = {'ENABLE_SHADOWS': True} - pbr_shader = simplepbr.shaderutils.make_shader( - 'pbr', 'simplepbr.vert', 'simplepbr.frag', shader_options) - self.render.set_attrib(ShaderAttrib.make(pbr_shader)) - env_map = simplepbr.EnvMap.create_empty() - self.render.set_shader_input( - 'filtered_env_map', env_map.filtered_env_map) - self.render.set_shader_input( - 'max_reflection_lod', - env_map.filtered_env_map.num_loadable_ram_mipmap_images) - self.render.set_shader_input('sh_coeffs', env_map.sh_coefficients) + # Adapt lighting to accomodate physics-based rendering. self._lights = [ self._make_light_ambient((0.5, 0.5, 0.5)), self._make_light_direct(1, (1.0, 1.0, 1.0), pos=(8.0, -8.0, 10.0))] - # Define default camera pos - self._camera_defaults = CAMERA_POS_DEFAULT + # Current camera pose + self._camera_defaults = CAMERA_POSE_DEFAULT self.reset_camera(*self._camera_defaults) - # Define clock. It will be used later to limit framerate + # Custom clock. 
It will be used later to limit framerate self.clock = ClockObject.get_global_clock() self.framerate: Optional[float] = None @@ -469,9 +489,9 @@ def keyboardInterruptHandler( # Create gradient for skybox self.skybox = make_gradient_skybox( SKY_TOP_COLOR, SKY_BOTTOM_COLOR, 0.35, 0.17) - self.skybox.set_shader_auto(True) + self.skybox.set_shader_auto() self.skybox.set_light_off() - self.skybox.hide(self.LightMask) + self.skybox.hide(self.LightMask | self.UserDepthCameraMask) # The background needs to be parented to an intermediary node to which # a compass effect is applied to keep it at the same position as the @@ -522,7 +542,7 @@ def keyboardInterruptHandler( self.offA2dBottomCenter.set_pos(0, 0, self.a2dBottom) self.offA2dBottomRight.set_pos(self.a2dRight, 0, self.a2dBottom) - # Define widget overlay + # Widget overlay self.offscreen_graphics_lens: Optional[Lens] = None self.offscreen_display_region: Optional[DisplayRegion] = None self._help_label = None @@ -530,10 +550,14 @@ def keyboardInterruptHandler( self._legend: Optional[OnscreenImage] = None self._clock: Optional[OnscreenText] = None - # Define input control + # Custom user-specified cameras + self._user_buffers: Dict[str, NodePath] = {} + self._user_cameras: Dict[str, NodePath] = {} + + # Input control self.key_map = {"mouse1": 0, "mouse2": 0, "mouse3": 0} - # Define camera control + # Camera control self.zoom_rate = 1.03 self.camera_lookat = np.zeros(3) self.longitude_deg = 0.0 @@ -541,13 +565,17 @@ def keyboardInterruptHandler( self.last_mouse_x = 0.0 self.last_mouse_y = 0.0 - # Define object/highlighting selector + # Object/highlighting selector self.picker_ray: Optional[CollisionRay] = None self.picker_node: Optional[CollisionNode] = None self.picked_object: Optional[Tuple[str, str]] = None self.click_mouse_x = 0.0 self.click_mouse_y = 0.0 + # Make the original window inactive without deleting it. + # It must be kept in order to maintain alive the same graphics context. + self.win.set_active(False) + # Create resizeable offscreen buffer. 
# Note that a resizable buffer is systematically created, no matter # if the main window is an offscreen non-resizable window or an @@ -580,16 +608,23 @@ def open_window(self) -> None: if self.has_gui(): raise RuntimeError("Only one graphical window can be opened.") + # Force enabling multi-sampling for onscreen graphical window + fbprops = FrameBufferProperties(FrameBufferProperties.getDefault()) + fbprops.set_multisamples(4) + # Replace the original offscreen window by an onscreen one if possible is_success = True size = self.win.get_size() try: self.windowType = 'onscreen' - self.open_main_window(size=size) + self.open_main_window(size=size, fbprops=fbprops) except Exception: # pylint: disable=broad-except is_success = False self.windowType = 'offscreen' - self.open_main_window(size=size) + self.open_main_window(size=size, fbprops=fbprops) + + # Enable Physics-based rendering + enable_pbr_shader(self.cam.node()) if is_success: # Setup mouse and keyboard controls for onscreen display @@ -656,7 +691,7 @@ def _open_offscreen_window(self, fbprops.set_rgba_bits(8, 8, 8, 8) fbprops.set_float_color(False) fbprops.set_depth_bits(16) - fbprops.set_float_depth(True) + fbprops.set_float_depth(False) fbprops.set_multisamples(4) # Set offscreen buffer windows properties @@ -674,23 +709,30 @@ def _open_offscreen_window(self, self.pipe, "offscreen_buffer", 0, fbprops, winprops, flags, self.win.get_gsg(), self.win) if win is None: - raise RuntimeError("Faulty graphics pipeline of this machine.") + raise RuntimeError("Faulty graphics pipeline on this machine.") self.buff = win + # Disable automatic rendering of the buffer for efficiency + win.set_one_shot(True) + # Append buffer to the list of windows managed by the ShowBase self.winList.append(win) # Attach a texture as screenshot requires copying GPU data to RAM - self.buff.add_render_texture( - Texture(), GraphicsOutput.RTM_triggered_copy_ram) + tex = Texture() + tex.set_format(Texture.F_rgb) + self.buff.add_render_texture(tex, GraphicsOutput.RTM_copy_ram) # Create 3D camera region for the scene. # Set near distance of camera lens to allow seeing model from close. self.offscreen_graphics_lens = PerspectiveLens() self.offscreen_graphics_lens.set_near(0.1) - self.make_camera( + cam = self.make_camera( win, camName='offscreen_camera', lens=self.offscreen_graphics_lens) + # Enable Physics-based rendering + enable_pbr_shader(cam.node()) + # Create 2D display region for widgets self.offscreen_display_region = win.makeMonoDisplayRegion() self.offscreen_display_region.set_sort(5) @@ -742,6 +784,122 @@ def _adjust_offscreen_window_aspect_ratio(self) -> None: self.offA2dBottomLeft.set_pos(a2dLeft, 0, a2dBottom) self.offA2dBottomRight.set_pos(a2dRight, 0, a2dBottom) + def add_camera(self, + name: str, + is_depthmap: bool, + size: Tuple[int, int]) -> None: + """Add an RGB or depth camera to the scene. + + The user is responsible for managing it, i.e. setting its pose in the world, getting + screenshots from it, and removing it when no longer relevant. Manually + added cameras are mainly useful for simulating exteroceptive sensors. + + :param name: Name of the camera to be added. + :param is_depthmap: Whether the camera output gathers three 8-bit integer + RGB channels or one 32-bit float depth channel. + :param size: Resolution (width and height) in pixels of the image being + captured by the camera. + """ + # TODO: Expose optional parameters to set lens type and properties.
+ + # Make sure that no camera with the same name already exists + if name in self._user_cameras: + raise ValueError( + "A camera with the same name already exists. Please delete " + "it by calling `remove_camera` before adding a new one.") + + # Create new offscreen buffer + fbprops = FrameBufferProperties() + if is_depthmap: + fbprops.set_depth_bits(32) + fbprops.set_float_depth(True) + fbprops.set_multisamples(0) + else: + fbprops.set_rgba_bits(8, 8, 8, 8) + fbprops.set_float_color(False) + fbprops.set_depth_bits(16) + fbprops.set_float_depth(False) + fbprops.set_multisamples(4) + winprops = WindowProperties() + winprops.set_size(*size) + flags = GraphicsPipe.BF_refuse_window + buffer = self.graphicsEngine.make_output( + self.pipe, f"user_buffer_{name}", 0, fbprops, winprops, flags, + self.win.get_gsg(), self.win) + if buffer is None: + raise RuntimeError("Faulty graphics pipeline on this machine.") + self._user_buffers[name] = buffer + self.winList.append(buffer) + + # Disable automatic rendering of the buffer for efficiency + buffer.set_one_shot(True) + + # Disable color buffer and enable depth buffer + if is_depthmap: + buffer.set_clear_color_active(False) + buffer.set_clear_depth_active(True) + + # Attach a texture as screenshot requires copying GPU data to RAM + tex = Texture(f"user_texture_{name}") + if is_depthmap: + tex.set_format(Texture.F_depth_component) + else: + tex.set_format(Texture.F_rgb) + buffer.add_render_texture(tex, GraphicsOutput.RTM_copy_ram) + + # Create 3D camera region for the scene. + # See official documentation about field of view parameterization: + # https://docs.panda3d.org/1.10/python/programming/camera-control/perspective-lenses # noqa: E501 # pylint: disable=line-too-long + lens = PerspectiveLens() + if is_depthmap: + lens.set_fov(50.0) # field of view angle [0, 180], 40° by default + lens.set_near(0.02) # near distance (objects closer not rendered) + lens.set_far(6.0) # far distance (objects farther not rendered) + # lens.set_film_size(24, 36) + # lens.set_focal_length(50) # Setting this will overwrite fov + else: + lens.set_near(0.1) + lens.set_aspect_ratio(self.get_aspect_ratio(buffer)) + if is_depthmap: + mask = self.UserDepthCameraMask + else: + mask = self.UserRGBCameraMask + cam = self.make_camera( + buffer, camName=f"user_camera_{name}", lens=lens, mask=mask) + cam.reparent_to(self.render) + self._user_cameras[name] = cam + + # Disable shader for depth map since it is irrelevant + if is_depthmap: + tempnode = NodePath(PandaNode("temp node")) + tempnode.set_material_off(2) + tempnode.set_texture_off(2) + tempnode.set_light_off(2) + tempnode.set_shader_off(2) + cam.node().set_initial_state(tempnode.get_state()) + else: + # Enable Physics-based rendering + enable_pbr_shader(cam.node()) + + # Force rendering the scene to finalize initialization of the GSG + self.graphics_engine.render_frame() + + # Flip the buffer upside-down + buffer.inverted = True + + def remove_camera(self, name: str) -> None: + """Remove one of the cameras being managed by the user, which has been + added manually via `add_camera`. + + :param name: Name of the camera to remove.
+ """ + # Make sure that the camera exists before trying to delete it + if name not in self._user_cameras: + raise ValueError(f"No camera with name '{name}' was found.") + self.close_window(self._user_buffers[name], keepCamera=False) + del self._user_cameras[name] + del self._user_buffers[name] + def getSize(self, win: Optional[Any] = None) -> Tuple[int, int]: """Patched to return the size of the window used for capturing frame by default, instead of main window. @@ -896,9 +1054,9 @@ def move_orbital_camera_task(self, def _make_light_ambient(self, color: Tuple3FType) -> NodePath: """Patched to fix wrong color alpha. """ - node = super()._make_light_ambient(color) - node.get_node(0).set_color((*color, 1.0)) - return node + light = super()._make_light_ambient(color) + light.node().set_color((*color, 1.0)) + return light def _make_light_direct(self, index: int, @@ -908,9 +1066,9 @@ def _make_light_direct(self, ) -> NodePath: """Patched to fix wrong color alpha. """ - light_path = super()._make_light_direct(index, color, pos, target) - light_path.get_node(0).set_color((*color, 1.0)) - return light_path + light = super()._make_light_direct(index, color, pos, target) + light.node().set_color((*color, 1.0)) + return light def _make_axes(self) -> NodePath: model = GeomNode('axes') @@ -920,9 +1078,10 @@ def _make_axes(self) -> NodePath: if self.win.gsg.driver_vendor.startswith('NVIDIA'): node.set_render_mode_thickness(4) node.set_antialias(AntialiasAttrib.MLine) - node.set_shader_auto(True) + node.set_shader_auto() node.set_light_off() - node.hide(self.LightMask) + node.hide(self.LightMask | self.UserCameraMask) + node.set_tag("is_virtual", "1") node.set_scale(0.3) return node @@ -962,7 +1121,7 @@ def _make_floor(self, # Set material to render shadows if supported material = Material() material.set_base_color((1.35, 1.35, 1.35, 1.0)) - node.set_material(material, True) + node.set_material(material) # Disable light casting node.hide(self.LightMask) @@ -970,7 +1129,7 @@ def _make_floor(self, # Adjust frustum of the lights to project shadow over the whole scene for light_path in self._lights[1:]: bmin, bmax = node.get_tight_bounds(light_path) - lens = light_path.get_node(0).get_lens() + lens = light_path.node().get_lens() lens.set_film_offset((bmin.xz + bmax.xz) * 0.5) lens.set_film_size(bmax.xz - bmin.xz) lens.set_near_far(bmin.y, bmax.y) @@ -1071,9 +1230,10 @@ def append_frame(self, if self.win.gsg.driver_vendor.startswith('NVIDIA'): node.set_render_mode_thickness(4) node.set_antialias(AntialiasAttrib.MLine) - node.set_shader_auto(True) + node.set_shader_auto() node.set_light_off() - node.hide(self.LightMask) + node.hide(self.LightMask | self.UserCameraMask) + node.set_tag("is_virtual", "1") self.append_node(root_path, name, node, frame) def append_cone(self, @@ -1156,6 +1316,10 @@ def append_arrow(self, body_node.set_scale(1.0, 1.0, length) body_node.set_pos(0.0, 0.0, (-0.5 if anchor_top else 0.5) * length) arrow_node.set_scale(radius, radius, 1.0) + + arrow_node.hide(self.LightMask | self.UserCameraMask) + arrow_node.set_tag("is_virtual", "1") + self.append_node(root_path, name, arrow_node, frame) def append_mesh(self, @@ -1498,7 +1662,7 @@ def set_material(self, material.set_diffuse(Vec4(*color)) material.set_specular(Vec3(1, 1, 1)) material.set_roughness(0.4) - node.set_material(material, True) + node.set_material(material) if color[3] < 1.0: node.set_transparency(TransparencyAttrib.M_alpha) @@ -1573,10 +1737,14 @@ def show_node(self, node.set_tag("status", "hidden") node.hide() if 
always_foreground is not None: + # FIXME: Properly restore original mask if any if always_foreground: node.set_bin("fixed", 0) + node.hide(self.UserCameraMask) else: node.clear_bin() + if node.get_tag("is_virtual") != "1": + node.show(self.UserCameraMask) node.set_depth_test(not always_foreground) node.set_depth_write(not always_foreground) @@ -1587,27 +1755,40 @@ def get_camera_transform(self) -> Tuple[np.ndarray, np.ndarray]: representation of the orientation (X, Y, Z, W) as a pair of `np.ndarray`. """ - return (np.array(self.camera.get_pos()), - np.array(self.camera.get_quat())) + return (np.array(self.camera.get_pos(), dtype=np.float64), + np.array(self.camera.get_quat(), dtype=np.float64)) def set_camera_transform(self, pos: Tuple3FType, quat: np.ndarray, - lookat: Tuple3FType = (0.0, 0.0, 0.0)) -> None: + camera_name: Optional[str] = None) -> None: """Set the current absolute pose of the camera. :param pos: Desired position of the camera. :param quat: Desired orientation of the camera as a quaternion (X, Y, Z, W). - :param lookat: Point at which the camera is looking at. It is partially - redundant with the desired orientation and will take - precedence in case of inconsistency. It is also involved - in zoom control. + :param camera_name: Name of the camera to consider. Either one of the + cameras that were manually added by the user via + `add_camera`, or None to specify the one associated + with the main window. If the main window is an + onscreen graphical window, then the camera of its + accompanying offscreen buffer for screenshots will + be jointly moved since they are attached together. + Optional: None by default. """ - self.camera.set_pos(*pos) - self.camera.set_quat(LQuaternion(quat[-1], *quat[:-1])) - self.camera_lookat = np.array(lookat) - self.move_orbital_camera_task() + # Pick the right camera + if camera_name is None: + camera = self.camera + else: + camera = self._user_cameras[camera_name] + + # Move the camera + camera.set_pos_quat(Vec3(*pos), Quat(quat[-1], *quat[:-1])) + + # Reset orbital camera control + if camera_name is None: + self.camera_lookat = np.array([0.0, 0.0, 0.0]) + self.move_orbital_camera_task() def get_camera_lookat(self) -> np.ndarray: """Get the location of the point toward which the camera is looking at. @@ -1673,7 +1854,7 @@ def save_screenshot(self, filename: Optional[str] = None) -> bool: # Refresh the scene to make sure it is perfectly up-to-date. # It will take into account the updated position of the camera. assert self.buff is not None - self.buff.trigger_copy() + self.buff.set_one_shot(True) self.graphics_engine.render_frame() # Capture frame as image @@ -1695,9 +1876,9 @@ def save_screenshot(self, filename: Optional[str] = None) -> bool: return True - def get_screenshot(self, - requested_format: str = 'RGB', - raw: bool = False) -> Union[np.ndarray, bytes]: + def get_screenshot(self, # pylint: disable=arguments-renamed + camera_name: Optional[str] = None + ) -> Union[np.ndarray, bytes]: """Patched to take screenshot of the last window available instead of the main one, and to add raw data return mode for efficient multiprocessing. @@ -1708,32 +1889,54 @@ def get_screenshot(self, scheduler. The framerate limit must be disabled manually to avoid such limitation. - .. note:: - Internally, Panda3d uses BGRA, so using it is slightly faster than - RGBA, but not RGB since there is one channel missing. - - :param requested_format: Desired export format (e.g.
'RGB' or 'BGRA') - :param raw: whether to return a raw memory view of bytes, of a - structured `np.ndarray` of uint8 with dimensions [H, W, D]. + :param camera_name: Name of the camera to consider. Either one of the + cameras that were manually added by the user via + `add_camera`, or None to specify the one associated + with the main window (i.e. either the camera attached + to the main window directly if the latter is an + offscreen buffer, otherwise the camera of its + accompanying offscreen buffer). + Optional: None by default. """ + # Get desired buffer + if camera_name is None: + assert self.buff is not None + buffer = self.buff + else: + buffer = self._user_buffers[camera_name] + + # Get frame as raw texture + texture = buffer.get_texture() + is_depth_map = texture.format == Texture.F_depth_component32 + + # Disable shadow casting for depth map computation since it is useless + shadow_buffers = [] + if is_depth_map: + for light in self._lights: + if not light.node().is_ambient_light(): + shadow_buffer = light.node().getShadowBuffer(self.win.gsg) + if shadow_buffer is not None: + shadow_buffer.active = False + shadow_buffers.append(shadow_buffer) + # Refresh the scene - assert self.buff is not None - self.buff.trigger_copy() + buffer.set_one_shot(True) self.graphics_engine.render_frame() - # Get frame as raw texture - assert self.buff is not None - texture = self.buff.get_texture() + # Restore shadow casting + for shadow_buffer in shadow_buffers: + shadow_buffer.active = True # Extract raw array buffer from texture - image = texture.get_ram_image_as(requested_format) - - # Return raw buffer if requested - if raw: - return image.get_data() + if is_depth_map: + image = texture.get_ram_image() + else: + image = texture.get_ram_image_as('RGB') # Convert raw texture to numpy array if requested xsize, ysize = texture.get_x_size(), texture.get_y_size() + if is_depth_map: + return np.frombuffer(image, np.float32).reshape((ysize, xsize)) return np.frombuffer(image, np.uint8).reshape((ysize, xsize, -1)) def enable_shadow(self, enable: bool) -> None: diff --git a/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_widget.py b/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_widget.py index 149495da1..1a94e20fb 100644 --- a/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_widget.py +++ b/python/jiminy_py/src/jiminy_py/viewer/panda3d/panda3d_widget.py @@ -71,7 +71,7 @@ def paintEvent(self, # Note that `QImage` does not manage the lifetime of the input data # buffer, so it is necessary to keep it in local scope until the end of # its drawing. - data = self.get_screenshot(requested_format='RGB', raw=True) + data = self.get_screenshot() img = QtGui.QImage( data, *self.buff.getSize(), QtGui.QImage.Format_RGB888) diff --git a/python/jiminy_py/src/jiminy_py/viewer/replay.py b/python/jiminy_py/src/jiminy_py/viewer/replay.py index b8c5341e5..4c8cd74f2 100644 --- a/python/jiminy_py/src/jiminy_py/viewer/replay.py +++ b/python/jiminy_py/src/jiminy_py/viewer/replay.py @@ -595,6 +595,7 @@ def play_trajectories( # Create frame storage frame = av.VideoFrame(*record_video_size, 'rgb24') + frame_bytes = memoryview(frame.planes[0]) # Add frames to video sequentially update_hook_t = None @@ -636,9 +637,8 @@ def play_trajectories( # Update frame. # Note that `capture_frame` is by far the main bottleneck # of the whole recording process (~75% on discrete gpu).
- buffer = Viewer.capture_frame( - *record_video_size, raw_data=True) - memoryview(frame.planes[0])[:] = buffer + buffer = Viewer.capture_frame(*record_video_size) + frame_bytes[:] = buffer.reshape(-1).data # Write frame for packet in stream.encode(frame): diff --git a/python/jiminy_py/src/jiminy_py/viewer/viewer.py b/python/jiminy_py/src/jiminy_py/viewer/viewer.py index a4a6f8162..9a1fef98c 100644 --- a/python/jiminy_py/src/jiminy_py/viewer/viewer.py +++ b/python/jiminy_py/src/jiminy_py/viewer/viewer.py @@ -21,7 +21,6 @@ import subprocess import webbrowser import multiprocessing -from copy import deepcopy from urllib.request import urlopen from functools import wraps, partial from threading import RLock @@ -1460,7 +1459,8 @@ def set_clock(t: Optional[float] = None) -> None: @staticmethod @_with_lock @_must_be_open - def get_camera_transform() -> Tuple[Tuple3FType, Tuple3FType]: + def get_camera_transform(camera_name: Optional[str] = None + ) -> Tuple[Tuple3FType, Tuple3FType]: """Get transform of the camera pose. .. warning:: @@ -1471,6 +1471,15 @@ def get_camera_transform() -> Tuple[Tuple3FType, Tuple3FType]: since it is impossible to get access to this information. Thus this method is valid as long as the user does not move the camera manually using mouse camera control. + + .. warning:: + Specifying a camera name is only supported by the Panda3d rendering + backend. + + :param camera_name: Name of the camera to consider. None to specify + the "default" world facing camera that is used + when the GUI (onscreen window) is enabled. + Optional: None by default. """ # Assert(s) for type checker assert Viewer.backend is not None @@ -1481,9 +1490,15 @@ def get_camera_transform() -> Tuple[Tuple3FType, Tuple3FType]: quat /= np.linalg.norm(quat) rot = pin.Quaternion(*quat).matrix() rpy = matrixToRpy(rot @ CAMERA_INV_TRANSFORM_PANDA3D.T) - else: - xyz, rpy = deepcopy(Viewer._camera_xyzrpy) - return xyz, rpy + return xyz, rpy + + # Make sure that no camera name has been specified for meshcat + if camera_name is not None: + raise ValueError( + "Specifying a camera name is only supported by Panda3d.") + + xyz, rpy = map(tuple, Viewer._camera_xyzrpy) + return xyz, rpy @_with_lock @_must_be_open @@ -1491,6 +1506,7 @@ def set_camera_transform(self: Optional["Viewer"] = None, position: Optional[Tuple3FType] = None, rotation: Optional[Tuple3FType] = None, relative: Optional[Union[str, int]] = None, + camera_name: Optional[str] = None, wait: bool = False) -> None: """Set transform of the camera pose. @@ -1500,6 +1516,10 @@ def set_camera_transform(self: Optional["Viewer"] = None, [0.0, 0.0, 0.0] moves the camera at the center of the scene, looking downward. + .. warning:: + Specifying a camera name is only supported by the Panda3d rendering + backend. + :param position: Position [X, Y, Z] as a list or 1D array. If `None`, it will be kept as is. Optional: None by default. @@ -1516,12 +1536,22 @@ def set_camera_transform(self: Optional["Viewer"] = None, - **other:** relative to a robot frame, not accounting for the rotation of the frame during travelling. It supports both frame name and index in model. + :param camera_name: Name of the camera to consider. None to specify + the "default" world facing camera that is used + when the GUI (onscreen window) is enabled. + Optional: None by default. :param wait: Whether to wait for rendering to finish.
""" # pylint: disable=invalid-name, possibly-used-before-assignment # Assert(s) for type checker assert Viewer.backend is not None assert Viewer._backend_obj is not None + assert self is None or isinstance(self, Viewer) + + # Make sure that no camera name has been specified for meshcat + if Viewer.backend == 'meshcat' and camera_name is not None: + raise ValueError( + "Specifying a camera name is only supported by Panda3d.") if self is None and relative is not None and relative != 'camera': raise ValueError( @@ -1530,7 +1560,8 @@ def set_camera_transform(self: Optional["Viewer"] = None, # Handling of position and rotation arguments if position is None or rotation is None or relative == 'camera': - position_camera, rotation_camera = Viewer.get_camera_transform() + position_camera, rotation_camera = Viewer.get_camera_transform( + camera_name=camera_name) if position is None: if relative is not None: position = (0.0, 0.0, 0.0) @@ -1568,7 +1599,8 @@ def set_camera_transform(self: Optional["Viewer"] = None, H_abs = H_orig * SE3(rotation_mat, position) position = H_abs.translation rotation = matrixToRpy(H_abs.rotation) - Viewer.set_camera_transform(None, position, rotation) + Viewer.set_camera_transform( + None, position, rotation, camera_name=camera_name) return # Perform the desired transformation @@ -1576,7 +1608,7 @@ def set_camera_transform(self: Optional["Viewer"] = None, rotation_panda3d = pin.Quaternion( rotation_mat @ CAMERA_INV_TRANSFORM_PANDA3D).coeffs() Viewer._backend_obj.gui.set_camera_transform( - position, rotation_panda3d) + position, rotation_panda3d, camera_name) elif Viewer.backend == 'meshcat': # pylint: disable=import-outside-toplevel # Meshcat camera is rotated by -pi/2 along Roll axis wrt the @@ -1758,7 +1790,7 @@ def set_color(self, """Override the color of the visual and collision geometries of the robot on-the-fly. - .. note:: + .. warning:: This method is only supported by Panda3d for now. :param color: Color of the robot. It will override the original color @@ -1864,20 +1896,56 @@ def update_floor(ground_profile: Optional[jiminy.HeightmapFunction] = None, update_floor as meshcat_update_floor) meshcat_update_floor(Viewer._backend_obj.gui, geom) + @staticmethod + @_with_lock + @_must_be_open + def add_camera(camera_name: str, + width: int, + height: int, + is_depthmap: bool) -> None: + """TODO: Write documentation. + + .. warning:: + This method is only supported by Panda3d for now. + + """ + # Assert(s) for type checker + assert Viewer.backend is not None + assert Viewer._backend_obj is not None + + # Make sure the backend supports this method + if not Viewer.backend.startswith('panda3d'): + raise NotImplementedError( + "This method is only supported by Panda3d.") + + # Add camera + Viewer._backend_obj.gui.add_camera( + camera_name, (width, height), is_depthmap) + @staticmethod @_with_lock @_must_be_open def capture_frame(width: Optional[int] = None, height: Optional[int] = None, - raw_data: bool = False) -> Union[np.ndarray, bytes]: + camera_name: Optional[str] = None, + raw_data: bool = False + ) -> Union[np.ndarray, bytes]: """Take a snapshot and return associated data. + .. warning:: + Specifying a camera name is only supported by Panda3d rendering + backend, while raw data mode is only supported by Meshcat. + :param width: Width for the image in pixels. None to keep unchanged. Optional: Kept unchanged by default. :param height: Height for the image in pixels. None to keep unchanged. Optional: Kept unchanged by default. 
+ :param camera_name: Name of the camera to consider. None to specify + the "default" world facing camera that is used + when the GUI (onscreen window) is enabled. + Optional: None by default. :param raw_data: Whether to return a 2D numpy array, or the raw output - from the backend (the actual type may vary). + from the backend as a bytes array. """ # Assert(s) for type checker assert Viewer.backend is not None @@ -1885,31 +1953,33 @@ def capture_frame(width: Optional[int] = None, # Check user arguments if Viewer.backend.startswith('panda3d'): - # Resize window if size has changed - _width, _height = Viewer._backend_obj.gui.getSize() - if width is None: - width = _width - if height is None: - height = _height - if _width != width or _height != height: - Viewer._backend_obj.gui.set_window_size(width, height) + if camera_name is None: + # Resize window if size has changed + _width, _height = Viewer._backend_obj.gui.getSize() + if width is None: + width = _width + if height is None: + height = _height + if _width != width or _height != height: + Viewer._backend_obj.gui.set_window_size(width, height) + elif width is not None or height is not None: + raise ValueError( + "Specifying both camera name and image width and/or " + "height is not supported.") - # Get raw buffer image instead of numpy array for efficiency - buffer = Viewer._backend_obj.gui.get_screenshot( - requested_format='RGB', raw=True) - if buffer is None: + # Get screenshot + image = Viewer._backend_obj.gui.get_screenshot(camera_name) + if image is None: raise RuntimeError( "Impossible to capture frame. There is something wrong " "with the graphics stack on this machine.") + return image - # Return raw data if requested - if raw_data: - return buffer - - # Extract and return numpy array RGB - return np.frombuffer(buffer, np.uint8).reshape((height, width, 3)) + # Make sure that no camera name has been specified for meshcat + if camera_name is not None: + raise ValueError( + "Specifying a camera name is only supported by Panda3d.") - # if Viewer.backend == 'meshcat': # Send capture frame request to the background recorder process img_html = Viewer._backend_obj.capture_frame(width, height)
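For reference, the sketch below condenses the bundled python/jiminy_py/examples/extra_cameras.py example to show how the user-camera API introduced by this changeset fits together end-to-end. The environment name, camera poses, and resolutions are illustrative only, and the "panda3d-sync" backend is assumed; treat it as a usage sketch rather than a reference implementation.

import gymnasium as gym
from jiminy_py.viewer import Viewer

# Spawn a simulation whose viewer runs the synchronous Panda3d backend.
env = gym.make("gym_jiminy.envs:atlas-pid",
               viewer_kwargs={"backend": "panda3d-sync"})
env.reset(seed=0)
env.simulator.render(return_rgb_array=True)

# Register one RGB camera and one depth camera, then place the depth camera.
Viewer.add_camera("rgb", width=200, height=200, is_depthmap=False)
Viewer.add_camera("depth", width=128, height=128, is_depthmap=True)
Viewer.set_camera_transform(position=[2.5, -1.4, 1.6],
                            rotation=[1.35, 0.0, 0.8],
                            camera_name="depth")

# Capture frames: (H, W, 3) uint8 for RGB cameras, (H, W) float32 for depth.
rgb_array = Viewer.capture_frame(camera_name="rgb")
depth_array = Viewer.capture_frame(camera_name="depth")

# User cameras are not managed automatically. Remove them through the
# backend-level `remove_camera` method once they are no longer needed.
Viewer._backend_obj.gui.remove_camera("rgb")
Viewer._backend_obj.gui.remove_camera("depth")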