From 3183f19c31803d0c82cc35b770fb5fc1518d4065 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Thu, 28 Mar 2019 17:05:58 +0200 Subject: [PATCH 01/16] [python] add docstrings to enums --- include/librealsense2/h/rs_frame.h | 2 +- include/librealsense2/h/rs_sensor.h | 4 ++-- include/librealsense2/h/rs_types.h | 10 +++++----- wrappers/python/python.cpp | 25 +++++++++++++------------ 4 files changed, 21 insertions(+), 20 deletions(-) diff --git a/include/librealsense2/h/rs_frame.h b/include/librealsense2/h/rs_frame.h index a34beb1b01..a0789463d9 100644 --- a/include/librealsense2/h/rs_frame.h +++ b/include/librealsense2/h/rs_frame.h @@ -25,7 +25,7 @@ typedef enum rs2_timestamp_domain } rs2_timestamp_domain; const char* rs2_timestamp_domain_to_string(rs2_timestamp_domain info); -/** \brief Per-Frame-Metadata are set of read-only properties that might be exposed for each individual frame */ +/** \brief Per-Frame-Metadata is the set of read-only properties that might be exposed for each individual frame. */ typedef enum rs2_frame_metadata_value { RS2_FRAME_METADATA_FRAME_COUNTER , /**< A sequential index managed per-stream. Integer value*/ diff --git a/include/librealsense2/h/rs_sensor.h b/include/librealsense2/h/rs_sensor.h index 43988ffbf3..82b346b43d 100644 --- a/include/librealsense2/h/rs_sensor.h +++ b/include/librealsense2/h/rs_sensor.h @@ -34,7 +34,7 @@ typedef enum rs2_camera_info { } rs2_camera_info; const char* rs2_camera_info_to_string(rs2_camera_info info); -/** \brief Streams are different types of data provided by RealSense devices */ +/** \brief Streams are different types of data provided by RealSense devices. */ typedef enum rs2_stream { RS2_STREAM_ANY, @@ -51,7 +51,7 @@ typedef enum rs2_stream } rs2_stream; const char* rs2_stream_to_string(rs2_stream stream); -/** \brief Format identifies how binary data is encoded within a frame */ +/** \brief A stream's format identifies how binary data is encoded within a frame. 
*/ typedef enum rs2_format { RS2_FORMAT_ANY , /**< When passed to enable stream, librealsense will try to provide best suited format */ diff --git a/include/librealsense2/h/rs_types.h b/include/librealsense2/h/rs_types.h index 6acdefcbcb..2d127c4ccb 100644 --- a/include/librealsense2/h/rs_types.h +++ b/include/librealsense2/h/rs_types.h @@ -13,7 +13,7 @@ extern "C" { #endif -/** \brief Category of the librealsense notifications */ +/** \brief Category of the librealsense notification. */ typedef enum rs2_notification_category{ RS2_NOTIFICATION_CATEGORY_FRAMES_TIMEOUT, /**< Frames didn't arrived within 5 seconds */ RS2_NOTIFICATION_CATEGORY_FRAME_CORRUPTED, /**< Received partial/incomplete frame */ @@ -25,7 +25,7 @@ typedef enum rs2_notification_category{ } rs2_notification_category; const char* rs2_notification_category_to_string(rs2_notification_category category); -/** \brief Exception types are the different categories of errors that RealSense API might return */ +/** \brief Exception types are the different categories of errors that RealSense API might return. */ typedef enum rs2_exception_type { RS2_EXCEPTION_TYPE_UNKNOWN, @@ -53,7 +53,7 @@ typedef enum rs2_distortion } rs2_distortion; const char* rs2_distortion_to_string(rs2_distortion distortion); -/** \brief Video stream intrinsics */ +/** \brief Video stream intrinsics. */ typedef struct rs2_intrinsics { int width; /**< Width of the image in pixels */ @@ -116,7 +116,7 @@ typedef struct rs2_pose unsigned int mapper_confidence; /**< pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ } rs2_pose; -/** \brief Severity of the librealsense logger */ +/** \brief Severity of the librealsense logger. 
*/ typedef enum rs2_log_severity { RS2_LOG_SEVERITY_DEBUG, /**< Detailed information about ordinary operations */ RS2_LOG_SEVERITY_INFO , /**< Terse information about ordinary operations */ @@ -128,7 +128,7 @@ typedef enum rs2_log_severity { } rs2_log_severity; const char* rs2_log_severity_to_string(rs2_log_severity info); -/** \brief Specifies advanced interfaces (capabilities) objects may implement */ +/** \brief Specifies advanced interfaces (capabilities) objects may implement. */ typedef enum rs2_extension { RS2_EXTENSION_UNKNOWN, diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index 409812a316..99e57993da 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -38,10 +38,11 @@ std::string make_pythonic_str(std::string str) } return str; } -#define BIND_ENUM(module, rs2_enum_type, RS2_ENUM_COUNT) \ +#define BIND_ENUM(module, rs2_enum_type, RS2_ENUM_COUNT, docstring) \ static std::string rs2_enum_type##pyclass_name = std::string(#rs2_enum_type).substr(rs2_prefix.length()); \ /* Above 'static' is required in order to keep the string alive since py::class_ does not copy it */ \ py::enum_ py_##rs2_enum_type(module, rs2_enum_type##pyclass_name.c_str()); \ + py_##rs2_enum_type.doc() = docstring; \ /* std::cout << std::endl << "## " << rs2_enum_type##pyclass_name << ":" << std::endl; */ \ for (int i = 0; i < static_cast(RS2_ENUM_COUNT); i++) \ { \ @@ -143,17 +144,17 @@ PYBIND11_MODULE(NAME, m) { /** Binding of rs2_ enums */ - BIND_ENUM(m, rs2_camera_info, RS2_CAMERA_INFO_COUNT) - BIND_ENUM(m, rs2_frame_metadata_value, RS2_FRAME_METADATA_COUNT) - BIND_ENUM(m, rs2_stream, RS2_STREAM_COUNT) - BIND_ENUM(m, rs2_extension, RS2_EXTENSION_COUNT) - BIND_ENUM(m, rs2_format, RS2_FORMAT_COUNT) - BIND_ENUM(m, rs2_notification_category, RS2_NOTIFICATION_CATEGORY_COUNT) - BIND_ENUM(m, rs2_log_severity, RS2_LOG_SEVERITY_COUNT) - BIND_ENUM(m, rs2_option, RS2_OPTION_COUNT) - BIND_ENUM(m, rs2_timestamp_domain, RS2_TIMESTAMP_DOMAIN_COUNT) - 
BIND_ENUM(m, rs2_distortion, RS2_DISTORTION_COUNT) - BIND_ENUM(m, rs2_playback_status, RS2_PLAYBACK_STATUS_COUNT) + BIND_ENUM(m, rs2_camera_info, RS2_CAMERA_INFO_COUNT, "This information is mainly available for camera debug and troubleshooting and should not be used in applications.") + BIND_ENUM(m, rs2_frame_metadata_value, RS2_FRAME_METADATA_COUNT, "Per-Frame-Metadata is the set of read-only properties that might be exposed for each individual frame.") + BIND_ENUM(m, rs2_stream, RS2_STREAM_COUNT, "Streams are different types of data provided by RealSense devices.") + BIND_ENUM(m, rs2_extension, RS2_EXTENSION_COUNT, "Specifies advanced interfaces (capabilities) objects may implement.") + BIND_ENUM(m, rs2_format, RS2_FORMAT_COUNT, "A stream's format identifies how binary data is encoded within a frame.") + BIND_ENUM(m, rs2_notification_category, RS2_NOTIFICATION_CATEGORY_COUNT, "Category of the librealsense notification.") + BIND_ENUM(m, rs2_log_severity, RS2_LOG_SEVERITY_COUNT, "Severity of the librealsense logger.") + BIND_ENUM(m, rs2_option, RS2_OPTION_COUNT, "Defines general configuration controls. These can generally be mapped to camera UVC controls, and unless stated otherwise, can be set / queried at any time.") + BIND_ENUM(m, rs2_timestamp_domain, RS2_TIMESTAMP_DOMAIN_COUNT, "Specifies the clock in relation to which the frame timestamp was measured.") + BIND_ENUM(m, rs2_distortion, RS2_DISTORTION_COUNT, "Distortion model: defines how pixel coordinates should be mapped to sensor coordinates.") + BIND_ENUM(m, rs2_playback_status, RS2_PLAYBACK_STATUS_COUNT, "") py::class_ extrinsics(m, "extrinsics"); extrinsics.def(py::init<>()) From 8cedaa0e06f16be9b85924001d22982c53474611 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Thu, 18 Apr 2019 17:08:46 +0300 Subject: [PATCH 02/16] [docs] lots of work on python docs, some edits in C++ docs too. 
Minor python code edits too --- include/librealsense2/h/rs_sensor.h | 2 +- include/librealsense2/h/rs_types.h | 6 +- include/librealsense2/hpp/rs_context.hpp | 4 +- include/librealsense2/hpp/rs_frame.hpp | 44 +-- wrappers/python/python.cpp | 412 ++++++++++++----------- 5 files changed, 238 insertions(+), 230 deletions(-) diff --git a/include/librealsense2/h/rs_sensor.h b/include/librealsense2/h/rs_sensor.h index 82b346b43d..3e59c7cb98 100644 --- a/include/librealsense2/h/rs_sensor.h +++ b/include/librealsense2/h/rs_sensor.h @@ -79,7 +79,7 @@ typedef enum rs2_format } rs2_format; const char* rs2_format_to_string(rs2_format format); -/** \brief Cross-stream extrinsics: encode the topology describing how the different devices are connected. */ +/** \brief Cross-stream extrinsics: encodes the topology describing how the different devices are oriented. */ typedef struct rs2_extrinsics { float rotation[9]; /**< Column-major 3x3 rotation matrix */ diff --git a/include/librealsense2/h/rs_types.h b/include/librealsense2/h/rs_types.h index 2d127c4ccb..c7890aad58 100644 --- a/include/librealsense2/h/rs_types.h +++ b/include/librealsense2/h/rs_types.h @@ -66,7 +66,7 @@ typedef struct rs2_intrinsics float coeffs[5]; /**< Distortion coefficients, order: k1, k2, p1, p2, k3 */ } rs2_intrinsics; -/** \brief Motion device intrinsics: scale, bias, and variances */ +/** \brief Motion device intrinsics: scale, bias, and variances. 
*/ typedef struct rs2_motion_device_intrinsic { /* \internal @@ -112,8 +112,8 @@ typedef struct rs2_pose rs2_quaternion rotation; /**< Qi, Qj, Qk, Qr components of rotation as represented in quaternion rotation (relative to initial position) */ rs2_vector angular_velocity; /**< X, Y, Z values of angular velocity, in radians/sec */ rs2_vector angular_acceleration; /**< X, Y, Z values of angular acceleration, in radians/sec^2 */ - unsigned int tracker_confidence; /**< pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ - unsigned int mapper_confidence; /**< pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ + unsigned int tracker_confidence; /**< Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ + unsigned int mapper_confidence; /**< Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ } rs2_pose; /** \brief Severity of the librealsense logger. */ diff --git a/include/librealsense2/hpp/rs_context.hpp b/include/librealsense2/hpp/rs_context.hpp index 7d40f13f0a..3d105aa0aa 100644 --- a/include/librealsense2/hpp/rs_context.hpp +++ b/include/librealsense2/hpp/rs_context.hpp @@ -17,7 +17,7 @@ namespace rs2 :_removed(removed), _added(added) {} /** - * check if specific device was disconnected + * check if a specific device was disconnected * \return true if device disconnected, false if device connected */ bool was_removed(const rs2::device& dev) const @@ -34,7 +34,7 @@ namespace rs2 } /** - * check if specific device was added + * check if a specific device was added * \return true if device added, false otherwise */ bool was_added(const rs2::device& dev) const diff --git a/include/librealsense2/hpp/rs_frame.hpp b/include/librealsense2/hpp/rs_frame.hpp index 1cd1c07363..46fe41375a 100644 --- a/include/librealsense2/hpp/rs_frame.hpp +++ b/include/librealsense2/hpp/rs_frame.hpp @@ -407,7 +407,7 @@ namespace rs2 void keep() { rs2_keep_frame(frame_ref); } /** - * Parenthesis operator 
check internal frame handle is valid. + * Parenthesis operator check if internal frame handle is valid. * \return bool - true or false. */ operator bool() const { return frame_ref != nullptr; } @@ -570,7 +570,7 @@ namespace rs2 { public: /** - * Inherit frame class with additional video related attributs/functions + * Extend frame class with additional video related attributes and functions * \param[in] frame - existing frame instance */ video_frame(const frame& f) @@ -653,12 +653,12 @@ namespace rs2 { public: /** - * Inherit frame class with additional point cloud related attributs/functions + * Extend frame class with additional point cloud related attributes and functions */ points() : frame(), _size(0) {} /** - * Inherit frame class with additional point cloud related attributs/functions + * Extend frame class with additional point cloud related attributes and functions * \param[in] frame - existing frame instance */ points(const frame& f) @@ -678,7 +678,7 @@ namespace rs2 } } /** - * Retrieve back the vertices + * Retrieve the vertices * \param[in] vertex* - pointer of vertex sturcture */ const vertex* get_vertices() const @@ -727,7 +727,7 @@ namespace rs2 { public: /** - * Inherit video_frame class with additional depth related attributs/functions + * Extend video_frame class with additional depth related attributes and functions * \param[in] frame - existing frame instance */ depth_frame(const frame& f) @@ -742,10 +742,10 @@ namespace rs2 } /** - * Return the distance between two depth pixels - * \param[in] int x - first pixel position. - * \param[in] int y - second pixel position. - * \return float - distance between to points. + * Provide the depth in metric units at the given pixel + * \param[in] int x - pixel's x coordinate. + * \param[in] int y - pixel's y coordinate. 
+ * \return float - depth in metric units at given pixel */ float get_distance(int x, int y) const { @@ -790,7 +790,7 @@ namespace rs2 { public: /** - * Inherit frame class with additional motion related attributs/functions + * Extends frame class with additional motion related attributes and functions * \param[in] frame - existing frame instance */ motion_frame(const frame& f) @@ -804,7 +804,7 @@ namespace rs2 error::handle(e); } /** - * Retrieve back the motion data from IMU sensor + * Retrieve the motion data from IMU sensor * \return rs2_vector - 3D vector in Euclidean coordinate space. */ rs2_vector get_motion_data() const @@ -818,7 +818,7 @@ namespace rs2 { public: /** - * Inherit frame class with additional pose related attributs/functions + * Extends frame class with additional pose related attributes and functions * \param[in] frame - existing frame instance */ pose_frame(const frame& f) @@ -832,7 +832,7 @@ namespace rs2 error::handle(e); } /** - * Retrieve back the pose data from T2xx position tracking sensor + * Retrieve the pose data from T2xx position tracking sensor * \return rs2_pose - orientation and velocity data. 
*/ rs2_pose get_pose_data() const @@ -849,11 +849,11 @@ namespace rs2 { public: /** - * Inherit frame class with additional frameset related attributs/functions + * Extend frame class with additional frameset related attributes and functions */ frameset() :_size(0) {}; /** - * Inherit frame class with additional frameset related attributs/functions + * Extend frame class with additional frameset related attributes and functions * \param[in] frame - existing frame instance */ frameset(const frame& f) @@ -875,7 +875,7 @@ namespace rs2 } /** - * Retrieve back the first frame of specific stream and format types, if no frame found, return the default one(frame instance) + * Retrieve the first frame of specific stream and format types, if no frame found, return the default one (frame instance) * \param[in] rs2_stream s - frame to be retrieved from this stream type. * \param[in] rs2_format f - frame to be retrieved from this format type. * \return frame - first found frame with s stream type. @@ -892,7 +892,7 @@ namespace rs2 return result; } /** - * Retrieve back the first frame of specific stream type, if no frame found, error will be thrown + * Retrieve the first frame of specific stream type, if no frame found, an error will be thrown * \param[in] rs2_stream s - frame to be retrieved from this stream type. * \param[in] rs2_format f - frame to be retrieved from this format type. * \return frame - first found frame with s stream type. @@ -905,7 +905,7 @@ namespace rs2 } /** - * Retrieve back the first depth frame, if no frame found, return the default one(frame instance) + * Retrieve the first depth frame, if no frame found, return the default one (frame instance) * \return depth_frame - first found depth frame. */ depth_frame get_depth_frame() const @@ -914,7 +914,7 @@ namespace rs2 return f.as(); } /** - * Retrieve back the first color frame, if no frame found, search the color frame from IR stream. 
If still can't find, return the default one(frame instance) + * Retrieve the first color frame, if no frame found, search the color frame from IR stream. If one still can't be found, return the default one (frame instance) * \return video_frame - first found color frame. */ video_frame get_color_frame() const @@ -930,7 +930,7 @@ namespace rs2 return f; } /** - * Retrieve back the first infrared frame, return the default one(frame instance) + * Retrieve the first infrared frame, if no frame found, return the default one (frame instance) * \param[in] size_t index * \return video_frame - first found infrared frame. */ @@ -1005,7 +1005,7 @@ namespace rs2 } /** - * Template function, extract internal frame handle from the frameset and invoke the action function + * Template function, extract internal frame handles from the frameset and invoke the action function * \param[in] action - instance with () operator implemented will be invoke after frame extraction. */ template diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index 99e57993da..0af61fb96f 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -41,8 +41,7 @@ std::string make_pythonic_str(std::string str) #define BIND_ENUM(module, rs2_enum_type, RS2_ENUM_COUNT, docstring) \ static std::string rs2_enum_type##pyclass_name = std::string(#rs2_enum_type).substr(rs2_prefix.length()); \ /* Above 'static' is required in order to keep the string alive since py::class_ does not copy it */ \ - py::enum_ py_##rs2_enum_type(module, rs2_enum_type##pyclass_name.c_str()); \ - py_##rs2_enum_type.doc() = docstring; \ + py::enum_ py_##rs2_enum_type(module, rs2_enum_type##pyclass_name.c_str(), docstring); \ /* std::cout << std::endl << "## " << rs2_enum_type##pyclass_name << ":" << std::endl; */ \ for (int i = 0; i < static_cast(RS2_ENUM_COUNT); i++) \ { \ @@ -154,102 +153,100 @@ PYBIND11_MODULE(NAME, m) { BIND_ENUM(m, rs2_option, RS2_OPTION_COUNT, "Defines general configuration controls. 
These can generally be mapped to camera UVC controls, and unless stated otherwise, can be set / queried at any time.") BIND_ENUM(m, rs2_timestamp_domain, RS2_TIMESTAMP_DOMAIN_COUNT, "Specifies the clock in relation to which the frame timestamp was measured.") BIND_ENUM(m, rs2_distortion, RS2_DISTORTION_COUNT, "Distortion model: defines how pixel coordinates should be mapped to sensor coordinates.") - BIND_ENUM(m, rs2_playback_status, RS2_PLAYBACK_STATUS_COUNT, "") + BIND_ENUM(m, rs2_playback_status, RS2_PLAYBACK_STATUS_COUNT, "") // No docstring in C++ - py::class_ extrinsics(m, "extrinsics"); + py::class_ extrinsics(m, "extrinsics", "Cross-stream extrinsics: encodes the topology describing how the different devices are oriented."); extrinsics.def(py::init<>()) - .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_extrinsics, rotation, float, 9)) - .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_extrinsics, translation, float, 3)) + .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_extrinsics, rotation, float, 9), "Column - major 3x3 rotation matrix") + .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_extrinsics, translation, float, 3), "Three-element translation vector, in meters") .def("__repr__", [](const rs2_extrinsics &e) { - std::stringstream ss; - ss << "rotation: " << array_to_string(e.rotation); - ss << "\ntranslation: " << array_to_string(e.translation); - return ss.str(); - }); + std::stringstream ss; + ss << "rotation: " << array_to_string(e.rotation); + ss << "\ntranslation: " << array_to_string(e.translation); + return ss.str(); + }); - py::class_ intrinsics(m, "intrinsics"); + py::class_ intrinsics(m, "intrinsics", "Video stream intrinsics."); intrinsics.def(py::init<>()) - .def_readwrite("width", &rs2_intrinsics::width) - .def_readwrite("height", &rs2_intrinsics::height) - .def_readwrite("ppx", &rs2_intrinsics::ppx) - .def_readwrite("ppy", &rs2_intrinsics::ppy) - .def_readwrite("fx", &rs2_intrinsics::fx) - .def_readwrite("fy", &rs2_intrinsics::fy) - .def_readwrite("model", 
&rs2_intrinsics::model) - .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_intrinsics, coeffs, float, 5)) - .def("__repr__", [](const rs2_intrinsics& self) - { - std::stringstream ss; - ss << "width: " << self.width << ", "; - ss << "height: " << self.height << ", "; - ss << "ppx: " << self.ppx << ", "; - ss << "ppy: " << self.ppy << ", "; - ss << "fx: " << self.fx << ", "; - ss << "fy: " << self.fy << ", "; - ss << "model: " << self.model << ", "; - ss << "coeffs: " << array_to_string(self.coeffs); - return ss.str(); - }); - - py::class_ motion_device_inrinsic(m, "motion_device_intrinsic"); + .def_readwrite("width", &rs2_intrinsics::width, "Width of the image in pixels") + .def_readwrite("height", &rs2_intrinsics::height, "Height of the image in pixels") + .def_readwrite("ppx", &rs2_intrinsics::ppx, "Horizontal coordinate of the principal point of the image, as a pixel offset from the left edge") + .def_readwrite("ppy", &rs2_intrinsics::ppy, "Vertical coordinate of the principal point of the image, as a pixel offset from the top edge") + .def_readwrite("fx", &rs2_intrinsics::fx, "Focal length of the image plane, as a multiple of pixel width") + .def_readwrite("fy", &rs2_intrinsics::fy, "Focal length of the image plane, as a multiple of pixel height") + .def_readwrite("model", &rs2_intrinsics::model, "Distortion model of the image") + .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_intrinsics, coeffs, float, 5), "Distortion coefficients, order: k1, k2, p1, p2, k3") + .def("__repr__", [](const rs2_intrinsics& self) { + std::stringstream ss; + ss << "width: " << self.width << ", "; + ss << "height: " << self.height << ", "; + ss << "ppx: " << self.ppx << ", "; + ss << "ppy: " << self.ppy << ", "; + ss << "fx: " << self.fx << ", "; + ss << "fy: " << self.fy << ", "; + ss << "model: " << self.model << ", "; + ss << "coeffs: " << array_to_string(self.coeffs); + return ss.str(); + }); + + py::class_ motion_device_inrinsic(m, "motion_device_intrinsic", "Motion device intrinsics: 
scale, bias, and variances."); motion_device_inrinsic.def(py::init<>()) - .def_property(BIND_RAW_2D_ARRAY_PROPERTY(rs2_motion_device_intrinsic, data, float, 3, 4)) - .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_motion_device_intrinsic, noise_variances, float, 3)) - .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_motion_device_intrinsic, bias_variances, float, 3)); + .def_property(BIND_RAW_2D_ARRAY_PROPERTY(rs2_motion_device_intrinsic, data, float, 3, 4), "Interpret data array values") + .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_motion_device_intrinsic, noise_variances, float, 3), "Variance of noise for X, Y, and Z axis") + .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_motion_device_intrinsic, bias_variances, float, 3), "Variance of bias for X, Y, and Z axis"); /* rs2_types.hpp */ - py::class_ option_range(m, "option_range"); + py::class_ option_range(m, "option_range"); // No docstring in C++ option_range.def_readwrite("min", &rs2::option_range::min) .def_readwrite("max", &rs2::option_range::max) .def_readwrite("default", &rs2::option_range::def) .def_readwrite("step", &rs2::option_range::step) .def("__repr__", [](const rs2::option_range &self) { - std::stringstream ss; - ss << "<" SNAME ".option_range: " << self.min << "-" << self.max - << "/" << self.step << " [" << self.def << "]>"; - return ss.str(); - }); + std::stringstream ss; + ss << "<" SNAME ".option_range: " << self.min << "-" << self.max + << "/" << self.step << " [" << self.def << "]>"; + return ss.str(); + }); - py::class_ region_of_interest(m, "region_of_interest"); + py::class_ region_of_interest(m, "region_of_interest"); // No docstring in C++ region_of_interest.def_readwrite("min_x", &rs2::region_of_interest::min_x) .def_readwrite("min_y", &rs2::region_of_interest::min_y) .def_readwrite("max_x", &rs2::region_of_interest::max_x) .def_readwrite("max_y", &rs2::region_of_interest::max_y); /* rs2_context.hpp */ - py::class_ context(m, "context"); + py::class_ context(m, "context", "Librealsense context class. 
Includes realsense API version."); context.def(py::init<>()) .def("query_devices", (rs2::device_list(rs2::context::*)() const) &rs2::context::query_devices, "Create a static" - " snapshot of all connected devices a the time of the call.") + " snapshot of all connected devices a the time of the call.") .def_property_readonly("devices", (rs2::device_list(rs2::context::*)() const) &rs2::context::query_devices, - "Create a static snapshot of all connected devices a the time of the call.") + "A static snapshot of all connected devices at time of access. Identical to calling query_devices.") .def("query_all_sensors", &rs2::context::query_all_sensors, "Generate a flat list of " - "all available sensors from all RealSense devices.") - .def_property_readonly("sensors", &rs2::context::query_all_sensors, "Generate a flat list of " - "all available sensors from all RealSense devices.") - .def("get_sensor_parent", &rs2::context::get_sensor_parent, "s"_a) - .def("set_devices_changed_callback", [](rs2::context& self, std::function &callback) - { - self.set_devices_changed_callback(callback); - }, "Register devices changed callback.", "callback"_a) + "all available sensors from all RealSense devices.") + .def_property_readonly("sensors", &rs2::context::query_all_sensors, "A flat list of " + "all available sensors from all RealSense devices. Identical to calling query_all_sensors.") + .def("get_sensor_parent", &rs2::context::get_sensor_parent, "s"_a) // no docstring in C++ + .def("set_devices_changed_callback", [](rs2::context& self, std::function &callback) { + self.set_devices_changed_callback(callback); + }, "Register devices changed callback.", "callback"_a) // not binding create_processing_block, not inpr Python API. .def("load_device", &rs2::context::load_device, "Creates a devices from a RealSense file.\n" - "On successful load, the device will be appended to the context and a devices_changed event triggered." 
- "filename"_a) - .def("unload_device", &rs2::context::unload_device, "filename"_a); + "On successful load, the device will be appended to the context and a devices_changed event triggered." + "filename"_a) + .def("unload_device", &rs2::context::unload_device, "filename"_a); // No docstring in C++ /* rs2_device.hpp */ - py::class_ device(m, "device"); + py::class_ device(m, "device"); // No docstring in C++ device.def("query_sensors", &rs2::device::query_sensors, "Returns the list of adjacent devices, " - "sharing the same physical parent composite device.") - .def_property_readonly("sensors", &rs2::device::query_sensors, "Returns the list of adjacent devices, " - "sharing the same physical parent composite device.") - .def("first_depth_sensor", [](rs2::device& self) { return self.first(); }) - .def("first_roi_sensor", [](rs2::device& self) { return self.first(); }) - .def("first_pose_sensor", [](rs2::device& self) { return self.first(); }) + "sharing the same physical parent composite device.") + .def_property_readonly("sensors", &rs2::device::query_sensors, "List of adjacent devices, " + "sharing the same physical parent composite device. 
Identical to calling query_sensors.") + .def("first_depth_sensor", [](rs2::device& self) { return self.first(); }) // No docstring in C++ + .def("first_roi_sensor", [](rs2::device& self) { return self.first(); }) // No docstring in C++ + .def("first_pose_sensor", [](rs2::device& self) { return self.first(); }) // No docstring in C++ .def("supports", &rs2::device::supports, "Check if specific camera info is supported.", "info"_a) .def("get_info", &rs2::device::get_info, "Retrieve camera specific information, " - "like versions of various internal components", "info"_a) + "like versions of various internal components", "info"_a) .def("hardware_reset", &rs2::device::hardware_reset, "Send hardware reset request to the device") .def(py::init<>()) .def("__nonzero__", &rs2::device::operator bool) @@ -257,64 +254,64 @@ PYBIND11_MODULE(NAME, m) { .def(BIND_DOWNCAST(device, playback)) .def(BIND_DOWNCAST(device, recorder)) .def(BIND_DOWNCAST(device, tm2)) - .def("__repr__", [](const rs2::device &self) - { - std::stringstream ss; - ss << "<" SNAME ".device: " << self.get_info(RS2_CAMERA_INFO_NAME) - << " (S/N: " << self.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) - << ")>"; - return ss.str(); - }); - - py::class_ debug_protocol(m, "debug_protocol"); + .def("__repr__", [](const rs2::device &self) { + std::stringstream ss; + ss << "<" SNAME ".device: " << self.get_info(RS2_CAMERA_INFO_NAME) + << " (S/N: " << self.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) + << ")>"; + return ss.str(); + }); + + py::class_ debug_protocol(m, "debug_protocol"); // No docstring in C++ debug_protocol.def(py::init()) .def("send_and_receive_raw_data", &rs2::debug_protocol::send_and_receive_raw_data, - "input"_a); + "input"_a); // No docstring in C++ - py::class_ device_list(m, "device_list"); + py::class_ device_list(m, "device_list"); // No docstring in C++ device_list.def(py::init<>()) - .def("contains", &rs2::device_list::contains) + .def("contains", &rs2::device_list::contains) // No docstring in C++ 
.def("__getitem__", [](const rs2::device_list& self, size_t i) { - if (i >= self.size()) - throw py::index_error(); - return self[uint32_t(i)]; - }) + if (i >= self.size()) + throw py::index_error(); + return self[uint32_t(i)]; + }) .def("__len__", &rs2::device_list::size) - .def("size", &rs2::device_list::size) + .def("size", &rs2::device_list::size) // No docstring in C++ .def("__iter__", [](const rs2::device_list& self) { - return py::make_iterator(self.begin(), self.end()); - }, py::keep_alive<0, 1>()) + return py::make_iterator(self.begin(), self.end()); + }, py::keep_alive<0, 1>()) .def("__getitem__", [](const rs2::device_list& self, py::slice slice) { - size_t start, stop, step, slicelength; - if (!slice.compute(self.size(), &start, &stop, &step, &slicelength)) - throw py::error_already_set(); - auto *dlist = new std::vector(slicelength); - for (size_t i = 0; i < slicelength; ++i) { - (*dlist)[i] = self[uint32_t(start)]; - start += step; - } - return dlist; - }) - .def("front", &rs2::device_list::front) - .def("back", &rs2::device_list::back); + size_t start, stop, step, slicelength; + if (!slice.compute(self.size(), &start, &stop, &step, &slicelength)) + throw py::error_already_set(); + auto *dlist = new std::vector(slicelength); + for (size_t i = 0; i < slicelength; ++i) { + (*dlist)[i] = self[uint32_t(start)]; + start += step; + } + return dlist; + }) + .def("front", &rs2::device_list::front) // No docstring in C++ + .def("back", &rs2::device_list::back); // No docstring in C++ // Not binding status_changed_callback, templated - py::class_ event_information(m, "event_information"); + py::class_ event_information(m, "event_information"); // No docstring in C++ event_information.def("was_removed", &rs2::event_information::was_removed, "Check if " - "specific device was disconnected.", "dev"_a) + "a specific device was disconnected.", "dev"_a) .def("was_added", &rs2::event_information::was_added, "Check if " - "specific device was added.", "dev"_a) + "a 
specific device was added.", "dev"_a) .def("get_new_devices", &rs2::event_information::get_new_devices, "Returns a " - "list of all newly connected devices"); + "list of all newly connected devices"); - py::class_ tm2(m, "tm2"); + py::class_ tm2(m, "tm2"); // No docstring in C++ tm2.def(py::init(), "device"_a) - .def("enable_loopback", &rs2::tm2::enable_loopback, "filename"_a) - .def("disable_loopback", &rs2::tm2::disable_loopback) - .def("is_loopback_enabled", &rs2::tm2::is_loopback_enabled) - .def("connect_controller", &rs2::tm2::connect_controller, "mac_address"_a) - .def("disconnect_controller", &rs2::tm2::disconnect_controller, "id"_a); + .def("enable_loopback", &rs2::tm2::enable_loopback, "Enter the given device into " + "loopback operation mode that uses the given file as input for raw data", "filename"_a) + .def("disable_loopback", &rs2::tm2::disable_loopback, "Restores the given device into normal operation mode") + .def("is_loopback_enabled", &rs2::tm2::is_loopback_enabled, "Checks if the device is in loopback mode or not") + .def("connect_controller", &rs2::tm2::connect_controller, "Connects to a given tm2 controller", "mac_address"_a) + .def("disconnect_controller", &rs2::tm2::disconnect_controller, "Disconnects a given tm2 controller", "id"_a); /* rs2_frame.hpp */ @@ -342,25 +339,25 @@ PYBIND11_MODULE(NAME, m) { else return BufData(const_cast(self.get_data()), 1, std::string("@B"), 0); }; - py::class_ frame(m, "frame"); + py::class_ frame(m, "frame", "Base class for multiple frame extensions"); frame.def(py::init<>()) - // .def(py::self = py::self) // can't overload assignment in python + // .def(py::self = py::self) // can't overload assignment in python .def(py::init()) - .def("swap", &rs2::frame::swap, "other"_a) - .def("__nonzero__", &rs2::frame::operator bool) + .def("swap", &rs2::frame::swap, "Swap the internal frame handle with the one in parameter", "other"_a) + .def("__nonzero__", &rs2::frame::operator bool, "check if internal frame handle is 
valid") .def("get_timestamp", &rs2::frame::get_timestamp, "Retrieve the time at which the frame was captured") - .def_property_readonly("timestamp", &rs2::frame::get_timestamp, "Retrieve the time at which the frame was captured") + .def_property_readonly("timestamp", &rs2::frame::get_timestamp, "Time at which the frame was captured. Identical to calling get_timestamp.") .def("get_frame_timestamp_domain", &rs2::frame::get_frame_timestamp_domain, "Retrieve the timestamp domain.") - .def_property_readonly("frame_timestamp_domain", &rs2::frame::get_frame_timestamp_domain, "Retrieve the timestamp domain.") + .def_property_readonly("frame_timestamp_domain", &rs2::frame::get_frame_timestamp_domain, "The timestamp domain. Identical to calling get_frame_timestamp_domain.") .def("get_frame_metadata", &rs2::frame::get_frame_metadata, "Retrieve the current value of a single frame_metadata.", "frame_metadata"_a) .def("supports_frame_metadata", &rs2::frame::supports_frame_metadata, "Determine if the device allows a specific metadata to be queried.", "frame_metadata"_a) .def("get_frame_number", &rs2::frame::get_frame_number, "Retrieve the frame number.") - .def_property_readonly("frame_number", &rs2::frame::get_frame_number, "Retrieve the frame number.") - .def("get_data", get_frame_data, "retrieve data from the frame handle.", py::keep_alive<0, 1>()) - .def_property_readonly("data", get_frame_data, "retrieve data from the frame handle.", py::keep_alive<0, 1>()) - .def("get_profile", &rs2::frame::get_profile) - .def("keep", &rs2::frame::keep) - .def_property_readonly("profile", &rs2::frame::get_profile) + .def_property_readonly("frame_number", &rs2::frame::get_frame_number, "The frame number. Identical to calling get_frame_number.") + .def("get_data", get_frame_data, "Retrieve data from the frame handle.", py::keep_alive<0, 1>()) + .def_property_readonly("data", get_frame_data, "Data from the frame handle. 
Identical to calling get_data.", py::keep_alive<0, 1>()) + .def("get_profile", &rs2::frame::get_profile, "Retrieve stream profile from frame handle.") + .def_property_readonly("profile", &rs2::frame::get_profile, "Stream profile from frame handle. Identical to calling get_profile.") + .def("keep", &rs2::frame::keep, "Keep the frame, otherwise if no refernce to the frame, the frame will be released.") .def(BIND_DOWNCAST(frame, frame)) .def(BIND_DOWNCAST(frame, points)) .def(BIND_DOWNCAST(frame, frameset)) @@ -368,99 +365,99 @@ PYBIND11_MODULE(NAME, m) { .def(BIND_DOWNCAST(frame, depth_frame)) .def(BIND_DOWNCAST(frame, motion_frame)) .def(BIND_DOWNCAST(frame, pose_frame)); + // No apply_filter? - py::class_ video_frame(m, "video_frame"); + py::class_ video_frame(m, "video_frame", "Extend frame class with additional video related attributes and functions."); video_frame.def(py::init()) .def("get_width", &rs2::video_frame::get_width, "Returns image width in pixels.") - .def_property_readonly("width", &rs2::video_frame::get_width, "Returns image width in pixels.") + .def_property_readonly("width", &rs2::video_frame::get_width, "Image width in pixels. Identical to calling get_width.") .def("get_height", &rs2::video_frame::get_height, "Returns image height in pixels.") - .def_property_readonly("height", &rs2::video_frame::get_height, "Returns image height in pixels.") + .def_property_readonly("height", &rs2::video_frame::get_height, "Image height in pixels. 
Identical to calling get_height.") .def("get_stride_in_bytes", &rs2::video_frame::get_stride_in_bytes, "Retrieve frame stride, meaning the actual line width in memory in bytes (not the logical image width).") - .def_property_readonly("stride_in_bytes", &rs2::video_frame::get_stride_in_bytes, "Retrieve frame stride, meaning the actual line width in memory in bytes (not the logical image width).") + .def_property_readonly("stride_in_bytes", &rs2::video_frame::get_stride_in_bytes, "Frame stride, meaning the actual line width in memory in bytes (not the logical image width). Identical to calling get_stride_in_bytes.") .def("get_bits_per_pixel", &rs2::video_frame::get_bits_per_pixel, "Retrieve bits per pixel.") - .def_property_readonly("bits_per_pixel", &rs2::video_frame::get_bits_per_pixel, "Retrieve bits per pixel.") + .def_property_readonly("bits_per_pixel", &rs2::video_frame::get_bits_per_pixel, "Bits per pixel. Identical to calling get_bits_per_pixel.") .def("get_bytes_per_pixel", &rs2::video_frame::get_bytes_per_pixel, "Retrieve bytes per pixel.") - .def("get_bytes_per_pixel", &rs2::video_frame::get_bytes_per_pixel, "Retrieve bytes per pixel."); + .def_property_readonly("bytes_per_pixel", &rs2::video_frame::get_bytes_per_pixel, "Bytes per pixel. 
Identical to calling get_bytes_per_pixel."); - py::class_ vector(m, "vector"); + py::class_ vector(m, "vector", "3D vector in Euclidean coordinate space."); vector.def(py::init<>()) .def_readwrite("x", &rs2_vector::x) .def_readwrite("y", &rs2_vector::y) .def_readwrite("z", &rs2_vector::z) - .def("__repr__", [](const rs2_vector& self) - { - std::stringstream ss; - ss << "x: " << self.x << ", "; - ss << "y: " << self.y << ", "; - ss << "z: " << self.z; - return ss.str(); - }); - - py::class_ quaternion(m, "quaternion"); + .def("__repr__", [](const rs2_vector& self) { + std::stringstream ss; + ss << "x: " << self.x << ", "; + ss << "y: " << self.y << ", "; + ss << "z: " << self.z; + return ss.str(); + }); + + py::class_ quaternion(m, "quaternion", "Quaternion used to represent rotation."); quaternion.def(py::init<>()) .def_readwrite("x", &rs2_quaternion::x) .def_readwrite("y", &rs2_quaternion::y) .def_readwrite("z", &rs2_quaternion::z) .def_readwrite("w", &rs2_quaternion::w) - .def("__repr__", [](const rs2_quaternion& self) - { - std::stringstream ss; - ss << "x: " << self.x << ", "; - ss << "y: " << self.y << ", "; - ss << "z: " << self.z << ", "; - ss << "w: " << self.w; - return ss.str(); - }); + .def("__repr__", [](const rs2_quaternion& self) { + std::stringstream ss; + ss << "x: " << self.x << ", "; + ss << "y: " << self.y << ", "; + ss << "z: " << self.z << ", "; + ss << "w: " << self.w; + return ss.str(); + }); - py::class_ pose(m, "pose"); + py::class_ pose(m, "pose"); // No docstring in C++ pose.def(py::init<>()) - .def_readwrite("translation", &rs2_pose::translation) - .def_readwrite("velocity", &rs2_pose::velocity) - .def_readwrite("acceleration", &rs2_pose::acceleration) - .def_readwrite("rotation", &rs2_pose::rotation) - .def_readwrite("angular_velocity", &rs2_pose::angular_velocity) - .def_readwrite("angular_acceleration", &rs2_pose::angular_acceleration) - .def_readwrite("tracker_confidence", &rs2_pose::tracker_confidence) - 
.def_readwrite("mapper_confidence", &rs2_pose::mapper_confidence); - - py::class_ motion_frame(m, "motion_frame"); + .def_readwrite("translation", &rs2_pose::translation, "X, Y, Z values of translation, in meters (relative to initial position)") + .def_readwrite("velocity", &rs2_pose::velocity, "X, Y, Z values of velocity, in meter/sec") + .def_readwrite("acceleration", &rs2_pose::acceleration, "X, Y, Z values of acceleration, in meter/sec^2") + .def_readwrite("rotation", &rs2_pose::rotation, "Qi, Qj, Qk, Qr components of rotation as represented in quaternion rotation (relative to initial position)") + .def_readwrite("angular_velocity", &rs2_pose::angular_velocity, "X, Y, Z values of angular velocity, in radians/sec") + .def_readwrite("angular_acceleration", &rs2_pose::angular_acceleration, "X, Y, Z values of angular acceleration, in radians/sec^2") + .def_readwrite("tracker_confidence", &rs2_pose::tracker_confidence, "Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High") + .def_readwrite("mapper_confidence", &rs2_pose::mapper_confidence, "Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High"); + + py::class_ motion_frame(m, "motion_frame", "Extends frame class with additional motion related attributes and functions"); motion_frame.def(py::init()) - .def("get_motion_data", &rs2::motion_frame::get_motion_data, "Returns motion info of frame."); + .def("get_motion_data", &rs2::motion_frame::get_motion_data, "Retrieve the motion data from IMU sensor.") + .def_property_readonly("motion_data", &rs2::motion_frame::get_motion_data, "Motion data from IMU sensor. 
Identical to calling get_motion_data."); - py::class_ pose_frame(m, "pose_frame"); + py::class_ pose_frame(m, "pose_frame", "Extends frame class with additional pose related attributes and functions."); pose_frame.def(py::init()) - .def("get_pose_data", &rs2::pose_frame::get_pose_data); + .def("get_pose_data", &rs2::pose_frame::get_pose_data, "Retrieve the pose data from T2xx position tracking sensor.") + .def_property_readonly("pose_data", &rs2::pose_frame::get_pose_data, "Pose data from T2xx position tracking sensor. Identical to calling get_pose_data."); - py::class_ vertex(m, "vertex"); + py::class_ vertex(m, "vertex"); // No docstring in C++ vertex.def_readwrite("x", &rs2::vertex::x) .def_readwrite("y", &rs2::vertex::y) .def_readwrite("z", &rs2::vertex::z) .def(py::init([]() { return rs2::vertex{}; })) .def(py::init([](float x, float y, float z) { return rs2::vertex{ x,y,z }; })) .def("__repr__", [](const rs2::vertex& v) { - std::ostringstream oss; - oss << v.x << ", " << v.y << ", " << v.z; - return oss.str(); - }); + std::ostringstream oss; + oss << v.x << ", " << v.y << ", " << v.z; + return oss.str(); + }); - py::class_ texture_coordinate(m, "texture_coordinate"); + py::class_ texture_coordinate(m, "texture_coordinate"); // No docstring in C++ texture_coordinate.def_readwrite("u", &rs2::texture_coordinate::u) .def_readwrite("v", &rs2::texture_coordinate::v) .def(py::init([]() { return rs2::texture_coordinate{}; })) .def(py::init([](float u, float v) { return rs2::texture_coordinate{ u, v }; })) .def("__repr__", [](const rs2::texture_coordinate& t) { - std::ostringstream oss; - oss << t.u << ", " << t.v; - return oss.str(); - }); + std::ostringstream oss; + oss << t.u << ", " << t.v; + return oss.str(); + }); - py::class_ points(m, "points"); + py::class_ points(m, "points", "Extend frame class with additional point cloud related attributes and functions."); points.def(py::init<>()) .def(py::init()) - .def("get_vertices", [](rs2::points& self, int dims) -> 
BufData - { + .def("get_vertices", [](rs2::points& self, int dims) { auto verts = const_cast(self.get_vertices()); auto profile = self.get_profile().as(); size_t h = profile.height(), w = profile.width(); @@ -474,9 +471,8 @@ PYBIND11_MODULE(NAME, m) { default: throw std::domain_error("dims arg only supports values of 1, 2 or 3"); } - }, py::keep_alive<0, 1>(), "dims"_a=1) - .def("get_texture_coordinates", [](rs2::points& self, int dims) -> BufData - { + }, "Retrieve the vertices", py::keep_alive<0, 1>(), "dims"_a=1) + .def("get_texture_coordinates", [](rs2::points& self, int dims) { auto tex = const_cast(self.get_texture_coordinates()); auto profile = self.get_profile().as(); size_t h = profile.height(), w = profile.width(); @@ -490,39 +486,51 @@ PYBIND11_MODULE(NAME, m) { default: throw std::domain_error("dims arg only supports values of 1, 2 or 3"); } - }, py::keep_alive<0, 1>(), "dims"_a=1) - .def("export_to_ply", &rs2::points::export_to_ply) - .def("size", &rs2::points::size); + }, "Return the texture coordinate(uv map) for the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) + .def("export_to_ply", &rs2::points::export_to_ply, "Export current point cloud to PLY file") + .def("size", &rs2::points::size); // No docstring in C++ - py::class_ frameset(m, "composite_frame"); + py::class_ frameset(m, "composite_frame", "Extend frame class with additional frameset related attributes and functions"); frameset.def(py::init()) - .def("first_or_default", &rs2::frameset::first_or_default, "s"_a, "f"_a = RS2_FORMAT_ANY) - .def("first", &rs2::frameset::first, "s"_a, "f"_a = RS2_FORMAT_ANY) - .def("size", &rs2::frameset::size) - .def("foreach", [](const rs2::frameset& self, std::function callable) - { - self.foreach(callable); - }) + .def("first_or_default", &rs2::frameset::first_or_default, "Retrieve the first frame of specific stream and " + "format types, if no frame found, return the default one. 
(frame instance)", "s"_a, "f"_a = RS2_FORMAT_ANY) + .def("first", &rs2::frameset::first, "Retrieve the first frame of specific stream type, " + "if no frame found, an error will be thrown.", "s"_a, "f"_a = RS2_FORMAT_ANY) + .def("size", &rs2::frameset::size, "Return the size of the frameset") + .def("__len__", &rs2::frameset::size, "Return the size of the frameset") + .def("foreach", [](const rs2::frameset& self, std::function callable) { + self.foreach(callable); + }, "Extract internal frame handles from the frameset and invoke the action function", "callable"_a) .def("__getitem__", &rs2::frameset::operator[]) - .def("get_depth_frame", &rs2::frameset::get_depth_frame) - .def("get_color_frame", &rs2::frameset::get_color_frame) - .def("get_infrared_frame", &rs2::frameset::get_infrared_frame, "index"_a = 0) - .def("get_fisheye_frame", &rs2::frameset::get_fisheye_frame) - //.def("get_pose_frame", &rs2::frameset::get_pose_frame) + .def("get_depth_frame", &rs2::frameset::get_depth_frame, "Retrieve the first depth frame, if no frame found, return the default one. (frame instance)") + .def("get_color_frame", &rs2::frameset::get_color_frame, "Retrieve the first color frame, if no frame found, search the " + "color frame from IR stream. If one still can't be found, return the default one. 
(frame instance)") + .def("get_infrared_frame", &rs2::frameset::get_infrared_frame, "Retrieve the first infrared frame, if no frame " + "found, return the default one (frame instance)", "index"_a = 0) + .def("get_fisheye_frame", &rs2::frameset::get_fisheye_frame, "Retrieve the fisheye monochrome video frame", "index"_a=0) + .def("get_pose_frame", &rs2::frameset::get_pose_frame, "Retrieve the pose frame", "index"_a = 0) .def("get_pose_frame", [](rs2::frameset& self){ return self.get_pose_frame(); }) - .def("__iter__", [](rs2::frameset& self) - { - return py::make_iterator(self.begin(), self.end()); - }, py::keep_alive<0, 1>()) - .def("size", &rs2::frameset::size) - .def("__getitem__", &rs2::frameset::operator[]); + .def("__iter__", [](rs2::frameset& self) { + return py::make_iterator(self.begin(), self.end()); + }, py::keep_alive<0, 1>()) + .def("__getitem__", [](const rs2::frameset& self, py::slice slice) { + size_t start, stop, step, slicelength; + if (!slice.compute(self.size(), &start, &stop, &step, &slicelength)) + throw py::error_already_set(); + auto *flist = new std::vector(slicelength); + for (size_t i = 0; i < slicelength; ++i) { + (*flist)[i] = self[start]; + start += step; + } + return flist; + }); - py::class_ depth_frame(m, "depth_frame"); + py::class_ depth_frame(m, "depth_frame", "Extend video_frame class with additional depth related attributes and functions."); depth_frame.def(py::init()) - .def("get_distance", &rs2::depth_frame::get_distance, "x"_a, "y"_a); + .def("get_distance", &rs2::depth_frame::get_distance, "x"_a, "y"_a, "Provide the depth in metric units at the given pixel"); /* rs2_processing.hpp */ - py::class_ filter_interface(m, "filter_interface"); + py::class_ filter_interface(m, "filter_interface", "Interface for frame filtering functionality"); filter_interface.def("process", &rs2::filter_interface::process, "frame"_a); // Base class for options interface. 
Should be used via sensor From 35c0247f66432ec798980853f46c0c3adf1501d5 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Sun, 21 Apr 2019 14:18:55 +0300 Subject: [PATCH 03/16] More python doc work --- include/librealsense2/hpp/rs_frame.hpp | 6 +-- include/librealsense2/hpp/rs_processing.hpp | 13 +++--- wrappers/python/python.cpp | 49 ++++++++++++--------- 3 files changed, 38 insertions(+), 30 deletions(-) diff --git a/include/librealsense2/hpp/rs_frame.hpp b/include/librealsense2/hpp/rs_frame.hpp index 46fe41375a..f61ee8890b 100644 --- a/include/librealsense2/hpp/rs_frame.hpp +++ b/include/librealsense2/hpp/rs_frame.hpp @@ -678,7 +678,7 @@ namespace rs2 } } /** - * Retrieve the vertices + * Retrieve the vertices for the point cloud * \param[in] vertex* - pointer of vertex sturcture */ const vertex* get_vertices() const @@ -690,7 +690,7 @@ namespace rs2 } /** - * Export current point cloud to PLY file + * Export the point cloud to PLY file * \param[in] string fname - file name of the PLY to be saved * \param[in] video_frame texture - the texture for the PLY. */ @@ -703,7 +703,7 @@ namespace rs2 error::handle(e); } /** - * return the texture coordinate(uv map) for the point cloud + * Retrieve the texture coordinates (uv map) for the point cloud * \return texture_coordinate* - pointer of texture coordinates. */ const texture_coordinate* get_texture_coordinates() const diff --git a/include/librealsense2/hpp/rs_processing.hpp b/include/librealsense2/hpp/rs_processing.hpp index 349ef58a89..51598fa7e7 100644 --- a/include/librealsense2/hpp/rs_processing.hpp +++ b/include/librealsense2/hpp/rs_processing.hpp @@ -11,14 +11,15 @@ namespace rs2 { /** - * The source used to generate the frame, which usually generated by low level driver for each sensor. The frame_source is one of the parameter of processing_block callback function, which can be used to re-generate the frame and via frame_ready to invoke another callback function - * to notify application frame is ready. 
Best understanding please refer to "video_processing_thread" code snippet in rs-measure.cpp. + * The source used to generate frames, which is usually done by the low level driver for each sensor. frame_source is one of the parameters + * of processing_block's callback function, which can be used to re-generate the frame and via frame_ready invoke another callback function + * to notify application frame is ready. Please refer to "video_processing_thread" code snippet in rs-measure.cpp for a detailed usage example. */ class frame_source { public: /** - * Allocate video frame with given params + * Allocate a new video frame with given params * * \param[in] profile Stream profile going to allocate. * \param[in] original Original frame, if new_bpp, new_width, new_height or new_stride is zero, newly created frame will base on original frame's metadata to allocate new frame. If frame_type is RS2_EXTENSION_DEPTH_FRAME, the original of the returned frame will be set to it. @@ -131,7 +132,7 @@ namespace rs2 frame_queue() : frame_queue(1) {} /** - * enqueue new frame into a queue + * enqueue new frame into the queue * \param[in] f - frame handle to enqueue (this operation passed ownership to the queue) */ void enqueue(frame f) const @@ -188,7 +189,7 @@ namespace rs2 enqueue(std::move(f)); } /** - * return the capacity of the queue + * Return the capacity of the queue * \return capacity size */ size_t capacity() const { return _capacity; } @@ -199,7 +200,7 @@ namespace rs2 }; /** - * Define the processing block flow, inherit this class to generate your own processing_block. Best understanding is to refer to the viewer class in examples.hpp + * Define the processing block flow, inherit this class to generate your own processing_block. Please refer to the viewer class in examples.hpp for a detailed usage example. 
*/ class processing_block : public options { diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index 0af61fb96f..44cee145c7 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -471,7 +471,7 @@ PYBIND11_MODULE(NAME, m) { default: throw std::domain_error("dims arg only supports values of 1, 2 or 3"); } - }, "Retrieve the vertices", py::keep_alive<0, 1>(), "dims"_a=1) + }, "Retrieve the vertices for the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) .def("get_texture_coordinates", [](rs2::points& self, int dims) { auto tex = const_cast(self.get_texture_coordinates()); auto profile = self.get_profile().as(); @@ -486,10 +486,11 @@ PYBIND11_MODULE(NAME, m) { default: throw std::domain_error("dims arg only supports values of 1, 2 or 3"); } - }, "Return the texture coordinate(uv map) for the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) - .def("export_to_ply", &rs2::points::export_to_ply, "Export current point cloud to PLY file") + }, "Retrieve the texture coordinates (uv map) for the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) + .def("export_to_ply", &rs2::points::export_to_ply, "Export the point cloud to PLY file") .def("size", &rs2::points::size); // No docstring in C++ + // TODO: Deprecate composite_frame, replace with frameset py::class_ frameset(m, "composite_frame", "Extend frame class with additional frameset related attributes and functions"); frameset.def(py::init()) .def("first_or_default", &rs2::frameset::first_or_default, "Retrieve the first frame of specific stream and " @@ -531,10 +532,10 @@ PYBIND11_MODULE(NAME, m) { /* rs2_processing.hpp */ py::class_ filter_interface(m, "filter_interface", "Interface for frame filtering functionality"); - filter_interface.def("process", &rs2::filter_interface::process, "frame"_a); + filter_interface.def("process", &rs2::filter_interface::process, "frame"_a); // No docstring in C++ - // Base class for options interface. 
Should be used via sensor - py::class_ options(m, "options"); + + py::class_ options(m, "options", "Base class for options interface. Should be used via sensor."); options.def("is_option_read_only", &rs2::options::is_option_read_only, "Check if particular option " "is read only.", "option"_a) .def("get_option", &rs2::options::get_option, "Read option value from the device.", "option"_a) @@ -550,22 +551,26 @@ PYBIND11_MODULE(NAME, m) { "of a supported option"); /* rs2_processing.hpp */ - py::class_ frame_source(m, "frame_source"); - frame_source.def("allocate_video_frame", &rs2::frame_source::allocate_video_frame, + py::class_ frame_source(m, "frame_source", "The source used to generate frames, which is usually done by the low level driver for each sensor. " + "frame_source is one of the parameters of processing_block's callback function, which can be used to re-generate the " + "frame and via frame_ready invoke another callback function to notify application frame is ready. Please refer to " + "\"video_processing_thread\" code snippet in rs-measure.cpp for a detailed usage example."); + frame_source.def("allocate_video_frame", &rs2::frame_source::allocate_video_frame, "Allocate a new video frame with given params" "profile"_a, "original"_a, "new_bpp"_a = 0, "new_width"_a = 0, "new_height"_a = 0, "new_stride"_a = 0, "frame_type"_a = RS2_EXTENSION_VIDEO_FRAME) .def("allocate_points", &rs2::frame_source::allocate_points, "profile"_a, - "original"_a) + "original"_a) // No docstring in C++ .def("allocate_composite_frame", &rs2::frame_source::allocate_composite_frame, - "frames"_a) // does anything special need to be done for the vector argument? - .def("frame_ready", &rs2::frame_source::frame_ready, "result"_a); - - py::class_ frame_queue(m, "frame_queue"); - frame_queue.def(py::init(), "Create a frame queue. 
Frame queues are the simplest " - "cross-platform synchronization primitive provided by librealsense to help " - "developers who are not using async APIs.") + "Allocate composite frame with given params", "frames"_a) // does anything special need to be done for the vector argument? + .def("frame_ready", &rs2::frame_source::frame_ready, "Invoke the " + "callback funtion informing the frame is ready.", "result"_a); + + py::class_ frame_queue(m, "frame_queue", "Frame queues are the simplest cross-platform " + "synchronization primitive provided by librealsense to help " + "developers who are not using async APIs."); + frame_queue.def(py::init()) .def(py::init<>()) - .def("enqueue", &rs2::frame_queue::enqueue, "Enqueue a new frame into a queue.", "f"_a) + .def("enqueue", &rs2::frame_queue::enqueue, "Enqueue a new frame into the queue.", "f"_a) .def("wait_for_frame", &rs2::frame_queue::wait_for_frame, "Wait until a new frame " "becomes available in the queue and dequeue it.", "timeout_ms"_a = 5000, py::call_guard()) .def("poll_for_frame", [](const rs2::frame_queue &self) @@ -579,12 +584,14 @@ PYBIND11_MODULE(NAME, m) { rs2::frame frame; auto success = self.try_wait_for_frame(&frame, timeout_ms); return std::make_tuple(success, frame); - }, "timeout_ms"_a=5000, py::call_guard()) - .def("__call__", &rs2::frame_queue::operator()) - .def("capacity", &rs2::frame_queue::capacity); + }, "timeout_ms"_a=5000, py::call_guard()) // No docstring in C++ + .def("__call__", &rs2::frame_queue::operator(), "Identical to calling enqueue", "f"_a) + .def("capacity", &rs2::frame_queue::capacity, "Return the capacity of the queue"); // Not binding frame_processor_callback, templated - py::class_ processing_block(m, "processing_block"); + + py::class_ processing_block(m, "processing_block", "Define the processing block flow, inherit this class to generate your own " + "processing_block. 
Please refer to the viewer class in examples.hpp for a detailed usage example."); processing_block.def("__init__", [](rs2::processing_block &self, std::function processing_function) { new (&self) rs2::processing_block(processing_function); }, "processing_function"_a); From e270d4db3b6a3b80db21be83c094019e8970e31f Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Wed, 30 Jan 2019 13:16:14 +0200 Subject: [PATCH 04/16] [python] Move away from deprecated placement-new style init functions --- wrappers/python/python.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index 44cee145c7..941aebd0e3 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -592,20 +592,19 @@ PYBIND11_MODULE(NAME, m) { py::class_ processing_block(m, "processing_block", "Define the processing block flow, inherit this class to generate your own " "processing_block. Please refer to the viewer class in examples.hpp for a detailed usage example."); - processing_block.def("__init__", [](rs2::processing_block &self, std::function processing_function) { - new (&self) rs2::processing_block(processing_function); - }, "processing_function"_a); - processing_block.def("start", [](rs2::processing_block& self, std::function f) - { - self.start(f); - }, "callback"_a) - .def("invoke", &rs2::processing_block::invoke, "f"_a) - /*.def("__call__", &rs2::processing_block::operator(), "f"_a)*/; + processing_block.def(py::init([](std::function processing_function) { + return new rs2::processing_block(processing_function); + }), "processing_function"_a) + .def("start", [](rs2::processing_block& self, std::function f) { + self.start(f); + }, "callback"_a) + .def("invoke", &rs2::processing_block::invoke, "f"_a) + /*.def("__call__", &rs2::processing_block::operator(), "f"_a)*/; py::class_ filter(m, "filter"); - filter.def("__init__", [](rs2::filter &self, std::function filter_function, int queue_size){ - 
new (&self) rs2::filter(filter_function, queue_size); - }, "filter_function"_a, "queue_size"_a = 1); + filter.def(py::init([](std::function filter_function, int queue_size){ + return new rs2::filter(filter_function, queue_size); + }), "filter_function"_a, "queue_size"_a = 1); // Not binding syncer_processing_block, not in Python API From 0ccb2c9bd4a67fbc5b43facc70a33d24d340e63f Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Sun, 21 Apr 2019 16:22:49 +0300 Subject: [PATCH 05/16] More updates to python docs --- include/librealsense2/hpp/rs_processing.hpp | 72 +++--- .../librealsense2/hpp/rs_record_playback.hpp | 4 +- wrappers/python/python.cpp | 205 +++++++++++------- 3 files changed, 159 insertions(+), 122 deletions(-) diff --git a/include/librealsense2/hpp/rs_processing.hpp b/include/librealsense2/hpp/rs_processing.hpp index 51598fa7e7..afd95825d0 100644 --- a/include/librealsense2/hpp/rs_processing.hpp +++ b/include/librealsense2/hpp/rs_processing.hpp @@ -210,7 +210,7 @@ namespace rs2 /** * Start the processing block with callback function on_frame to inform the application the frame is processed. * - * \param[in] on_frame callback function for noticing the frame to be processed is ready. + * \param[in] on_frame callback function for notifing the frame to be processed is ready. */ template void start(S on_frame) @@ -310,7 +310,7 @@ namespace rs2 }; /** - * Define the processing block flow, inherit this class to generate your own processing_block. Best understanding is to refer to the viewer class in examples.hpp + * Define the filter workflow, inherit this class to generate your own filter. Best understanding is to refer to the viewer class in examples.hpp */ class filter : public processing_block, public filter_interface { @@ -379,7 +379,7 @@ namespace rs2 }; /** - * Generating the 3D point cloud base on depth frame also create the mapped texture. + * Generates 3D point clouds based on depth frame. Can also map textures from color frame. 
*/ class pointcloud : public filter { @@ -417,7 +417,7 @@ namespace rs2 throw std::runtime_error("Error occured during execution of the processing block! See the log for more info"); } /** - * Map the point cloud to other frame. + * Map the point cloud to the given color frame. * * \param[in] mapped - the frame to be mapped to as texture. */ @@ -484,7 +484,7 @@ namespace rs2 { public: /** - * Creates depth thresholding processing block + * Creates depth thresholding filter * By controlling min and max options on the block, one could filter out depth values * that are either too large or too small, as a software post-processing step */ @@ -547,7 +547,7 @@ namespace rs2 { public: /** - * Sync instance to align the different frames from different streams + * Sync instance to align frames from different streams */ syncer(int queue_size = 1) :_results(queue_size) @@ -614,11 +614,11 @@ namespace rs2 { public: /** - Create align processing block + Create align filter Alignment is performed between a depth image and another image. To perform alignment of a depth image to the other, set the align_to parameter with the other stream type. - To perform alignment of a non depth image to a depth image, set the align_to parameter to RS2_STREAM_DEPTH - Camera calibration and frame's stream type are determined on the fly, according to the first valid frameset passed to process() + To perform alignment of a non depth image to a depth image, set the align_to parameter to RS2_STREAM_DEPTH. + Camera calibration and frame's stream type are determined on the fly, according to the first valid frameset passed to process(). * \param[in] align_to The stream type to which alignment should be made. 
*/ @@ -656,8 +656,8 @@ namespace rs2 { public: /** - * Create colorizer processing block - * Colorizer generate color image base on input depth frame + * Create colorizer filter + * Colorizer generate color image based on input depth frame */ colorizer() : filter(init(), 1) { } /** @@ -711,13 +711,13 @@ namespace rs2 { public: /** - * Create decimation filter processing block - * decimation filter performing downsampling by using the median with specific kernel size + * Create decimation filter + * Decimation filter performs downsampling by using the median with specific kernel size */ decimation_filter() : filter(init(), 1) {} /** - * Create decimation filter processing block - * decimation filter performing downsampling by using the median with specific kernel size + * Create decimation filter + * Decimation filter performs downsampling by using the median with specific kernel size * \param[in] magnitude - number of filter iterations. */ decimation_filter(float magnitude) : filter(init(), 1) @@ -757,18 +757,18 @@ namespace rs2 { public: /** - * Create temporal filter processing block with default settings - * temporal filter smooth the image by calculating multiple frames with alpha and delta settings - * alpha defines the weight of current frame, delta defines threshold for edge classification and preserving. + * Create temporal filter with default settings + * Temporal filter smooths the image by calculating multiple frames with alpha and delta settings + * alpha defines the weight of current frame, and delta defines the threshold for edge classification and preserving. 
* For more information, check the temporal-filter.cpp */ temporal_filter() : filter(init(), 1) {} /** - * Create temporal filter processing block with user settings - * temporal filter smooth the image by calculating multiple frames with alpha and delta settings + * Create temporal filter with user settings + * Temporal filter smooths the image by calculating multiple frames with alpha and delta settings * \param[in] smooth_alpha - defines the weight of current frame. - * \param[in] smooth_delta - delta defines threshold for edge classification and preserving. - * \param[in] persistence_control - A set of predefined rules (masks) that govern when missing pixels will be replace with the last valid value so that the data will remain persistent over time: + * \param[in] smooth_delta - delta defines the threshold for edge classification and preserving. + * \param[in] persistence_control - A set of predefined rules (masks) that govern when missing pixels will be replaced with the last valid value so that the data will remain persistent over time: * 0 - Disabled - Persistency filter is not activated and no hole filling occurs. 
* 1 - Valid in 8/8 - Persistency activated if the pixel was valid in 8 out of the last 8 frames * 2 - Valid in 2/last 3 - Activated if the pixel was valid in two out of the last 3 frames @@ -778,7 +778,7 @@ namespace rs2 * 6 - Valid in 1/last 5 - Activated if the pixel was valid in one out of the last 5 frames * 7 - Valid in 1/last 8 - Activated if the pixel was valid in one out of the last 8 frames * 8 - Persist Indefinitely - Persistency will be imposed regardless of the stored history(most aggressive filtering) - * For more information, check the temporal-filter.cpp + * For more information, check temporal-filter.cpp */ temporal_filter(float smooth_alpha, float smooth_delta, int persistence_control) : filter(init(), 1) { @@ -818,17 +818,17 @@ namespace rs2 { public: /** - * Create spatial filter processing block - * spatial filter smooth the image by calculating frame with alpha and delta settings - * alpha defines he weight of the current pixel for smoothing is bounded within [25..100]%, + * Create spatial filter + * Spatial filter smooths the image by calculating frame with alpha and delta settings + * alpha defines the weight of the current pixel for smoothing, and is bounded within [25..100]%, * delta defines the depth gradient below which the smoothing will occur as number of depth levels * For more information, check the spatial-filter.cpp */ spatial_filter() : filter(init(), 1) { } /** - * Create spatial filter processing block - * spatial filter smooth the image by calculating frame with alpha and delta settings + * Create spatial filter + * Spatial filter smooths the image by calculating frame with alpha and delta settings * \param[in] smooth_alpha - defines the weight of the current pixel for smoothing is bounded within [25..100]% * \param[in] smooth_delta - defines the depth gradient below which the smoothing will occur as number of depth levels * \param[in] magnitude - number of filter iterations. 
@@ -874,8 +874,8 @@ namespace rs2 { public: /** - * Create disparity transform processing block - * the processing convert the depth and disparity from each pixel + * Create disparity transform filter + * Converts from depth representation to disparity representation and vice-versa in depth frames */ disparity_transform(bool transform_to_disparity = true) : filter(init(transform_to_disparity), 1) { } @@ -909,8 +909,8 @@ namespace rs2 { public: /** - * Create zero order fix processing block - * the processing block fix the zero order artifact + * Create zero order fix filter + * The filter fixes the zero order artifact */ zero_order_invalidation() : filter(init()) {} @@ -944,14 +944,14 @@ namespace rs2 { public: /** - * Create hole filling processing block - * the processing perform the hole filling base on different hole filling mode. + * Create hole filling filter + * The processing performed depends on the selected hole filling mode. */ hole_filling_filter() : filter(init(), 1) {} /** - * Create hole filling processing block - * the processing perform the hole filling base on different hole filling mode. + * Create hole filling filter + * The processing performed depends on the selected hole filling mode. 
* \param[in] mode - select the hole fill mode: * 0 - fill_from_left - Use the value from the left neighbor pixel to fill the hole * 1 - farest_from_around - Use the value from the neighboring pixel which is furthest away from the sensor diff --git a/include/librealsense2/hpp/rs_record_playback.hpp b/include/librealsense2/hpp/rs_record_playback.hpp index 8f127dee4c..1668ef9317 100644 --- a/include/librealsense2/hpp/rs_record_playback.hpp +++ b/include/librealsense2/hpp/rs_record_playback.hpp @@ -145,7 +145,7 @@ namespace rs2 /** * Register to receive callback from playback device upon its status changes * - * Callbacks are invoked from the reading thread, any heavy processing in the callback handler will affect + * Callbacks are invoked from the reading thread, and as such any heavy processing in the callback handler will affect * the reading thread and may cause frame drops\ high latency * \param[in] callback A callback handler that will be invoked when the playback status changes, can be any callable object accepting rs2_playback_status */ @@ -246,7 +246,7 @@ namespace rs2 } /** - * Unpauses the recording device, making it resume recording + * Unpauses the recording device, making it resume recording. */ void resume() { diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index 941aebd0e3..d57e1c97c4 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -535,7 +535,7 @@ PYBIND11_MODULE(NAME, m) { filter_interface.def("process", &rs2::filter_interface::process, "frame"_a); // No docstring in C++ - py::class_ options(m, "options", "Base class for options interface. Should be used via sensor."); + py::class_ options(m, "options", "Base class for options interface. 
Should be used via sensor or processing_block."); // No docstring in C++ options.def("is_option_read_only", &rs2::options::is_option_read_only, "Check if particular option " "is read only.", "option"_a) .def("get_option", &rs2::options::get_option, "Read option value from the device.", "option"_a) @@ -547,159 +547,196 @@ PYBIND11_MODULE(NAME, m) { .def("get_option_description", &rs2::options::get_option_description, "Get option description.", "option"_a) .def("get_option_value_description", &rs2::options::get_option_value_description, "Get option value description " "(In case a specific option value holds special meaning)", "option"_a, "value"_a) - .def("get_supported_options", &rs2::options::get_supported_options, "Retrieve list of supported options, " - "of a supported option"); + .def("get_supported_options", &rs2::options::get_supported_options, "Retrieve list of supported options"); // No docstring in C++ /* rs2_processing.hpp */ py::class_ frame_source(m, "frame_source", "The source used to generate frames, which is usually done by the low level driver for each sensor. " "frame_source is one of the parameters of processing_block's callback function, which can be used to re-generate the " - "frame and via frame_ready invoke another callback function to notify application frame is ready. 
Please refer to " - "\"video_processing_thread\" code snippet in rs-measure.cpp for a detailed usage example."); + "frame and via frame_ready invoke another callback function to notify application frame is ready."); frame_source.def("allocate_video_frame", &rs2::frame_source::allocate_video_frame, "Allocate a new video frame with given params" "profile"_a, "original"_a, "new_bpp"_a = 0, "new_width"_a = 0, "new_height"_a = 0, "new_stride"_a = 0, "frame_type"_a = RS2_EXTENSION_VIDEO_FRAME) - .def("allocate_points", &rs2::frame_source::allocate_points, "profile"_a, - "original"_a) // No docstring in C++ - .def("allocate_composite_frame", &rs2::frame_source::allocate_composite_frame, - "Allocate composite frame with given params", "frames"_a) // does anything special need to be done for the vector argument? - .def("frame_ready", &rs2::frame_source::frame_ready, "Invoke the " - "callback funtion informing the frame is ready.", "result"_a); + .def("allocate_points", &rs2::frame_source::allocate_points, "profile"_a, + "original"_a) // No docstring in C++ + .def("allocate_composite_frame", &rs2::frame_source::allocate_composite_frame, + "Allocate composite frame with given params", "frames"_a) + .def("frame_ready", &rs2::frame_source::frame_ready, "Invoke the " + "callback funtion informing the frame is ready.", "result"_a); py::class_ frame_queue(m, "frame_queue", "Frame queues are the simplest cross-platform " "synchronization primitive provided by librealsense to help " "developers who are not using async APIs."); frame_queue.def(py::init()) - .def(py::init<>()) - .def("enqueue", &rs2::frame_queue::enqueue, "Enqueue a new frame into the queue.", "f"_a) - .def("wait_for_frame", &rs2::frame_queue::wait_for_frame, "Wait until a new frame " - "becomes available in the queue and dequeue it.", "timeout_ms"_a = 5000, py::call_guard()) - .def("poll_for_frame", [](const rs2::frame_queue &self) - { - rs2::frame frame; - self.poll_for_frame(&frame); - return frame; - }, "Poll if a 
new frame is available and dequeue it if it is") - .def("try_wait_for_frame", [](const rs2::frame_queue &self, unsigned int timeout_ms) - { - rs2::frame frame; - auto success = self.try_wait_for_frame(&frame, timeout_ms); - return std::make_tuple(success, frame); - }, "timeout_ms"_a=5000, py::call_guard()) // No docstring in C++ - .def("__call__", &rs2::frame_queue::operator(), "Identical to calling enqueue", "f"_a) - .def("capacity", &rs2::frame_queue::capacity, "Return the capacity of the queue"); + .def(py::init<>()) + .def("enqueue", &rs2::frame_queue::enqueue, "Enqueue a new frame into the queue.", "f"_a) + .def("wait_for_frame", &rs2::frame_queue::wait_for_frame, "Wait until a new frame " + "becomes available in the queue and dequeue it.", "timeout_ms"_a = 5000, py::call_guard()) + .def("poll_for_frame", [](const rs2::frame_queue &self) { + rs2::frame frame; + self.poll_for_frame(&frame); + return frame; + }, "Poll if a new frame is available and dequeue it if it is") + .def("try_wait_for_frame", [](const rs2::frame_queue &self, unsigned int timeout_ms) { + rs2::frame frame; + auto success = self.try_wait_for_frame(&frame, timeout_ms); + return std::make_tuple(success, frame); + }, "timeout_ms"_a=5000, py::call_guard()) // No docstring in C++ + .def("__call__", &rs2::frame_queue::operator(), "Identical to calling enqueue", "f"_a) + .def("capacity", &rs2::frame_queue::capacity, "Return the capacity of the queue"); // Not binding frame_processor_callback, templated - py::class_ processing_block(m, "processing_block", "Define the processing block flow, inherit this class to generate your own " - "processing_block. 
Please refer to the viewer class in examples.hpp for a detailed usage example."); + py::class_ processing_block(m, "processing_block", "Define the processing block workflow, inherit this class to " + "generate your own processing_block."); processing_block.def(py::init([](std::function processing_function) { return new rs2::processing_block(processing_function); }), "processing_function"_a) .def("start", [](rs2::processing_block& self, std::function f) { self.start(f); - }, "callback"_a) - .def("invoke", &rs2::processing_block::invoke, "f"_a) + }, "Start the processing block with callback function to inform the application the frame is processed.", "callback"_a) + .def("invoke", &rs2::processing_block::invoke, "Ask processing block to process the frame", "f"_a) /*.def("__call__", &rs2::processing_block::operator(), "f"_a)*/; + // supports(camera_info) / get_info(camera_info)? + + py::class_ filter(m, "filter", "Define the filter workflow, inherit this class to generate your own filter."); + filter.def(py::init([](std::function filter_function, int queue_size) { + return new rs2::filter(filter_function, queue_size); + }), "filter_function"_a, "queue_size"_a = 1); + // get_queue? + // is/as? - py::class_ filter(m, "filter"); - filter.def(py::init([](std::function filter_function, int queue_size){ - return new rs2::filter(filter_function, queue_size); - }), "filter_function"_a, "queue_size"_a = 1); // Not binding syncer_processing_block, not in Python API - py::class_ pointcloud(m, "pointcloud"); - + py::class_ pointcloud(m, "pointcloud", "Generates 3D point clouds based on depth frame. 
Can also map textures from color frame."); pointcloud.def(py::init<>()) .def(py::init(), "stream"_a, "index"_a = 0) - .def("calculate", &rs2::pointcloud::calculate, "depth"_a) - .def("map_to", &rs2::pointcloud::map_to, "mapped"_a); + .def("calculate", &rs2::pointcloud::calculate, "Generate the pointcloud and texture mappings of depth map.", "depth"_a) + .def("map_to", &rs2::pointcloud::map_to, "Map the point cloud to the given color frame.", "mapped"_a); - py::class_ syncer(m, "syncer"); + py::class_ syncer(m, "syncer", "Sync instance to align frames from different streams"); syncer.def(py::init(), "queue_size"_a = 1) .def("wait_for_frames", &rs2::syncer::wait_for_frames, "Wait until a coherent set " "of frames becomes available", "timeout_ms"_a = 5000, py::call_guard()) - .def("poll_for_frames", [](const rs2::syncer &self) - { + .def("poll_for_frames", [](const rs2::syncer &self) { rs2::frameset frames; self.poll_for_frames(&frames); return frames; }, "Check if a coherent set of frames is available") - .def("try_wait_for_frames", [](const rs2::syncer &self, unsigned int timeout_ms) - { + .def("try_wait_for_frames", [](const rs2::syncer &self, unsigned int timeout_ms) { rs2::frameset fs; auto success = self.try_wait_for_frames(&fs, timeout_ms); return std::make_tuple(success, fs); - }, "timeout_ms"_a = 5000, py::call_guard()); + }, "timeout_ms"_a = 5000, py::call_guard()); // No docstring in C++ /*.def("__call__", &rs2::syncer::operator(), "frame"_a)*/ - py::class_ threshold(m, "threshold_filter"); - threshold.def(py::init<>()) - .def(py::init(), "min_dist"_a, "max_dist"_a); + py::class_ threshold(m, "threshold_filter", "Depth thresholding filter. 
By controlling min and " "max options on the block, one could filter out depth values that are either too large " "or too small, as a software post-processing step"); + .def(py::init(), "min_dist"_a=0.15f, "max_dist"_a=4.f); - py::class_ colorizer(m, "colorizer"); + py::class_ colorizer(m, "colorizer", "Colorizer filter generates color images based on input depth frame"); colorizer.def(py::init<>()) - .def(py::init(), "color_scheme"_a) - .def("colorize", &rs2::colorizer::colorize, "depth"_a) + .def(py::init(), "Possible values for color_scheme:\n" + "0 - Jet\n" + "1 - Classic\n" + "2 - WhiteToBlack\n" + "3 - BlackToWhite\n" + "4 - Bio\n" + "5 - Cold\n" + "6 - Warm\n" + "7 - Quantized\n" + "8 - Pattern", "color_scheme"_a) + .def("colorize", &rs2::colorizer::colorize, "Start to generate color image based on depth frame", "depth"_a) /*.def("__call__", &rs2::colorizer::operator())*/; - py::class_ align(m, "align"); - align.def(py::init(), "align_to"_a) - .def("process", (rs2::frameset (rs2::align::*)(rs2::frameset)) &rs2::align::process, "frames"_a); + py::class_ align(m, "align", "Performs alignment between depth image and another image."); + align.def(py::init(), "To perform alignment of a depth image to the other, set the align_to parameter with the other stream type.\n" + "To perform alignment of a non depth image to a depth image, set the align_to parameter to RS2_STREAM_DEPTH.\n" + "Camera calibration and frame's stream type are determined on the fly, according to the first valid frameset passed to process().", "align_to"_a) + .def("process", (rs2::frameset (rs2::align::*)(rs2::frameset)) &rs2::align::process, "Run the alignment process on the given frames to get an aligned set of frames", "frames"_a); - py::class_ decimation_filter(m, "decimation_filter"); + py::class_ decimation_filter(m, "decimation_filter", "Performs downsampling by using the median with specific kernel size."); decimation_filter.def(py::init<>()) .def(py::init(), "magnitude"_a); - 
py::class_ temporal_filter(m, "temporal_filter"); + py::class_ temporal_filter(m, "temporal_filter", "Temporal filter smooths the image by calculating multiple frames " + "with alpha and delta settings. Alpha defines the weight of current frame, and delta defines the " + "threshold for edge classification and preserving."); temporal_filter.def(py::init<>()) - .def(py::init(), "smooth_alpha"_a, "smooth_delta"_a, "persistence_control"_a); - - py::class_ spatial_filter(m, "spatial_filter"); + .def(py::init(), "Possible values for persistence_control:\n" + "1 - Valid in 8/8 - Persistency activated if the pixel was valid in 8 out of the last 8 frames\n" + "2 - Valid in 2 / last 3 - Activated if the pixel was valid in two out of the last 3 frames\n" + "3 - Valid in 2 / last 4 - Activated if the pixel was valid in two out of the last 4 frames\n" + "4 - Valid in 2 / 8 - Activated if the pixel was valid in two out of the last 8 frames\n" + "5 - Valid in 1 / last 2 - Activated if the pixel was valid in one of the last two frames\n" + "6 - Valid in 1 / last 5 - Activated if the pixel was valid in one out of the last 5 frames\n" + "7 - Valid in 1 / last 8 - Activated if the pixel was valid in one out of the last 8 frames\n" + "8 - Persist Indefinitely - Persistency will be imposed regardless of the stored history(most aggressive filtering)", + "smooth_alpha"_a, "smooth_delta"_a, "persistence_control"_a); + + py::class_ spatial_filter(m, "spatial_filter", "Spatial filter smooths the image by calculating frame with alpha and delta settings. " + "Alpha defines the weight of the current pixel for smoothing, and is bounded within [25..100]%. 
Delta " + "defines the depth gradient below which the smoothing will occur as number of depth levels."); spatial_filter.def(py::init<>()) .def(py::init(), "smooth_alpha"_a, "smooth_delta"_a, "magnitude"_a, "hole_fill"_a);; - py::class_ hole_filling_filter(m, "hole_filling_filter"); + py::class_ hole_filling_filter(m, "hole_filling_filter", "The processing performed depends on the selected hole filling mode."); hole_filling_filter.def(py::init<>()) - .def(py::init(), "mode"_a); + .def(py::init(), "Possible values for mode:\n" + "0 - fill_from_left - Use the value from the left neighbor pixel to fill the hole\n" + "1 - farest_from_around - Use the value from the neighboring pixel which is furthest away from the sensor\n" + "2 - nearest_from_around - -Use the value from the neighboring pixel closest to the sensor", "mode"_a); - py::class_ disparity_transform(m, "disparity_transform"); + py::class_ disparity_transform(m, "disparity_transform", "Converts from depth representation " + "to disparity representation and vice - versa in depth frames"); disparity_transform.def(py::init(), "transform_to_disparity"_a=true); - py::class_ yuy_decoder(m, "yuy_decoder"); + py::class_ yuy_decoder(m, "yuy_decoder", "Converts frames in raw YUY format to RGB. This conversion is somewhat costly, " + "but the SDK will automatically try to use SSE2, AVX, or CUDA instructions where available to " + "get better performance. 
Othere implementations (GLSL, OpenCL, Neon, NCS) should follow."); yuy_decoder.def(py::init<>()); - py::class_ zero_order_invalidation(m, "zero_order_invalidation"); + py::class_ zero_order_invalidation(m, "zero_order_invalidation", "Fixes the zero order artifact"); zero_order_invalidation.def(py::init<>()); /* rs_export.hpp */ - // py::class_ save_to_ply(m, "save_to_ply"); + // py::class_ save_to_ply(m, "save_to_ply"); // No docstring in C++ // save_to_ply.def(py::init(), "filename"_a = "RealSense Pointcloud ", "pc"_a = rs2::pointcloud()) // .def_readonly_static("option_ignore_color", &rs2::save_to_ply::OPTION_IGNORE_COLOR); - py::class_ save_single_frameset(m, "save_single_frameset"); + py::class_ save_single_frameset(m, "save_single_frameset"); // No docstring in C++ save_single_frameset.def(py::init(), "filename"_a = "RealSense Frameset "); /* rs2_record_playback.hpp */ - py::class_ playback(m, "playback"); + py::class_ playback(m, "playback"); // No docstring in C++ playback.def(py::init(), "device"_a) - .def("pause", &rs2::playback::pause) - .def("resume", &rs2::playback::resume) - .def("file_name", &rs2::playback::file_name) - .def("get_position", &rs2::playback::get_position) - .def("get_duration", &rs2::playback::get_duration) - .def("seek", &rs2::playback::seek, "time"_a) - .def("is_real_time", &rs2::playback::is_real_time) - .def("set_real_time", &rs2::playback::set_real_time, "real_time"_a) - .def("set_status_changed_callback", [](rs2::playback& self, std::function callback) - { self.set_status_changed_callback(callback); }, "callback"_a) - .def("current_status", &rs2::playback::current_status); - - py::class_ recorder(m, "recorder"); + .def("pause", &rs2::playback::pause, "Pauses the playback. Calling pause() in \"Paused\" status does nothing. If " + "pause() is called while playback status is \"Playing\" or \"Stopped\", the playback will not play until resume() is called.") + .def("resume", &rs2::playback::resume, "Un-pauses the playback. 
Calling resume() while playback status is \"Playing\" or \"Stopped\" does nothing.") + .def("file_name", &rs2::playback::file_name, "The name of the playback file.") + .def("get_position", &rs2::playback::get_position, "Retrieves the current position of the playback in the file in terms of time. Units are expressed in nanoseconds.") + .def("get_duration", &rs2::playback::get_duration, "Retrieves the total duration of the file.") + .def("seek", &rs2::playback::seek, "Sets the playback to a specified time point of the played data.", "time"_a) + .def("is_real_time", &rs2::playback::is_real_time, "Indicates if playback is in real time mode or non real time.") + .def("set_real_time", &rs2::playback::set_real_time, "Set the playback to work in real time or non real time. In real time mode, playback will " + "play the same way the file was recorded. If the application takes too long to handle the callback, frames may be dropped. In non real time " + "mode, playback will wait for each callback to finish handling the data before reading the next frame. In this mode no frames will be dropped, " + "and the application controls the framerate of playback via callback duration.", "real_time"_a) + // set_playback_speed? + .def("set_status_changed_callback", [](rs2::playback& self, std::function callback) { + self.set_status_changed_callback(callback); + }, "Register to receive callback from playback device upon its status changes. Callbacks are invoked from the reading thread, " + "and as such any heavy processing in the callback handler will affect the reading thread and may cause frame drops\\ high latency.", "callback"_a) + .def("current_status", &rs2::playback::current_status, "Returns the current state of the playback device"); + // Stop? 
+ + py::class_ recorder(m, "recorder", "Records the given device and saves it to the given file as rosbag format."); recorder.def(py::init()) - .def("pause", &rs2::recorder::pause) - .def("resume", &rs2::recorder::resume); + .def("pause", &rs2::recorder::pause, "Pause the recording device without stopping the actual device from streaming.") + .def("resume", &rs2::recorder::resume, "Unpauses the recording device, making it resume recording."); + // filename? /* rs2_sensor.hpp */ py::class_ stream_profile(m, "stream_profile"); From d8d2fe660fc593bca107ce59d5b91ce4946d19fe Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Sun, 21 Apr 2019 16:37:24 +0300 Subject: [PATCH 06/16] More updates to python docs --- include/librealsense2/hpp/rs_frame.hpp | 14 ++-- wrappers/python/python.cpp | 92 +++++++++++++------------- 2 files changed, 54 insertions(+), 52 deletions(-) diff --git a/include/librealsense2/hpp/rs_frame.hpp b/include/librealsense2/hpp/rs_frame.hpp index f61ee8890b..4bb990657d 100644 --- a/include/librealsense2/hpp/rs_frame.hpp +++ b/include/librealsense2/hpp/rs_frame.hpp @@ -32,7 +32,7 @@ namespace rs2 */ int stream_index() const { return _index; } /** - * Return the stream format + * Return the stream type * \return rs2_stream - stream type */ rs2_stream stream_type() const { return _type; } @@ -53,7 +53,7 @@ namespace rs2 int unique_id() const { return _uid; } /** - * Clone current profile and change the type, index and format to input parameters + * Clone the current profile and change the type, index and format to input parameters * \param[in] type - will change the stream type from the cloned profile. * \param[in] index - will change the stream index from the cloned profile. * \param[in] format - will change the stream format from the cloned profile. 
@@ -118,7 +118,7 @@ namespace rs2 } /** - * Checking if stream profile is marked/assigned as default, the meaning is that the profile will be selected when the user will request stream configuration using wildcards (RS2_DEPTH, -1,-1,... + * Checking if stream profile is marked/assigned as default, meaning that the profile will be selected when the user requests stream configuration using wildcards (RS2_DEPTH, -1,-1,... * \return bool - true or false. */ bool is_default() const { return _default; } @@ -153,8 +153,8 @@ namespace rs2 return res; } /** - * Assign extrinsic transformation parameters to a specific profile (sensor). The extrinsic information is generally available as part of the camera calibration, and librealsense is responsible to retrieve and assign these parameters where appropriate. - * The specific function is intended for synthetic/mock-up (software) devices for which the parameters are produced and injected by the user. + * Assign extrinsic transformation parameters to a specific profile (sensor). The extrinsic information is generally available as part of the camera calibration, and librealsense is responsible for retrieving and assigning these parameters where appropriate. + * This specific function is intended for synthetic/mock-up (software) devices for which the parameters are produced and injected by the user. * \param[in] stream_profile to - which stream profile to be registered with the extrinsic. * \param[in] rs2_extrinsics extrinsics - the extrinsics to be registered. */ @@ -201,7 +201,7 @@ namespace rs2 { public: /** - * Video stream profile instance which contans additional video attributes + * Video stream profile instance which contains additional video attributes * \param[in] stream_profile sp - assign exisiting stream_profile to this instance. 
*/ explicit video_stream_profile(const stream_profile& sp) @@ -231,7 +231,7 @@ namespace rs2 return _height; } /** - * Get stream profile instrinsics attribute + * Get stream profile instrinsics attributes * \return rs2_intrinsics - stream intrinsics. */ rs2_intrinsics get_intrinsics() const diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index d57e1c97c4..0c7a370e00 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -633,7 +633,7 @@ PYBIND11_MODULE(NAME, m) { py::class_ threshold(m, "threshold_filter", "Depth thresholding filter. By controlling min and " "max options on the block, one could filter out depth values that are either too large " "or too small, as a software post-processing step"); - .def(py::init(), "min_dist"_a=0.15f, "max_dist"_a=4.f); + threshold.def(py::init(), "min_dist"_a=0.15f, "max_dist"_a=4.f); py::class_ colorizer(m, "colorizer", "Colorizer filter generates color images based on input depth frame"); @@ -704,7 +704,7 @@ PYBIND11_MODULE(NAME, m) { /* rs_export.hpp */ // py::class_ save_to_ply(m, "save_to_ply"); // No docstring in C++ // save_to_ply.def(py::init(), "filename"_a = "RealSense Pointcloud ", "pc"_a = rs2::pointcloud()) - // .def_readonly_static("option_ignore_color", &rs2::save_to_ply::OPTION_IGNORE_COLOR); + // .def_readonly_static("option_ignore_color", &rs2::save_to_ply::OPTION_IGNORE_COLOR); py::class_ save_single_frameset(m, "save_single_frameset"); // No docstring in C++ save_single_frameset.def(py::init(), "filename"_a = "RealSense Frameset "); @@ -739,55 +739,57 @@ PYBIND11_MODULE(NAME, m) { // filename? 
/* rs2_sensor.hpp */ - py::class_ stream_profile(m, "stream_profile"); + py::class_ stream_profile(m, "stream_profile", "Stores details about the profile of a stream."); stream_profile.def(py::init<>()) - .def("stream_index", &rs2::stream_profile::stream_index) - .def("stream_type", &rs2::stream_profile::stream_type) - .def("format", &rs2::stream_profile::format) - .def("fps", &rs2::stream_profile::fps) - .def("unique_id", &rs2::stream_profile::unique_id) - .def("clone", &rs2::stream_profile::clone, "type"_a, "index"_a, "format"_a) + .def("stream_index", &rs2::stream_profile::stream_index, "The stream's index") + .def("stream_type", &rs2::stream_profile::stream_type, "The stream's type") + .def("format", &rs2::stream_profile::format, "The stream's format") + .def("fps", &rs2::stream_profile::fps, "The stream's framerate") + .def("unique_id", &rs2::stream_profile::unique_id, "Unique index assigned when the stream was created") + .def("clone", &rs2::stream_profile::clone, "Clone the current profile and change the type, index and format to input parameters", "type"_a, "index"_a, "format"_a) .def(BIND_DOWNCAST(stream_profile, stream_profile)) .def(BIND_DOWNCAST(stream_profile, video_stream_profile)) .def(BIND_DOWNCAST(stream_profile, motion_stream_profile)) - .def("stream_name", &rs2::stream_profile::stream_name) - .def("is_default", &rs2::stream_profile::is_default) - .def("__nonzero__", &rs2::stream_profile::operator bool) - .def("get_extrinsics_to", &rs2::stream_profile::get_extrinsics_to, "to"_a) - .def("register_extrinsics_to", &rs2::stream_profile::register_extrinsics_to, "to"_a, "extrinsics"_a) - .def("__repr__", [](const rs2::stream_profile& self) - { - std::stringstream ss; - if (auto vf = self.as()) - { - ss << "<" SNAME ".video_stream_profile: " - << vf.stream_type() << "(" << vf.stream_index() << ") " << vf.width() - << "x" << vf.height() << " @ " << vf.fps() << "fps " - << vf.format() << ">"; - } - else - { - ss << "<" SNAME ".stream_profile: " << 
self.stream_type() << "(" << self.stream_index() - << ") @ " << self.fps() << "fps " << self.format() << ">"; - } - return ss.str(); - }); + .def("stream_name", &rs2::stream_profile::stream_name, "The stream's human-readable name.") + .def("is_default", &rs2::stream_profile::is_default, "Checks if the stream profile is marked/assigned as default, " + "meaning that the profile will be selected when the user requests stream configuration using wildcards.") + .def("__nonzero__", &rs2::stream_profile::operator bool, "check that the profile is valid") + .def("get_extrinsics_to", &rs2::stream_profile::get_extrinsics_to, "Get the extrinsic transformation between two profiles (representing physical sensors)", "to"_a) + .def("register_extrinsics_to", &rs2::stream_profile::register_extrinsics_to, "Assign extrinsic transformation parameters " + "to a specific profile (sensor). The extrinsic information is generally available as part of the camera calibration, " + "and librealsense is responsible for retrieving and assigning these parameters where appropriate. 
This specific function " + "is intended for synthetic/mock-up (software) devices for which the parameters are produced and injected by the user.", "to"_a, "extrinsics"_a) + .def("__repr__", [](const rs2::stream_profile& self) { + std::stringstream ss; + if (auto vf = self.as()) + { + ss << "<" SNAME ".video_stream_profile: " + << vf.stream_type() << "(" << vf.stream_index() << ") " << vf.width() + << "x" << vf.height() << " @ " << vf.fps() << "fps " + << vf.format() << ">"; + } + else + { + ss << "<" SNAME ".stream_profile: " << self.stream_type() << "(" << self.stream_index() + << ") @ " << self.fps() << "fps " << self.format() << ">"; + } + return ss.str(); + }); - py::class_ video_stream_profile(m, "video_stream_profile"); + py::class_ video_stream_profile(m, "video_stream_profile", "Video stream profile instance which contains additional video attributes."); video_stream_profile.def(py::init(), "sp"_a) - .def("width", &rs2::video_stream_profile::width) - .def("height", &rs2::video_stream_profile::height) - .def("get_intrinsics", &rs2::video_stream_profile::get_intrinsics) - .def_property_readonly("intrinsics", &rs2::video_stream_profile::get_intrinsics) - .def("__repr__", [](const rs2::video_stream_profile& self) - { - std::stringstream ss; - ss << "<" SNAME ".video_stream_profile: " - << self.stream_type() << "(" << self.stream_index() << ") " << self.width() - << "x" << self.height() << " @ " << self.fps() << "fps " - << self.format() << ">"; - return ss.str(); - }); + .def("width", &rs2::video_stream_profile::width) // No docstring in C++ + .def("height", &rs2::video_stream_profile::height) // No docstring in C++ + .def("get_intrinsics", &rs2::video_stream_profile::get_intrinsics, "Get stream profile intrinsics attributes.") + .def_property_readonly("intrinsics", &rs2::video_stream_profile::get_intrinsics, "Stream profile intrinsics attributes. 
Identical to calling get_intrinsics.") + .def("__repr__", [](const rs2::video_stream_profile& self) { + std::stringstream ss; + ss << "<" SNAME ".video_stream_profile: " + << self.stream_type() << "(" << self.stream_index() << ") " << self.width() + << "x" << self.height() << " @ " << self.fps() << "fps " + << self.format() << ">"; + return ss.str(); + }); py::class_ motion_stream_profile(m, "motion_stream_profile"); motion_stream_profile.def(py::init(), "sp"_a) From 28d3f76017f06d7dbc49a16b2c49e22d05e08eb4 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Mon, 22 Apr 2019 17:20:35 +0300 Subject: [PATCH 07/16] pydocs: up to sensor.hpp --- include/librealsense2/hpp/rs_frame.hpp | 4 +- wrappers/python/python.cpp | 68 ++++++++++++++------------ 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/include/librealsense2/hpp/rs_frame.hpp b/include/librealsense2/hpp/rs_frame.hpp index 4bb990657d..95f826cb3a 100644 --- a/include/librealsense2/hpp/rs_frame.hpp +++ b/include/librealsense2/hpp/rs_frame.hpp @@ -201,7 +201,7 @@ namespace rs2 { public: /** - * Video stream profile instance which contains additional video attributes + * Stream profile instance which contains additional video attributes * \param[in] stream_profile sp - assign exisiting stream_profile to this instance. */ explicit video_stream_profile(const stream_profile& sp) @@ -253,7 +253,7 @@ namespace rs2 { public: /** - * Motion stream profile instance which contans IMU-specific intrinsic + * Stream profile instance which contains IMU-specific intrinsics. * \param[in] stream_profile sp - assign exisiting stream_profile to this instance. 
*/ explicit motion_stream_profile(const stream_profile& sp) diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index 0c7a370e00..a1c6f068ac 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -776,7 +776,7 @@ PYBIND11_MODULE(NAME, m) { return ss.str(); }); - py::class_ video_stream_profile(m, "video_stream_profile", "Video stream profile instance which contains additional video attributes."); + py::class_ video_stream_profile(m, "video_stream_profile", "Stream profile instance which contains additional video attributes."); video_stream_profile.def(py::init(), "sp"_a) .def("width", &rs2::video_stream_profile::width) // No docstring in C++ .def("height", &rs2::video_stream_profile::height) // No docstring in C++ @@ -791,56 +791,60 @@ PYBIND11_MODULE(NAME, m) { return ss.str(); }); - py::class_ motion_stream_profile(m, "motion_stream_profile"); + py::class_ motion_stream_profile(m, "motion_stream_profile", "Stream profile instance which contans additional motion attributes"); motion_stream_profile.def(py::init(), "sp"_a) .def("get_motion_intrinsics", &rs2::motion_stream_profile::get_motion_intrinsics, "Returns scale and bias of a motion stream."); - py::class_ notification(m, "notification"); + py::class_ notification(m, "notification"); // No docstring in C++ notification.def(py::init<>()) .def("get_category", &rs2::notification::get_category, - "Retrieve the notification's category.") - .def("get_description", &rs2::notification::get_description, - "Retrieve the notification's description.") - .def("get_timestamp", &rs2::notification::get_timestamp, - "Retrieve the notification's arrival timestamp.") - .def("get_severity", &rs2::notification::get_severity, - "Retrieve the notification's severity.") - .def("get_serialized_data", &rs2::notification::get_severity, - "Retrieve the notification's serialized data.") + "Retrieve the notification's category.") .def_property_readonly("category", &rs2::notification::get_category, - 
"Retrieve the notification's category.") + "The notification's category. Identical to calling get_category.") + .def("get_description", &rs2::notification::get_description, + "Retrieve the notification's description.") .def_property_readonly("description", &rs2::notification::get_description, - "Retrieve the notification's description.") + "The notification's description. Identical to calling get_description.") + .def("get_timestamp", &rs2::notification::get_timestamp, + "Retrieve the notification's arrival timestamp.") .def_property_readonly("timestamp", &rs2::notification::get_timestamp, - "Retrieve the notification's arrival timestamp.") + "The notification's arrival timestamp. Identical to calling get_timestamp.") + .def("get_severity", &rs2::notification::get_severity, + "Retrieve the notification's severity.") .def_property_readonly("severity", &rs2::notification::get_severity, - "Retrieve the notification's severity.") + "The notification's severity. Identical to calling get_severity.") + .def("get_serialized_data", &rs2::notification::get_severity, + "Retrieve the notification's serialized data.") .def_property_readonly("serialized_data", &rs2::notification::get_serialized_data, - "Retrieve the notification's serialized data.") + "The notification's serialized data. 
Identical to calling get_serialized_data.") .def("__repr__", [](const rs2::notification &n) { - return n.get_description(); - }); + return n.get_description(); + }); // not binding notifications_callback, templated - py::class_ sensor(m, "sensor"); + py::class_ sensor(m, "sensor"); // No docstring in C++ sensor.def("open", (void (rs2::sensor::*)(const rs2::stream_profile&) const) &rs2::sensor::open, - "Open sensor for exclusive access, by commiting to a configuration", "profile"_a) + "Open sensor for exclusive access, by commiting to a configuration", "profile"_a) .def("supports", (bool (rs2::sensor::*)(rs2_camera_info) const) &rs2::sensor::supports, - "Check if specific camera info is supported.", "info") + "Check if specific camera info is supported.", "info") .def("supports", (bool (rs2::sensor::*)(rs2_option) const) &rs2::options::supports, - "Check if specific camera info is supported.", "info") + "Check if specific camera info is supported.", "info") .def("get_info", &rs2::sensor::get_info, "Retrieve camera specific information, " - "like versions of various internal components.", "info"_a) - .def("set_notifications_callback", [](const rs2::sensor& self, std::function callback) - { self.set_notifications_callback(callback); }, "Register Notifications callback", "callback"_a) + "like versions of various internal components.", "info"_a) + .def("set_notifications_callback", [](const rs2::sensor& self, std::function callback) { + self.set_notifications_callback(callback); + }, "Register Notifications callback", "callback"_a) .def("open", (void (rs2::sensor::*)(const std::vector&) const) &rs2::sensor::open, - "Open sensor for exclusive access, by committing to a composite configuration, specifying one or " - "more stream profiles.", "profiles"_a) + "Open sensor for exclusive access, by committing to a composite configuration, specifying one or " + "more stream profiles.", "profiles"_a) .def("close", &rs2::sensor::close, "Close sensor for exclusive access.", 
py::call_guard()) - .def("start", [](const rs2::sensor& self, std::function callback) - { self.start(callback); }, "Start passing frames into user provided callback.", "callback"_a) - .def("start", [](const rs2::sensor& self, rs2::frame_queue& queue) { self.start(queue); }) - .def("stop", &rs2::sensor::stop, "Stop streaming.", py::call_guard()) + .def("start", [](const rs2::sensor& self, std::function callback) { + self.start(callback); + }, "Start passing frames into user provided callback.", "callback"_a) + .def("start", [](const rs2::sensor& self, rs2::frame_queue& queue) { + self.start(queue); + }, "start passing frames into specified frame_queue", "queue"_a) + .def("stop", [](const rs2::sensor& self) { py::gil_scoped_release lock; self.stop(); }, "Stop streaming.") .def("get_stream_profiles", &rs2::sensor::get_stream_profiles, "Check if physical sensor is supported.") .def("get_recommended_filters", &rs2::sensor::get_recommended_filters, "Return the recommended list of filters by the sensor.") From 73cd9037fa048b8d8e7c50c1723c77baa57e1ee2 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Mon, 22 Apr 2019 17:35:16 +0300 Subject: [PATCH 08/16] [python] Restore gil release on sensor::stop --- wrappers/python/python.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index a1c6f068ac..c583ea9567 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -844,7 +844,7 @@ PYBIND11_MODULE(NAME, m) { .def("start", [](const rs2::sensor& self, rs2::frame_queue& queue) { self.start(queue); }, "start passing frames into specified frame_queue", "queue"_a) - .def("stop", [](const rs2::sensor& self) { py::gil_scoped_release lock; self.stop(); }, "Stop streaming.") + .def("stop", &rs2::sensor::stop, "Stop streaming.", py::call_guard()) .def("get_stream_profiles", &rs2::sensor::get_stream_profiles, "Check if physical sensor is supported.") .def("get_recommended_filters", 
&rs2::sensor::get_recommended_filters, "Return the recommended list of filters by the sensor.") From 58195dffa448ec3bbbb408b8df28e880dd2e3cf7 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Mon, 22 Apr 2019 18:04:30 +0300 Subject: [PATCH 09/16] Pydocs: up to pipeline.hpp --- include/librealsense2/hpp/rs_pipeline.hpp | 4 +- include/librealsense2/hpp/rs_sensor.hpp | 4 +- wrappers/python/python.cpp | 84 +++++++++++++++-------- 3 files changed, 61 insertions(+), 31 deletions(-) diff --git a/include/librealsense2/hpp/rs_pipeline.hpp b/include/librealsense2/hpp/rs_pipeline.hpp index 2ab8567885..795d2eb06b 100644 --- a/include/librealsense2/hpp/rs_pipeline.hpp +++ b/include/librealsense2/hpp/rs_pipeline.hpp @@ -407,7 +407,7 @@ namespace rs2 * Start the pipeline streaming with its default configuration. * The pipeline captures samples from the device, and delivers them to the through the provided frame callback. * Starting the pipeline is possible only when it is not started. If the pipeline was started, an exception is raised. - * When starting the pipeline with a callback both \c wait_for_frames() or \c poll_for_frames() will throw exception. + * When starting the pipeline with a callback both \c wait_for_frames() and \c poll_for_frames() will throw exception. * * \param[in] callback Stream callback, can be any callable object accepting rs2::frame * \return The actual pipeline device and streams profile, which was successfully configured to the streaming device. @@ -428,7 +428,7 @@ namespace rs2 * Start the pipeline streaming according to the configuraion. * The pipeline captures samples from the device, and delivers them to the through the provided frame callback. * Starting the pipeline is possible only when it is not started. If the pipeline was started, an exception is raised. - * When starting the pipeline with a callback both \c wait_for_frames() or \c poll_for_frames() will throw exception. 
+ * When starting the pipeline with a callback both \c wait_for_frames() and \c poll_for_frames() will throw exception. * The pipeline selects and activates the device upon start, according to configuration or a default configuration. * When the rs2::config is provided to the method, the pipeline tries to activate the config \c resolve() result. * If the application requests are conflicting with pipeline computer vision modules or no matching device is available on diff --git a/include/librealsense2/hpp/rs_sensor.hpp b/include/librealsense2/hpp/rs_sensor.hpp index cb340a339a..4f53d3ec12 100644 --- a/include/librealsense2/hpp/rs_sensor.hpp +++ b/include/librealsense2/hpp/rs_sensor.hpp @@ -215,8 +215,8 @@ namespace rs2 /** - * check if physical sensor is supported - * \return list of stream profiles that given sensor can provide, should be released by rs2_delete_profiles_list + * Retrieves the list of stream profiles supported by the sensor. + * \return list of stream profiles that given sensor can provide */ std::vector get_stream_profiles() const { diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index c583ea9567..b85af911b1 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -845,57 +845,87 @@ PYBIND11_MODULE(NAME, m) { self.start(queue); }, "start passing frames into specified frame_queue", "queue"_a) .def("stop", &rs2::sensor::stop, "Stop streaming.", py::call_guard()) - .def("get_stream_profiles", &rs2::sensor::get_stream_profiles, "Check if physical sensor is supported.") + .def("get_stream_profiles", &rs2::sensor::get_stream_profiles, "Retrieves the list of stream profiles supported by the sensor.") + .def_property_readonly("profiles", &rs2::sensor::get_stream_profiles, "The list of stream profiles supported by the sensor. 
Identical to calling get_stream_profiles") .def("get_recommended_filters", &rs2::sensor::get_recommended_filters, "Return the recommended list of filters by the sensor.") - - .def_property_readonly("profiles", &rs2::sensor::get_stream_profiles, "Check if physical sensor is supported.") .def(py::init<>()) - .def("__nonzero__", &rs2::sensor::operator bool) + .def("__nonzero__", &rs2::sensor::operator bool) // No docstring in C++ .def(BIND_DOWNCAST(sensor, roi_sensor)) .def(BIND_DOWNCAST(sensor, depth_sensor)) .def(BIND_DOWNCAST(sensor, pose_sensor)) .def(BIND_DOWNCAST(sensor, wheel_odometer)); - py::class_ roi_sensor(m, "roi_sensor"); + py::class_ roi_sensor(m, "roi_sensor"); // No docstring in C++ roi_sensor.def(py::init(), "sensor"_a) - .def("set_region_of_interest", &rs2::roi_sensor::set_region_of_interest, "roi"_a) - .def("get_region_of_interest", &rs2::roi_sensor::get_region_of_interest) - .def("__nonzero__", &rs2::roi_sensor::operator bool); + .def("set_region_of_interest", &rs2::roi_sensor::set_region_of_interest, "roi"_a) // No docstring in C++ + .def("get_region_of_interest", &rs2::roi_sensor::get_region_of_interest) // No docstring in C++ + .def("__nonzero__", &rs2::roi_sensor::operator bool); // No docstring in C++ - py::class_ depth_sensor(m, "depth_sensor"); + py::class_ depth_sensor(m, "depth_sensor"); // No docstring in C++ depth_sensor.def(py::init(), "sensor"_a) .def("get_depth_scale", &rs2::depth_sensor::get_depth_scale, - "Retrieves mapping between the units of the depth image and meters.") - .def("__nonzero__", &rs2::depth_sensor::operator bool); + "Retrieves mapping between the units of the depth image and meters.") + .def("__nonzero__", &rs2::depth_sensor::operator bool); // No docstring in C++ - py::class_ pose_sensor(m, "pose_sensor"); + py::class_ pose_sensor(m, "pose_sensor"); // No docstring in C++ pose_sensor.def(py::init(), "sensor"_a) .def("import_localization_map", &rs2::pose_sensor::import_localization_map, - "Load SLAM localization 
map from host to device.", "lmap_buf"_a) + "Load SLAM localization map from host to device.", "lmap_buf"_a) .def("export_localization_map", &rs2::pose_sensor::export_localization_map, - "Extract SLAM localization map from device and store on host.") + "Extract SLAM localization map from device and store on host.") .def("set_static_node", &rs2::pose_sensor::set_static_node, - "Create a named reference frame anchored to a specific 3D pose.") + "Create a named reference frame anchored to a specific 3D pose.") .def("get_static_node", &rs2::pose_sensor::get_static_node, - "Retrieve a named reference frame anchored to a specific 3D pose.") - .def("__nonzero__", &rs2::pose_sensor::operator bool); + "Retrieve a named reference frame anchored to a specific 3D pose.") + .def("__nonzero__", &rs2::pose_sensor::operator bool); // No docstring in C++ - py::class_ wheel_odometer(m, "wheel_odometer"); + py::class_ wheel_odometer(m, "wheel_odometer"); // No docstring in C++ wheel_odometer.def(py::init(), "sensor"_a) .def("load_wheel_odometery_config", &rs2::wheel_odometer::load_wheel_odometery_config, - "odometry_config_buf"_a, "Load Wheel odometer settings from host to device.") + "Load Wheel odometer settings from host to device.", "odometry_config_buf"_a) .def("send_wheel_odometry", &rs2::wheel_odometer::send_wheel_odometry, - "wo_sensor_id"_a, "frame_num"_a, "translational_velocity"_a, - "Send wheel odometry data for each individual sensor (wheel)") + "Send wheel odometry data for each individual sensor (wheel)", + "wo_sensor_id"_a, "frame_num"_a, "translational_velocity"_a) .def("__nonzero__", &rs2::wheel_odometer::operator bool); /* rs2_pipeline.hpp */ - py::class_ pipeline(m, "pipeline"); - pipeline.def(py::init(), "ctx"_a = rs2::context()) - .def("start", (rs2::pipeline_profile(rs2::pipeline::*)(const rs2::config&)) &rs2::pipeline::start, "config"_a) - .def("start", (rs2::pipeline_profile(rs2::pipeline::*)()) &rs2::pipeline::start) - .def("start", [](rs2::pipeline& self, 
std::function f) { self.start(f); }, "callback"_a) - .def("start", [](rs2::pipeline& self, const rs2::config& config, std::function f) { self.start(config, f); }, "config"_a, "callback"_a) + py::class_ pipeline(m, "pipeline", "The pipeline simplifies the user interaction with the device and computer vision processing modules.\n" + "The class abstracts the camera configuration and streaming, and the vision modules triggering and threading.\n" + "It lets the application focus on the computer vision output of the modules, or the device output data.\n" + "The pipeline can manage computer vision modules, which are implemented as a processing blocks.\n" + "The pipeline is the consumer of the processing block interface, while the application consumes the computer vision interface."); + pipeline.def(py::init(), "ctx"_a = rs2::context(), "The caller can provide a context created by the application, usually for playback or testing purposes.") + // TODO: Streamline this wall of text + .def("start", (rs2::pipeline_profile(rs2::pipeline::*)(const rs2::config&)) &rs2::pipeline::start, "Start the pipeline streaming according to the configuraion.\n" + "The pipeline streaming loop captures samples from the device, and delivers them to the attached computer vision modules and processing blocks, according to " + "each module requirements and threading model.\n" + "During the loop execution, the application can access the camera streams by calling wait_for_frames() or poll_for_frames().\n" + "The streaming loop runs until the pipeline is stopped.\n" + "Starting the pipeline is possible only when it is not started. 
If the pipeline was started, an exception is raised.\n" + "The pipeline selects and activates the device upon start, according to configuration or a default configuration.\n" + "When the rs2::config is provided to the method, the pipeline tries to activate the config resolve() result.\n" + "If the application requests are conflicting with pipeline computer vision modules or no matching device is available on the platform, the method fails.\n" + "Available configurations and devices may change between config resolve() call and pipeline start, in case devices are connected or disconnected, or another " + "application acquires ownership of a device.", "config"_a) + .def("start", (rs2::pipeline_profile(rs2::pipeline::*)()) &rs2::pipeline::start, "Start the pipeline streaming with its default configuration.\n" + "The pipeline streaming loop captures samples from the device, and delivers them to the attached computer vision modules and processing " + "blocks, according to each module requirements and threading model.\n" + "During the loop execution, the application can access the camera streams by calling wait_for_frames() or poll_for_frames().\n" + "The streaming loop runs until the pipeline is stopped.\n" + "Starting the pipeline is possible only when it is not started. If the pipeline was started, an exception is raised.\n") + .def("start", [](rs2::pipeline& self, std::function f) { self.start(f); }, "Start the pipeline streaming with its default configuration.\n" + "The pipeline captures samples from the device, and delivers them to the through the provided frame callback.\n" + "Starting the pipeline is possible only when it is not started. 
If the pipeline was started, an exception is raised.\n" + "When starting the pipeline with a callback both wait_for_frames() and poll_for_frames() will throw exception.", "callback"_a) + .def("start", [](rs2::pipeline& self, const rs2::config& config, std::function f) { self.start(config, f); }, "Start the pipeline streaming according to the configuraion.\n" + "The pipeline captures samples from the device, and delivers them to the through the provided frame callback.\n" + "Starting the pipeline is possible only when it is not started. If the pipeline was started, an exception is raised.\n" + "When starting the pipeline with a callback both wait_for_frames() and poll_for_frames() will throw exception.\n" + "The pipeline selects and activates the device upon start, according to configuration or a default configuration.\n" + "When the rs2::config is provided to the method, the pipeline tries to activate the config resolve() result.\n" + "If the application requests are conflicting with pipeline computer vision modules or no matching device is available on the platform, the method fails.\n" + "Available configurations and devices may change between config resolve() call and pipeline start, in case devices are connected or disconnected, " + "or another application acquires ownership of a device.", "config"_a, "callback"_a) .def("stop", &rs2::pipeline::stop, py::call_guard()) .def("wait_for_frames", &rs2::pipeline::wait_for_frames, "timeout_ms"_a = 5000, py::call_guard()) .def("poll_for_frames", [](const rs2::pipeline &self) From 0d50c038526d047d4afd471923cfdef4c4bec0e5 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Thu, 16 May 2019 13:50:17 +0300 Subject: [PATCH 10/16] Complete updating python documentation --- include/librealsense2/hpp/rs_pipeline.hpp | 22 ++--- wrappers/python/python.cpp | 112 +++++++++++++++++----- 2 files changed, 97 insertions(+), 37 deletions(-) diff --git a/include/librealsense2/hpp/rs_pipeline.hpp b/include/librealsense2/hpp/rs_pipeline.hpp 
index 795d2eb06b..209947fe12 100644 --- a/include/librealsense2/hpp/rs_pipeline.hpp +++ b/include/librealsense2/hpp/rs_pipeline.hpp @@ -11,7 +11,7 @@ namespace rs2 { /** - * The pipeline profile includes a device and a selection of active streams, with specific profile. + * The pipeline profile includes a device and a selection of active streams, with specific profiles. * The profile is a selection of the above under filters and conditions defined by the pipeline. * Streams may belong to more than one sensor of the device. */ @@ -50,7 +50,7 @@ namespace rs2 } /** - * Return the selected stream profile, which are enabled in this profile. + * Return the stream profile enabled for the specified stream in this profile. * * \param[in] stream_type Stream type of the desired profile * \param[in] stream_index Stream index of the desired profile. -1 for any matching. @@ -160,25 +160,25 @@ namespace rs2 error::handle(e); } - //Stream type and possibly also stream index + // Stream type and possibly also stream index void enable_stream(rs2_stream stream_type, int stream_index = -1) { enable_stream(stream_type, stream_index, 0, 0, RS2_FORMAT_ANY, 0); } - //Stream type and resolution, and possibly format and frame rate + // Stream type and resolution, and possibly format and frame rate void enable_stream(rs2_stream stream_type, int width, int height, rs2_format format = RS2_FORMAT_ANY, int framerate = 0) { enable_stream(stream_type, -1, width, height, format, framerate); } - //Stream type and format + // Stream type and format, and possibly frame rate void enable_stream(rs2_stream stream_type, rs2_format format, int framerate = 0) { enable_stream(stream_type, -1, 0, 0, format, framerate); } - //Stream type and format + // Stream type, index, and format, and possibly framerate void enable_stream(rs2_stream stream_type, int stream_index, rs2_format format, int framerate = 0) { enable_stream(stream_type, stream_index, 0, 0, format, framerate); @@ -216,7 +216,7 @@ namespace rs2 * 
Select a recorded device from a file, to be used by the pipeline through playback. * The device available streams are as recorded to the file, and \c resolve() considers only this device and * configuration as available. - * This request cannot be used if enable_record_to_file() is called for the current config, and vise versa + * This request cannot be used if \c enable_record_to_file() is called for the current config, and vise versa * * \param[in] file_name The playback file of the device */ @@ -228,8 +228,8 @@ namespace rs2 } /** - * Requires that the resolved device would be recorded to file - * This request cannot be used if enable_device_from_file() is called for the current config, and vise versa + * Requires that the resolved device would be recorded to file. + * This request cannot be used if \c enable_device_from_file() is called for the current config, and vise versa * as available. * * \param[in] file_name The desired file for the output record @@ -476,7 +476,7 @@ namespace rs2 * should be called as fast as the device frame rate. * The application can maintain the frames handles to defer processing. However, if the application maintains too long * history, the device may lack memory resources to produce new frames, and the following call to this method shall fail - * to retrieve new frames, until resources are retained. + * to retrieve new frames, until resources become available. * * \param[in] timeout_ms Max time in milliseconds to wait until an exception will be thrown * \return Set of time synchronized frames, one from each active stream @@ -499,7 +499,7 @@ namespace rs2 * To avoid frame drops, this method should be called as fast as the device frame rate. * The application can maintain the frames handles to defer processing. However, if the application maintains too long * history, the device may lack memory resources to produce new frames, and the following calls to this method shall - * return no new frames, until resources are retained. 
+ * return no new frames, until resources become available. * * \param[out] f Frames set handle * \return True if new set of time synchronized frames was stored to f, false if no new frames set is available diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index b85af911b1..b2166e2a50 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -791,7 +791,7 @@ PYBIND11_MODULE(NAME, m) { return ss.str(); }); - py::class_ motion_stream_profile(m, "motion_stream_profile", "Stream profile instance which contans additional motion attributes"); + py::class_ motion_stream_profile(m, "motion_stream_profile", "Stream profile instance which contains IMU-specific intrinsics."); motion_stream_profile.def(py::init(), "sp"_a) .def("get_motion_intrinsics", &rs2::motion_stream_profile::get_motion_intrinsics, "Returns scale and bias of a motion stream."); @@ -894,7 +894,7 @@ PYBIND11_MODULE(NAME, m) { "It lets the application focus on the computer vision output of the modules, or the device output data.\n" "The pipeline can manage computer vision modules, which are implemented as a processing blocks.\n" "The pipeline is the consumer of the processing block interface, while the application consumes the computer vision interface."); - pipeline.def(py::init(), "ctx"_a = rs2::context(), "The caller can provide a context created by the application, usually for playback or testing purposes.") + pipeline.def(py::init(), "The caller can provide a context created by the application, usually for playback or testing purposes.", "ctx"_a = rs2::context()) // TODO: Streamline this wall of text .def("start", (rs2::pipeline_profile(rs2::pipeline::*)(const rs2::config&)) &rs2::pipeline::start, "Start the pipeline streaming according to the configuraion.\n" "The pipeline streaming loop captures samples from the device, and delivers them to the attached computer vision modules and processing blocks, according to " @@ -926,21 +926,39 @@ PYBIND11_MODULE(NAME, m) { "If 
the application requests are conflicting with pipeline computer vision modules or no matching device is available on the platform, the method fails.\n" "Available configurations and devices may change between config resolve() call and pipeline start, in case devices are connected or disconnected, " "or another application acquires ownership of a device.", "config"_a, "callback"_a) - .def("stop", &rs2::pipeline::stop, py::call_guard()) - .def("wait_for_frames", &rs2::pipeline::wait_for_frames, "timeout_ms"_a = 5000, py::call_guard()) + .def("stop", &rs2::pipeline::stop, "Stop the pipeline streaming.\n" + "The pipeline stops delivering samples to the attached computer vision modules and processing blocks, stops the device streaming and releases " + "the device resources used by the pipeline. It is the application's responsibility to release any frame reference it owns.\n" + "The method takes effect only after start() was called, otherwise an exception is raised.", py::call_guard()) + .def("wait_for_frames", &rs2::pipeline::wait_for_frames, "Wait until a new set of frames becomes available.\n" + "The frames set includes time-synchronized frames of each enabled stream in the pipeline.\n" + "In case of different frame rates of the streams, the frames set include a matching frame of the slow stream, which may have been included in previous frames set.\n" + "The method blocks the calling thread, and fetches the latest unread frames set.\n" + "Device frames, which were produced while the function wasn't called, are dropped. To avoid frame drops, this method should be called as fast as the device frame rate.\n" + "The application can maintain the frames handles to defer processing. 
However, if the application maintains too long history, " + "the device may lack memory resources to produce new frames, and the following call to this method shall fail to retrieve new " + "frames, until resources become available.", "timeout_ms"_a = 5000, py::call_guard()) .def("poll_for_frames", [](const rs2::pipeline &self) { rs2::frameset frames; self.poll_for_frames(&frames); return frames; - }) + }, "Check if a new set of frames is available and retrieve the latest undelivered set.\n" + "The frames set includes time-synchronized frames of each enabled stream in the pipeline.\n" + "The method returns without blocking the calling thread, with status of new frames available or not.\n" + "If available, it fetches the latest frames set.\n" + "Device frames, which were produced while the function wasn't called, are dropped.\n" + "To avoid frame drops, this method should be called as fast as the device frame rate.\n" + "The application can maintain the frames handles to defer processing. 
However, if the application maintains too long " + "history, the device may lack memory resources to produce new frames, and the following calls to this method shall " + "return no new frames, until resources become available.") .def("try_wait_for_frames", [](const rs2::pipeline &self, unsigned int timeout_ms) { rs2::frameset fs; auto success = self.try_wait_for_frames(&fs, timeout_ms); return std::make_tuple(success, fs); }, "timeout_ms"_a = 5000, py::call_guard()) - .def("get_active_profile", &rs2::pipeline::get_active_profile); + .def("get_active_profile", &rs2::pipeline::get_active_profile); // No docstring in C++ struct pipeline_wrapper //Workaround to allow python implicit conversion of pipeline to std::shared_ptr { @@ -952,28 +970,70 @@ PYBIND11_MODULE(NAME, m) { py::implicitly_convertible(); - py::class_ pipeline_profile(m, "pipeline_profile"); + py::class_ pipeline_profile(m, "pipeline_profile", "The pipeline profile includes a device and a selection of active streams, with specific profiles.\n" + "The profile is a selection of the above under filters and conditions defined by the pipeline.\n" + "Streams may belong to more than one sensor of the device."); pipeline_profile.def(py::init<>()) - .def("get_streams", &rs2::pipeline_profile::get_streams) - .def("get_stream", &rs2::pipeline_profile::get_stream, "stream_type"_a, "stream_index"_a = -1) - .def("get_device", &rs2::pipeline_profile::get_device); - - - py::class_ config(m, "config"); + .def("get_streams", &rs2::pipeline_profile::get_streams, "Return the selected streams profiles, which are enabled in this profile.") + .def("get_stream", &rs2::pipeline_profile::get_stream, "Return the stream profile enabled for the specified stream in this profile.", "stream_type"_a, "stream_index"_a = -1) + .def("get_device", &rs2::pipeline_profile::get_device, "Retrieve the device used by the pipeline.\n" + "The device class provides the application access to control camera additional settings - get device " + 
"information, sensor options information, options value query and set, sensor specific extensions.\n" + "Since the pipeline controls the device streams configuration, activation state and frames reading, " + "calling the device API functions, which execute those operations, results in unexpected behavior.\n" + "The pipeline streaming device is selected during pipeline start(). Devices of profiles, which are not returned " + "by pipeline start() or get_active_profile(), are not guaranteed to be used by the pipeline."); + + + py::class_ config(m, "config", "The config allows pipeline users to request filters for the pipeline streams and device selection and configuration.\n" + "This is an optional step in pipeline creation, as the pipeline resolves its streaming device internally.\n" + "Config provides its users a way to set the filters and test if there is no conflict with the pipeline requirements from the device.\n" + "It also allows the user to find a matching device for the config filters and the pipeline, in order to select a device explicitly, " + "and modify its controls before streaming starts."); config.def(py::init<>()) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, int, int, rs2_format, int)) &rs2::config::enable_stream, "stream_type"_a, "stream_index"_a, "width"_a, "height"_a, "format"_a = RS2_FORMAT_ANY, "framerate"_a = 0) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, int)) &rs2::config::enable_stream, "stream_type"_a, "stream_index"_a = -1) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, rs2_format, int))&rs2::config::enable_stream, "stream_type"_a, "format"_a, "framerate"_a = 0) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, int, rs2_format, int)) &rs2::config::enable_stream, "stream_type"_a, "width"_a, "height"_a, "format"_a = RS2_FORMAT_ANY, "framerate"_a = 0) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, rs2_format, int)) &rs2::config::enable_stream, "stream_type"_a, 
"stream_index"_a, "format"_a, "framerate"_a = 0) - .def("enable_all_streams", &rs2::config::enable_all_streams) - .def("enable_device", &rs2::config::enable_device, "serial"_a) - .def("enable_device_from_file", &rs2::config::enable_device_from_file, "file_name"_a, "repeat_playback"_a = true) - .def("enable_record_to_file", &rs2::config::enable_record_to_file, "file_name"_a) - .def("disable_stream", &rs2::config::disable_stream, "stream"_a, "index"_a = -1) - .def("disable_all_streams", &rs2::config::disable_all_streams) - .def("resolve", [](rs2::config* c, pipeline_wrapper pw) -> rs2::pipeline_profile { return c->resolve(pw._ptr); }) - .def("can_resolve", [](rs2::config* c, pipeline_wrapper pw) -> bool { return c->can_resolve(pw._ptr); }); + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, int, int, rs2_format, int)) &rs2::config::enable_stream, "Enable a device stream explicitly, with selected stream parameters.\n" + "The method allows the application to request a stream with specific configuration.\n" + "If no stream is explicitly enabled, the pipeline configures the device and its streams according to the attached computer vision modules and processing blocks " + "requirements, or default configuration for the first available device.\n" + "The application can configure any of the input stream parameters according to its requirement, or set to 0 for don't care value.\n" + "The config accumulates the application calls for enable configuration methods, until the configuration is applied.\n" + "Multiple enable stream calls for the same stream override each other, and the last call is maintained.\n" + "Upon calling resolve(), the config checks for conflicts between the application configuration requests and the attached computer vision " + "modules and processing blocks requirements, and fails if conflicts are found.\n" + "Before resolve() is called, no conflict check is done.", "stream_type"_a, "stream_index"_a, "width"_a, "height"_a, "format"_a = 
RS2_FORMAT_ANY, "framerate"_a = 0) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int)) &rs2::config::enable_stream, "Stream type and possibly also stream index", "stream_type"_a, "stream_index"_a = -1) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, rs2_format, int))&rs2::config::enable_stream, "Stream type and format, and possibly frame rate", "stream_type"_a, "format"_a, "framerate"_a = 0) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, int, rs2_format, int)) &rs2::config::enable_stream, "Stream type and resolution, and possibly format and frame rate", "stream_type"_a, "width"_a, "height"_a, "format"_a = RS2_FORMAT_ANY, "framerate"_a = 0) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, rs2_format, int)) &rs2::config::enable_stream, "Stream type, index, and format, and possibly framerate", "stream_type"_a, "stream_index"_a, "format"_a, "framerate"_a = 0) + .def("enable_all_streams", &rs2::config::enable_all_streams, "Enable all device streams explicitly.\n" + "The conditions and behavior of this method are similar to those of enable_stream().\n" + "This filter enables all raw streams of the selected device. The device is either selected explicitly by the application, " + "or by the pipeline requirements or default. 
The list of streams is device dependent.") + .def("enable_device", &rs2::config::enable_device, "Select a specific device explicitly by its serial number, to be used by the pipeline.\n" + "The conditions and behavior of this method are similar to those of enable_stream().\n" + "This method is required if the application needs to set device or sensor settings prior to pipeline streaming, " + "to enforce the pipeline to use the configured device.", "serial"_a) + .def("enable_device_from_file", &rs2::config::enable_device_from_file, "Select a recorded device from a file, to be used by the pipeline through playback.\n" + "The device available streams are as recorded to the file, and resolve() considers only this device and configuration as available.\n" + "This request cannot be used if enable_record_to_file() is called for the current config, and vise versa", "file_name"_a, "repeat_playback"_a = true) + .def("enable_record_to_file", &rs2::config::enable_record_to_file, "Requires that the resolved device would be recorded to file.\n" + "This request cannot be used if enable_device_from_file() is called for the current config, and vise versa as available.", "file_name"_a) + .def("disable_stream", &rs2::config::disable_stream, "Disable a device stream explicitly, to remove any requests on this stream profile.\n" + "The stream can still be enabled due to pipeline computer vision module request. This call removes any filter on the stream configuration.", "stream"_a, "index"_a = -1) + .def("disable_all_streams", &rs2::config::disable_all_streams, "Disable all device stream explicitly, to remove any requests on the streams profiles.\n" + "The streams can still be enabled due to pipeline computer vision module request. 
This call removes any filter on the streams configuration.") + .def("resolve", [](rs2::config* c, pipeline_wrapper pw) -> rs2::pipeline_profile { return c->resolve(pw._ptr); }, "Resolve the configuration filters, " + "to find a matching device and streams profiles.\n" + "The method resolves the user configuration filters for the device and streams, and combines them with the requirements of the computer vision modules " + "and processing blocks attached to the pipeline. If there are no conflicts of requests, it looks for an available device, which can satisfy all requests, " + "and selects the first matching streams configuration.\n" + "In the absence of any request, the config object selects the first available device and the first color and depth streams configuration." + "The pipeline profile selection during start() follows the same method. Thus, the selected profile is the same, if no change occurs to the available devices." + "Resolving the pipeline configuration provides the application access to the pipeline selected device for advanced control." + "The returned configuration is not applied to the device, so the application doesn't own the device sensors. However, the application can call enable_device(), " + "to enforce the device returned by this method is selected by pipeline start(), and configure the device and sensors options or extensions before streaming starts.", "p"_a) + .def("can_resolve", [](rs2::config* c, pipeline_wrapper pw) -> bool { return c->can_resolve(pw._ptr); }, "Check if the config can resolve the configuration filters, " + "to find a matching device and streams profiles. 
The resolution conditions are as described in resolve().", "p"_a); /** RS400 Advanced Mode commands From 31c18a38fb612932eac45a7d3812b990f6a1043f Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Thu, 16 May 2019 16:05:37 +0300 Subject: [PATCH 11/16] replace backslash with forward slash in python documentation --- wrappers/python/python.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index b2166e2a50..fa94bc0477 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -728,7 +728,7 @@ PYBIND11_MODULE(NAME, m) { .def("set_status_changed_callback", [](rs2::playback& self, std::function callback) { self.set_status_changed_callback(callback); }, "Register to receive callback from playback device upon its status changes. Callbacks are invoked from the reading thread, " - "and as such any heavy processing in the callback handler will affect the reading thread and may cause frame drops\ high latency.", "callback"_a) + "and as such any heavy processing in the callback handler will affect the reading thread and may cause frame drops/high latency.", "callback"_a) .def("current_status", &rs2::playback::current_status, "Returns the current state of the playback device"); // Stop? From f25f93cf097dabe076554234a86f22e38c2ac9b4 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Thu, 16 May 2019 17:23:04 +0300 Subject: [PATCH 12/16] Make python html doc generation work --- wrappers/python/docs/_templates/module.rst | 2 +- wrappers/python/docs/conf.py.in | 1 + wrappers/python/docs/index.rst | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/wrappers/python/docs/_templates/module.rst b/wrappers/python/docs/_templates/module.rst index 22ed68d1b2..354c011854 100644 --- a/wrappers/python/docs/_templates/module.rst +++ b/wrappers/python/docs/_templates/module.rst @@ -19,7 +19,7 @@ .. rubric:: Classes .. 
autosummary:: - :toctree: _generated + :toctree: :template: class.rst {% for item in classes %} diff --git a/wrappers/python/docs/conf.py.in b/wrappers/python/docs/conf.py.in index bae0b558fc..280c183ce9 100644 --- a/wrappers/python/docs/conf.py.in +++ b/wrappers/python/docs/conf.py.in @@ -153,3 +153,4 @@ epub_exclude_files = ['search.html'] # -- Extension configuration ------------------------------------------------- +autosummary_generate = True \ No newline at end of file diff --git a/wrappers/python/docs/index.rst b/wrappers/python/docs/index.rst index 99b71bdc61..f2b0e58ee5 100644 --- a/wrappers/python/docs/index.rst +++ b/wrappers/python/docs/index.rst @@ -7,7 +7,7 @@ Welcome to pyrealsense2's documentation! ======================================== .. autosummary:: - :toctree: + :toctree: _generated :template: module.rst pyrealsense2 \ No newline at end of file From 4cbd27e66bf4ee79142d50feb456ff1e4ad5f316 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Thu, 16 May 2019 18:01:00 +0300 Subject: [PATCH 13/16] Update sphinx generation because github pages doesn't like directories starting with _ --- wrappers/python/docs/CMakeLists.txt | 4 ++-- wrappers/python/docs/conf.py.in | 4 ++-- wrappers/python/docs/index.rst | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/wrappers/python/docs/CMakeLists.txt b/wrappers/python/docs/CMakeLists.txt index b116174568..3a447c3e3e 100644 --- a/wrappers/python/docs/CMakeLists.txt +++ b/wrappers/python/docs/CMakeLists.txt @@ -20,10 +20,10 @@ if(NOT DEFINED SPHINX_THEME_DIR) endif() # configured documentation tools and intermediate build results -set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") +set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/build") # Sphinx cache with pickled ReST documents -set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") +set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/doctrees") # HTML output directory set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html") diff 
--git a/wrappers/python/docs/conf.py.in b/wrappers/python/docs/conf.py.in index 280c183ce9..3cfbb201de 100644 --- a/wrappers/python/docs/conf.py.in +++ b/wrappers/python/docs/conf.py.in @@ -70,7 +70,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_templates'] +exclude_patterns = ['build', 'Thumbs.db', '.DS_Store', '_templates'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None @@ -93,7 +93,7 @@ html_theme_path = ['@SPHINX_THEME_DIR@'] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ['static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. diff --git a/wrappers/python/docs/index.rst b/wrappers/python/docs/index.rst index f2b0e58ee5..732849541f 100644 --- a/wrappers/python/docs/index.rst +++ b/wrappers/python/docs/index.rst @@ -7,7 +7,7 @@ Welcome to pyrealsense2's documentation! ======================================== .. 
autosummary:: - :toctree: _generated + :toctree: generated :template: module.rst pyrealsense2 \ No newline at end of file From 4b489cf2ec1d82765db88a70f81312979e1cf63d Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Sun, 19 May 2019 15:30:25 +0300 Subject: [PATCH 14/16] Correct way to make sphinx documentation work is add .nojekyll file to gh-pages branch --- wrappers/python/docs/CMakeLists.txt | 4 ++-- wrappers/python/docs/conf.py.in | 4 ++-- wrappers/python/docs/index.rst | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/wrappers/python/docs/CMakeLists.txt b/wrappers/python/docs/CMakeLists.txt index 3a447c3e3e..b116174568 100644 --- a/wrappers/python/docs/CMakeLists.txt +++ b/wrappers/python/docs/CMakeLists.txt @@ -20,10 +20,10 @@ if(NOT DEFINED SPHINX_THEME_DIR) endif() # configured documentation tools and intermediate build results -set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/build") +set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") # Sphinx cache with pickled ReST documents -set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/doctrees") +set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") # HTML output directory set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html") diff --git a/wrappers/python/docs/conf.py.in b/wrappers/python/docs/conf.py.in index 3cfbb201de..280c183ce9 100644 --- a/wrappers/python/docs/conf.py.in +++ b/wrappers/python/docs/conf.py.in @@ -70,7 +70,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['build', 'Thumbs.db', '.DS_Store', '_templates'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_templates'] # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = None @@ -93,7 +93,7 @@ html_theme_path = ['@SPHINX_THEME_DIR@'] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['static'] +html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. diff --git a/wrappers/python/docs/index.rst b/wrappers/python/docs/index.rst index 732849541f..f2b0e58ee5 100644 --- a/wrappers/python/docs/index.rst +++ b/wrappers/python/docs/index.rst @@ -7,7 +7,7 @@ Welcome to pyrealsense2's documentation! ======================================== .. autosummary:: - :toctree: generated + :toctree: _generated :template: module.rst pyrealsense2 \ No newline at end of file From 24516b3cdb95ee549e84c477a226da914c9d7f07 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Sun, 19 May 2019 17:06:08 +0300 Subject: [PATCH 15/16] Fix grammar per @bfulkers-i's instructions --- include/librealsense2/h/rs_option.h | 2 +- include/librealsense2/h/rs_types.h | 10 ++-- include/librealsense2/hpp/rs_frame.hpp | 34 ++++++------ include/librealsense2/hpp/rs_pipeline.hpp | 6 +-- include/librealsense2/hpp/rs_processing.hpp | 6 +-- wrappers/python/python.cpp | 58 ++++++++++----------- 6 files changed, 58 insertions(+), 58 deletions(-) diff --git a/include/librealsense2/h/rs_option.h b/include/librealsense2/h/rs_option.h index 39db8dce08..3f0b09548b 100644 --- a/include/librealsense2/h/rs_option.h +++ b/include/librealsense2/h/rs_option.h @@ -17,7 +17,7 @@ extern "C" { #include "rs_types.h" /** \brief Defines general configuration controls. - These can generally be mapped to camera UVC controls, and unless stated otherwise, can be set/queried at any time. + These can generally be mapped to camera UVC controls, and can be set / queried at any time unless stated otherwise. 
*/ typedef enum rs2_option { diff --git a/include/librealsense2/h/rs_types.h b/include/librealsense2/h/rs_types.h index c7890aad58..5fcda749cd 100644 --- a/include/librealsense2/h/rs_types.h +++ b/include/librealsense2/h/rs_types.h @@ -63,7 +63,7 @@ typedef struct rs2_intrinsics float fx; /**< Focal length of the image plane, as a multiple of pixel width */ float fy; /**< Focal length of the image plane, as a multiple of pixel height */ rs2_distortion model; /**< Distortion model of the image */ - float coeffs[5]; /**< Distortion coefficients, order: k1, k2, p1, p2, k3 */ + float coeffs[5]; /**< Distortion coefficients */ } rs2_intrinsics; /** \brief Motion device intrinsics: scale, bias, and variances. */ @@ -107,13 +107,13 @@ typedef struct rs2_quaternion typedef struct rs2_pose { rs2_vector translation; /**< X, Y, Z values of translation, in meters (relative to initial position) */ - rs2_vector velocity; /**< X, Y, Z values of velocity, in meter/sec */ - rs2_vector acceleration; /**< X, Y, Z values of acceleration, in meter/sec^2 */ + rs2_vector velocity; /**< X, Y, Z values of velocity, in meters/sec */ + rs2_vector acceleration; /**< X, Y, Z values of acceleration, in meters/sec^2 */ rs2_quaternion rotation; /**< Qi, Qj, Qk, Qr components of rotation as represented in quaternion rotation (relative to initial position) */ rs2_vector angular_velocity; /**< X, Y, Z values of angular velocity, in radians/sec */ rs2_vector angular_acceleration; /**< X, Y, Z values of angular acceleration, in radians/sec^2 */ - unsigned int tracker_confidence; /**< Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ - unsigned int mapper_confidence; /**< Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ + unsigned int tracker_confidence; /**< Pose confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ + unsigned int mapper_confidence; /**< Pose map confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High */ } rs2_pose; /** 
\brief Severity of the librealsense logger. */ diff --git a/include/librealsense2/hpp/rs_frame.hpp b/include/librealsense2/hpp/rs_frame.hpp index 95f826cb3a..b3ce8e5be0 100644 --- a/include/librealsense2/hpp/rs_frame.hpp +++ b/include/librealsense2/hpp/rs_frame.hpp @@ -118,13 +118,13 @@ namespace rs2 } /** - * Checking if stream profile is marked/assigned as default, meaning that the profile will be selected when the user requests stream configuration using wildcards (RS2_DEPTH, -1,-1,... + * Checks if stream profile is marked/assigned as default, meaning that the profile will be selected when the user requests stream configuration using wildcards (RS2_DEPTH, -1,-1,... * \return bool - true or false. */ bool is_default() const { return _default; } /** - * Parenthesis operator check that the profile is valid + * Checks if the profile is valid * \return bool - true or false. */ operator bool() const { return _profile != nullptr; } @@ -570,7 +570,7 @@ namespace rs2 { public: /** - * Extend frame class with additional video related attributes and functions + * Extends the frame class with additional video related attributes and functions * \param[in] frame - existing frame instance */ video_frame(const frame& f) @@ -653,12 +653,12 @@ namespace rs2 { public: /** - * Extend frame class with additional point cloud related attributes and functions + * Extends the frame class with additional point cloud related attributes and functions */ points() : frame(), _size(0) {} /** - * Extend frame class with additional point cloud related attributes and functions + * Extends the frame class with additional point cloud related attributes and functions * \param[in] frame - existing frame instance */ points(const frame& f) @@ -678,7 +678,7 @@ namespace rs2 } } /** - * Retrieve the vertices for the point cloud + * Retrieve the vertices of the point cloud * \param[in] vertex* - pointer of vertex sturcture */ const vertex* get_vertices() const @@ -690,7 +690,7 @@ namespace rs2 } /** - * 
Export the point cloud to PLY file + * Export the point cloud to a PLY file * \param[in] string fname - file name of the PLY to be saved * \param[in] video_frame texture - the texture for the PLY. */ @@ -727,7 +727,7 @@ namespace rs2 { public: /** - * Extend video_frame class with additional depth related attributes and functions + * Extends the video_frame class with additional depth related attributes and functions * \param[in] frame - existing frame instance */ depth_frame(const frame& f) @@ -790,7 +790,7 @@ namespace rs2 { public: /** - * Extends frame class with additional motion related attributes and functions + * Extends the frame class with additional motion related attributes and functions * \param[in] frame - existing frame instance */ motion_frame(const frame& f) @@ -818,7 +818,7 @@ namespace rs2 { public: /** - * Extends frame class with additional pose related attributes and functions + * Extends the frame class with additional pose related attributes and functions * \param[in] frame - existing frame instance */ pose_frame(const frame& f) @@ -849,11 +849,11 @@ namespace rs2 { public: /** - * Extend frame class with additional frameset related attributes and functions + * Extends the frame class with additional frameset related attributes and functions */ frameset() :_size(0) {}; /** - * Extend frame class with additional frameset related attributes and functions + * Extends the frame class with additional frameset related attributes and functions * \param[in] frame - existing frame instance */ frameset(const frame& f) @@ -875,7 +875,7 @@ namespace rs2 } /** - * Retrieve the first frame of specific stream and format types, if no frame found, return the default one (frame instance) + * Retrieve the first frame of a specific stream and optionally with a specific format. If no frame is found, return an empty frame instance. * \param[in] rs2_stream s - frame to be retrieved from this stream type. 
* \param[in] rs2_format f - frame to be retrieved from this format type. * \return frame - first found frame with s stream type. @@ -892,7 +892,7 @@ namespace rs2 return result; } /** - * Retrieve the first frame of specific stream type, if no frame found, an error will be thrown + * Retrieve the first frame of a specific stream type and optionally with a specific format. If no frame is found, an error will be thrown. * \param[in] rs2_stream s - frame to be retrieved from this stream type. * \param[in] rs2_format f - frame to be retrieved from this format type. * \return frame - first found frame with s stream type. @@ -905,7 +905,7 @@ namespace rs2 } /** - * Retrieve the first depth frame, if no frame found, return the default one (frame instance) + * Retrieve the first depth frame, if no frame is found, return an empty frame instance. * \return depth_frame - first found depth frame. */ depth_frame get_depth_frame() const @@ -914,7 +914,7 @@ namespace rs2 return f.as(); } /** - * Retrieve the first color frame, if no frame found, search the color frame from IR stream. If one still can't be found, return the default one (frame instance) + * Retrieve the first color frame, if no frame is found, search for the color frame from IR stream. If one still can't be found, return an empty frame instance. * \return video_frame - first found color frame. */ video_frame get_color_frame() const @@ -930,7 +930,7 @@ namespace rs2 return f; } /** - * Retrieve the first infrared frame, if no frame found, return the default one (frame instance) + * Retrieve the first infrared frame, if no frame is found, return an empty frame instance. * \param[in] size_t index * \return video_frame - first found infrared frame. 
*/ diff --git a/include/librealsense2/hpp/rs_pipeline.hpp b/include/librealsense2/hpp/rs_pipeline.hpp index 209947fe12..23d33e6d2e 100644 --- a/include/librealsense2/hpp/rs_pipeline.hpp +++ b/include/librealsense2/hpp/rs_pipeline.hpp @@ -50,7 +50,7 @@ namespace rs2 } /** - * Return the stream profile enabled for the specified stream in this profile. + * Return the stream profile that is enabled for the specified stream in this profile. * * \param[in] stream_type Stream type of the desired profile * \param[in] stream_index Stream index of the desired profile. -1 for any matching. @@ -216,7 +216,7 @@ namespace rs2 * Select a recorded device from a file, to be used by the pipeline through playback. * The device available streams are as recorded to the file, and \c resolve() considers only this device and * configuration as available. - * This request cannot be used if \c enable_record_to_file() is called for the current config, and vise versa + * This request cannot be used if \c enable_record_to_file() is called for the current config, and vice versa. * * \param[in] file_name The playback file of the device */ @@ -229,7 +229,7 @@ namespace rs2 /** * Requires that the resolved device would be recorded to file. - * This request cannot be used if \c enable_device_from_file() is called for the current config, and vise versa + * This request cannot be used if \c enable_device_from_file() is called for the current config, and vice versa. * as available. * * \param[in] file_name The desired file for the output record diff --git a/include/librealsense2/hpp/rs_processing.hpp b/include/librealsense2/hpp/rs_processing.hpp index afd95825d0..36782e54a0 100644 --- a/include/librealsense2/hpp/rs_processing.hpp +++ b/include/librealsense2/hpp/rs_processing.hpp @@ -210,7 +210,7 @@ namespace rs2 /** * Start the processing block with callback function on_frame to inform the application the frame is processed. 
* - * \param[in] on_frame callback function for notifing the frame to be processed is ready. + * \param[in] on_frame callback function for notifying the frame to be processed is ready. */ template void start(S on_frame) @@ -310,7 +310,7 @@ namespace rs2 }; /** - * Define the filter workflow, inherit this class to generate your own filter. Best understanding is to refer to the viewer class in examples.hpp + * Define the filter workflow, inherit this class to generate your own filter. Refer to the viewer class in examples.hpp for a more detailed example. */ class filter : public processing_block, public filter_interface { @@ -379,7 +379,7 @@ namespace rs2 }; /** - * Generates 3D point clouds based on depth frame. Can also map textures from color frame. + * Generates 3D point clouds based on a depth frame. Can also map textures from a color frame. */ class pointcloud : public filter { diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index fa94bc0477..f483eab7d9 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -150,7 +150,7 @@ PYBIND11_MODULE(NAME, m) { BIND_ENUM(m, rs2_format, RS2_FORMAT_COUNT, "A stream's format identifies how binary data is encoded within a frame.") BIND_ENUM(m, rs2_notification_category, RS2_NOTIFICATION_CATEGORY_COUNT, "Category of the librealsense notification.") BIND_ENUM(m, rs2_log_severity, RS2_LOG_SEVERITY_COUNT, "Severity of the librealsense logger.") - BIND_ENUM(m, rs2_option, RS2_OPTION_COUNT, "Defines general configuration controls. These can generally be mapped to camera UVC controls, and unless stated otherwise, can be set / queried at any time.") + BIND_ENUM(m, rs2_option, RS2_OPTION_COUNT, "Defines general configuration controls. 
These can generally be mapped to camera UVC controls, and can be set / queried at any time unless stated otherwise.") BIND_ENUM(m, rs2_timestamp_domain, RS2_TIMESTAMP_DOMAIN_COUNT, "Specifies the clock in relation to which the frame timestamp was measured.") BIND_ENUM(m, rs2_distortion, RS2_DISTORTION_COUNT, "Distortion model: defines how pixel coordinates should be mapped to sensor coordinates.") BIND_ENUM(m, rs2_playback_status, RS2_PLAYBACK_STATUS_COUNT, "") // No docstring in C++ @@ -175,7 +175,7 @@ PYBIND11_MODULE(NAME, m) { .def_readwrite("fx", &rs2_intrinsics::fx, "Focal length of the image plane, as a multiple of pixel width") .def_readwrite("fy", &rs2_intrinsics::fy, "Focal length of the image plane, as a multiple of pixel height") .def_readwrite("model", &rs2_intrinsics::model, "Distortion model of the image") - .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_intrinsics, coeffs, float, 5), "Distortion coefficients, order: k1, k2, p1, p2, k3") + .def_property(BIND_RAW_ARRAY_PROPERTY(rs2_intrinsics, coeffs, float, 5), "Distortion coefficients") .def("__repr__", [](const rs2_intrinsics& self) { std::stringstream ss; ss << "width: " << self.width << ", "; @@ -218,7 +218,7 @@ PYBIND11_MODULE(NAME, m) { py::class_ context(m, "context", "Librealsense context class. Includes realsense API version."); context.def(py::init<>()) .def("query_devices", (rs2::device_list(rs2::context::*)() const) &rs2::context::query_devices, "Create a static" - " snapshot of all connected devices a the time of the call.") + " snapshot of all connected devices at the time of the call.") .def_property_readonly("devices", (rs2::device_list(rs2::context::*)() const) &rs2::context::query_devices, "A static snapshot of all connected devices at time of access. Identical to calling query_devices.") .def("query_all_sensors", &rs2::context::query_all_sensors, "Generate a flat list of " @@ -367,7 +367,7 @@ PYBIND11_MODULE(NAME, m) { .def(BIND_DOWNCAST(frame, pose_frame)); // No apply_filter? 
- py::class_ video_frame(m, "video_frame", "Extend frame class with additional video related attributes and functions."); + py::class_ video_frame(m, "video_frame", "Extends the frame class with additional video related attributes and functions."); video_frame.def(py::init()) .def("get_width", &rs2::video_frame::get_width, "Returns image width in pixels.") .def_property_readonly("width", &rs2::video_frame::get_width, "Image width in pixels. Identical to calling get_width.") @@ -413,20 +413,20 @@ PYBIND11_MODULE(NAME, m) { py::class_ pose(m, "pose"); // No docstring in C++ pose.def(py::init<>()) .def_readwrite("translation", &rs2_pose::translation, "X, Y, Z values of translation, in meters (relative to initial position)") - .def_readwrite("velocity", &rs2_pose::velocity, "X, Y, Z values of velocity, in meter/sec") - .def_readwrite("acceleration", &rs2_pose::acceleration, "X, Y, Z values of acceleration, in meter/sec^2") + .def_readwrite("velocity", &rs2_pose::velocity, "X, Y, Z values of velocity, in meters/sec") + .def_readwrite("acceleration", &rs2_pose::acceleration, "X, Y, Z values of acceleration, in meters/sec^2") .def_readwrite("rotation", &rs2_pose::rotation, "Qi, Qj, Qk, Qr components of rotation as represented in quaternion rotation (relative to initial position)") .def_readwrite("angular_velocity", &rs2_pose::angular_velocity, "X, Y, Z values of angular velocity, in radians/sec") .def_readwrite("angular_acceleration", &rs2_pose::angular_acceleration, "X, Y, Z values of angular acceleration, in radians/sec^2") - .def_readwrite("tracker_confidence", &rs2_pose::tracker_confidence, "Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High") - .def_readwrite("mapper_confidence", &rs2_pose::mapper_confidence, "Pose data confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High"); + .def_readwrite("tracker_confidence", &rs2_pose::tracker_confidence, "Pose confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High") + 
.def_readwrite("mapper_confidence", &rs2_pose::mapper_confidence, "Pose map confidence 0x0 - Failed, 0x1 - Low, 0x2 - Medium, 0x3 - High"); - py::class_ motion_frame(m, "motion_frame", "Extends frame class with additional motion related attributes and functions"); + py::class_ motion_frame(m, "motion_frame", "Extends the frame class with additional motion related attributes and functions"); motion_frame.def(py::init()) .def("get_motion_data", &rs2::motion_frame::get_motion_data, "Retrieve the motion data from IMU sensor.") .def_property_readonly("motion_data", &rs2::motion_frame::get_motion_data, "Motion data from IMU sensor. Identical to calling get_motion_data."); - py::class_ pose_frame(m, "pose_frame", "Extends frame class with additional pose related attributes and functions."); + py::class_ pose_frame(m, "pose_frame", "Extends the frame class with additional pose related attributes and functions."); pose_frame.def(py::init()) .def("get_pose_data", &rs2::pose_frame::get_pose_data, "Retrieve the pose data from T2xx position tracking sensor.") .def_property_readonly("pose_data", &rs2::pose_frame::get_pose_data, "Pose data from T2xx position tracking sensor. 
Identical to calling get_pose_data."); @@ -454,7 +454,7 @@ PYBIND11_MODULE(NAME, m) { return oss.str(); }); - py::class_ points(m, "points", "Extend frame class with additional point cloud related attributes and functions."); + py::class_ points(m, "points", "Extends the frame class with additional point cloud related attributes and functions."); points.def(py::init<>()) .def(py::init()) .def("get_vertices", [](rs2::points& self, int dims) { @@ -471,7 +471,7 @@ PYBIND11_MODULE(NAME, m) { default: throw std::domain_error("dims arg only supports values of 1, 2 or 3"); } - }, "Retrieve the vertices for the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) + }, "Retrieve the vertices of the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) .def("get_texture_coordinates", [](rs2::points& self, int dims) { auto tex = const_cast(self.get_texture_coordinates()); auto profile = self.get_profile().as(); @@ -487,27 +487,27 @@ PYBIND11_MODULE(NAME, m) { throw std::domain_error("dims arg only supports values of 1, 2 or 3"); } }, "Retrieve the texture coordinates (uv map) for the point cloud", py::keep_alive<0, 1>(), "dims"_a=1) - .def("export_to_ply", &rs2::points::export_to_ply, "Export the point cloud to PLY file") + .def("export_to_ply", &rs2::points::export_to_ply, "Export the point cloud to a PLY file") .def("size", &rs2::points::size); // No docstring in C++ // TODO: Deprecate composite_frame, replace with frameset - py::class_ frameset(m, "composite_frame", "Extend frame class with additional frameset related attributes and functions"); + py::class_ frameset(m, "composite_frame", "Extends the frame class with additional frameset related attributes and functions"); frameset.def(py::init()) - .def("first_or_default", &rs2::frameset::first_or_default, "Retrieve the first frame of specific stream and " - "format types, if no frame found, return the default one. 
(frame instance)", "s"_a, "f"_a = RS2_FORMAT_ANY) - .def("first", &rs2::frameset::first, "Retrieve the first frame of specific stream type, " - "if no frame found, an error will be thrown.", "s"_a, "f"_a = RS2_FORMAT_ANY) + .def("first_or_default", &rs2::frameset::first_or_default, "Retrieve the first frame of a specific stream and optionally with a specific format. " + "If no frame is found, return an empty frame instance.", "s"_a, "f"_a = RS2_FORMAT_ANY) + .def("first", &rs2::frameset::first, "Retrieve the first frame of a specific stream type and optionally with a specific format. " + "If no frame is found, an error will be thrown.", "s"_a, "f"_a = RS2_FORMAT_ANY) .def("size", &rs2::frameset::size, "Return the size of the frameset") .def("__len__", &rs2::frameset::size, "Return the size of the frameset") .def("foreach", [](const rs2::frameset& self, std::function callable) { self.foreach(callable); }, "Extract internal frame handles from the frameset and invoke the action function", "callable"_a) .def("__getitem__", &rs2::frameset::operator[]) - .def("get_depth_frame", &rs2::frameset::get_depth_frame, "Retrieve the first depth frame, if no frame found, return the default one. (frame instance)") - .def("get_color_frame", &rs2::frameset::get_color_frame, "Retrieve the first color frame, if no frame found, search the " - "color frame from IR stream. If one still can't be found, return the default one. (frame instance)") - .def("get_infrared_frame", &rs2::frameset::get_infrared_frame, "Retrieve the first infrared frame, if no frame " - "found, return the default one (frame instance)", "index"_a = 0) + .def("get_depth_frame", &rs2::frameset::get_depth_frame, "Retrieve the first depth frame, if no frame is found, return an empty frame instance.") + .def("get_color_frame", &rs2::frameset::get_color_frame, "Retrieve the first color frame, if no frame is found, search for the color frame from IR stream. 
" + "If one still can't be found, return an empty frame instance.") + .def("get_infrared_frame", &rs2::frameset::get_infrared_frame, "Retrieve the first infrared frame, if no frame is " + "found, return an empty frame instance.", "index"_a = 0) .def("get_fisheye_frame", &rs2::frameset::get_fisheye_frame, "Retrieve the fisheye monochrome video frame", "index"_a=0) .def("get_pose_frame", &rs2::frameset::get_pose_frame, "Retrieve the pose frame", "index"_a = 0) .def("get_pose_frame", [](rs2::frameset& self){ return self.get_pose_frame(); }) @@ -526,7 +526,7 @@ PYBIND11_MODULE(NAME, m) { return flist; }); - py::class_ depth_frame(m, "depth_frame", "Extend video_frame class with additional depth related attributes and functions."); + py::class_ depth_frame(m, "depth_frame", "Extends the video_frame class with additional depth related attributes and functions."); depth_frame.def(py::init()) .def("get_distance", &rs2::depth_frame::get_distance, "x"_a, "y"_a, "Provide the depth in metric units at the given pixel"); @@ -608,7 +608,7 @@ PYBIND11_MODULE(NAME, m) { // Not binding syncer_processing_block, not in Python API - py::class_ pointcloud(m, "pointcloud", "Generates 3D point clouds based on depth frame. Can also map textures from color frame."); + py::class_ pointcloud(m, "pointcloud", "Generates 3D point clouds based on a depth frame. 
Can also map textures from a color frame."); pointcloud.def(py::init<>()) .def(py::init(), "stream"_a, "index"_a = 0) .def("calculate", &rs2::pointcloud::calculate, "Generate the pointcloud and texture mappings of depth map.", "depth"_a) @@ -753,7 +753,7 @@ PYBIND11_MODULE(NAME, m) { .def("stream_name", &rs2::stream_profile::stream_name, "The stream's human-readable name.") .def("is_default", &rs2::stream_profile::is_default, "Checks if the stream profile is marked/assigned as default, " "meaning that the profile will be selected when the user requests stream configuration using wildcards.") - .def("__nonzero__", &rs2::stream_profile::operator bool, "check that the profile is valid") + .def("__nonzero__", &rs2::stream_profile::operator bool, "Checks if the profile is valid") .def("get_extrinsics_to", &rs2::stream_profile::get_extrinsics_to, "Get the extrinsic transformation between two profiles (representing physical sensors)", "to"_a) .def("register_extrinsics_to", &rs2::stream_profile::register_extrinsics_to, "Assign extrinsic transformation parameters " "to a specific profile (sensor). 
The extrinsic information is generally available as part of the camera calibration, " @@ -975,7 +975,7 @@ PYBIND11_MODULE(NAME, m) { "Streams may belong to more than one sensor of the device."); pipeline_profile.def(py::init<>()) .def("get_streams", &rs2::pipeline_profile::get_streams, "Return the selected streams profiles, which are enabled in this profile.") - .def("get_stream", &rs2::pipeline_profile::get_stream, "Return the stream profile enabled for the specified stream in this profile.", "stream_type"_a, "stream_index"_a = -1) + .def("get_stream", &rs2::pipeline_profile::get_stream, "Return the stream profile that is enabled for the specified stream in this profile.", "stream_type"_a, "stream_index"_a = -1) .def("get_device", &rs2::pipeline_profile::get_device, "Retrieve the device used by the pipeline.\n" "The device class provides the application access to control camera additional settings - get device " "information, sensor options information, options value query and set, sensor specific extensions.\n" @@ -1015,9 +1015,9 @@ PYBIND11_MODULE(NAME, m) { "to enforce the pipeline to use the configured device.", "serial"_a) .def("enable_device_from_file", &rs2::config::enable_device_from_file, "Select a recorded device from a file, to be used by the pipeline through playback.\n" "The device available streams are as recorded to the file, and resolve() considers only this device and configuration as available.\n" - "This request cannot be used if enable_record_to_file() is called for the current config, and vise versa", "file_name"_a, "repeat_playback"_a = true) + "This request cannot be used if enable_record_to_file() is called for the current config, and vice versa.", "file_name"_a, "repeat_playback"_a = true) .def("enable_record_to_file", &rs2::config::enable_record_to_file, "Requires that the resolved device would be recorded to file.\n" - "This request cannot be used if enable_device_from_file() is called for the current config, and vise versa as 
available.", "file_name"_a) + "This request cannot be used if enable_device_from_file() is called for the current config, and vice versa as available.", "file_name"_a) .def("disable_stream", &rs2::config::disable_stream, "Disable a device stream explicitly, to remove any requests on this stream profile.\n" "The stream can still be enabled due to pipeline computer vision module request. This call removes any filter on the stream configuration.", "stream"_a, "index"_a = -1) .def("disable_all_streams", &rs2::config::disable_all_streams, "Disable all device stream explicitly, to remove any requests on the streams profiles.\n" From 0ce699ed082f64bf1a39ef7c46dd86da167811a6 Mon Sep 17 00:00:00 2001 From: Lior Ramati Date: Sun, 19 May 2019 17:47:49 +0300 Subject: [PATCH 16/16] Add additional changes clarified by #ev-mp --- include/librealsense2/hpp/rs_frame.hpp | 2 +- include/librealsense2/hpp/rs_pipeline.hpp | 34 ++++++++++++++++++++--- wrappers/python/python.cpp | 10 +++---- 3 files changed, 36 insertions(+), 10 deletions(-) diff --git a/include/librealsense2/hpp/rs_frame.hpp b/include/librealsense2/hpp/rs_frame.hpp index b3ce8e5be0..d48aa45e20 100644 --- a/include/librealsense2/hpp/rs_frame.hpp +++ b/include/librealsense2/hpp/rs_frame.hpp @@ -742,7 +742,7 @@ namespace rs2 } /** - * Provide the depth in metric units at the given pixel + * Provide the depth in meters at the given pixel * \param[in] int x - pixel's x coordinate. * \param[in] int y - pixel's y coordinate. * \return float - depth in metric units at given pixel diff --git a/include/librealsense2/hpp/rs_pipeline.hpp b/include/librealsense2/hpp/rs_pipeline.hpp index 23d33e6d2e..d56213b295 100644 --- a/include/librealsense2/hpp/rs_pipeline.hpp +++ b/include/librealsense2/hpp/rs_pipeline.hpp @@ -160,25 +160,51 @@ namespace rs2 error::handle(e); } - // Stream type and possibly also stream index + /** + * Stream type and possibly also stream index. Other parameters are resolved internally. 
+ * + * \param[in] stream_type Stream type to be enabled + * \param[in] stream_index Stream index, used for multiple streams of the same type. -1 indicates any. + */ void enable_stream(rs2_stream stream_type, int stream_index = -1) { enable_stream(stream_type, stream_index, 0, 0, RS2_FORMAT_ANY, 0); } - // Stream type and resolution, and possibly format and frame rate + /** + * Stream type and resolution, and possibly format and frame rate. Other parameters are resolved internally. + * + * \param[in] stream_type Stream type to be enabled + * \param[in] width Stream image width - for image streams. 0 indicates any. + * \param[in] height Stream image height - for image streams. 0 indicates any. + * \param[in] format Stream data format - pixel format for image streams, or data type for other streams. RS2_FORMAT_ANY indicates any. + * \param[in] framerate Stream frames per second. 0 indicates any. + */ void enable_stream(rs2_stream stream_type, int width, int height, rs2_format format = RS2_FORMAT_ANY, int framerate = 0) { enable_stream(stream_type, -1, width, height, format, framerate); } - // Stream type and format, and possibly frame rate + /** + * Stream type and format, and possibly frame rate. Other parameters are resolved internally. + * + * \param[in] stream_type Stream type to be enabled + * \param[in] format Stream data format - pixel format for image streams, or data type for other streams. RS2_FORMAT_ANY indicates any. + * \param[in] framerate Stream frames per second. 0 indicates any. + */ void enable_stream(rs2_stream stream_type, rs2_format format, int framerate = 0) { enable_stream(stream_type, -1, 0, 0, format, framerate); } - // Stream type, index, and format, and possibly framerate + /** + * Stream type, index, and format, and possibly framerate. Other parameters are resolved internally. + * + * \param[in] stream_type Stream type to be enabled + * \param[in] stream_index Stream index, used for multiple streams of the same type. -1 indicates any. 
+ * \param[in] format Stream data format - pixel format for image streams, or data type for other streams. RS2_FORMAT_ANY indicates any. + * \param[in] framerate Stream frames per second. 0 indicates any. + */ void enable_stream(rs2_stream stream_type, int stream_index, rs2_format format, int framerate = 0) { enable_stream(stream_type, stream_index, 0, 0, format, framerate); diff --git a/wrappers/python/python.cpp b/wrappers/python/python.cpp index f483eab7d9..fdf4218ef9 100644 --- a/wrappers/python/python.cpp +++ b/wrappers/python/python.cpp @@ -528,7 +528,7 @@ PYBIND11_MODULE(NAME, m) { py::class_ depth_frame(m, "depth_frame", "Extends the video_frame class with additional depth related attributes and functions."); depth_frame.def(py::init()) - .def("get_distance", &rs2::depth_frame::get_distance, "x"_a, "y"_a, "Provide the depth in metric units at the given pixel"); + .def("get_distance", &rs2::depth_frame::get_distance, "x"_a, "y"_a, "Provide the depth in meters at the given pixel"); /* rs2_processing.hpp */ py::class_ filter_interface(m, "filter_interface", "Interface for frame filtering functionality"); @@ -1001,10 +1001,10 @@ PYBIND11_MODULE(NAME, m) { "Upon calling resolve(), the config checks for conflicts between the application configuration requests and the attached computer vision " "modules and processing blocks requirements, and fails if conflicts are found.\n" "Before resolve() is called, no conflict check is done.", "stream_type"_a, "stream_index"_a, "width"_a, "height"_a, "format"_a = RS2_FORMAT_ANY, "framerate"_a = 0) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, int)) &rs2::config::enable_stream, "Stream type and possibly also stream index", "stream_type"_a, "stream_index"_a = -1) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, rs2_format, int))&rs2::config::enable_stream, "Stream type and format, and possibly frame rate", "stream_type"_a, "format"_a, "framerate"_a = 0) - .def("enable_stream", (void 
(rs2::config::*)(rs2_stream, int, int, rs2_format, int)) &rs2::config::enable_stream, "Stream type and resolution, and possibly format and frame rate", "stream_type"_a, "width"_a, "height"_a, "format"_a = RS2_FORMAT_ANY, "framerate"_a = 0) - .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, rs2_format, int)) &rs2::config::enable_stream, "Stream type, index, and format, and possibly framerate", "stream_type"_a, "stream_index"_a, "format"_a, "framerate"_a = 0) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int)) &rs2::config::enable_stream, "Stream type and possibly also stream index. Other parameters are resolved internally.", "stream_type"_a, "stream_index"_a = -1) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, rs2_format, int))&rs2::config::enable_stream, "Stream type and format, and possibly frame rate. Other parameters are resolved internally.", "stream_type"_a, "format"_a, "framerate"_a = 0) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, int, rs2_format, int)) &rs2::config::enable_stream, "Stream type and resolution, and possibly format and frame rate. Other parameters are resolved internally.", "stream_type"_a, "width"_a, "height"_a, "format"_a = RS2_FORMAT_ANY, "framerate"_a = 0) + .def("enable_stream", (void (rs2::config::*)(rs2_stream, int, rs2_format, int)) &rs2::config::enable_stream, "Stream type, index, and format, and possibly framerate. Other parameters are resolved internally.", "stream_type"_a, "stream_index"_a, "format"_a, "framerate"_a = 0) .def("enable_all_streams", &rs2::config::enable_all_streams, "Enable all device streams explicitly.\n" "The conditions and behavior of this method are similar to those of enable_stream().\n" "This filter enables all raw streams of the selected device. The device is either selected explicitly by the application, "