diff --git a/wrappers/python/examples/__init__.py b/wrappers/python/examples/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/wrappers/python/examples/align-depth2color.py b/wrappers/python/examples/align-depth2color.py
index 8ce5d6df66..d925bff6c6 100644
--- a/wrappers/python/examples/align-depth2color.py
+++ b/wrappers/python/examples/align-depth2color.py
@@ -12,88 +12,92 @@
 # Import OpenCV for easy image rendering
 import cv2
 
-# Create a pipeline
-pipeline = rs.pipeline()
-
-# Create a config and configure the pipeline to stream
-# different resolutions of color and depth streams
-config = rs.config()
-
-# Get device product line for setting a supporting resolution
-pipeline_wrapper = rs.pipeline_wrapper(pipeline)
-pipeline_profile = config.resolve(pipeline_wrapper)
-device = pipeline_profile.get_device()
-device_product_line = str(device.get_info(rs.camera_info.product_line))
-
-found_rgb = False
-for s in device.sensors:
-    if s.get_info(rs.camera_info.name) == 'RGB Camera':
-        found_rgb = True
-        break
-if not found_rgb:
-    print("The demo requires Depth camera with Color sensor")
-    exit(0)
-
-config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
-config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
-
-# Start streaming
-profile = pipeline.start(config)
-
-# Getting the depth sensor's depth scale (see rs-align example for explanation)
-depth_sensor = profile.get_device().first_depth_sensor()
-depth_scale = depth_sensor.get_depth_scale()
-print("Depth Scale is: " , depth_scale)
-
-# We will be removing the background of objects more than
-# clipping_distance_in_meters meters away
-clipping_distance_in_meters = 1 #1 meter
-clipping_distance = clipping_distance_in_meters / depth_scale
-
-# Create an align object
-# rs.align allows us to perform alignment of depth frames to others frames
-# The "align_to" is the stream type to which we plan to align depth frames.
-align_to = rs.stream.color
-align = rs.align(align_to)
-
-# Streaming loop
-try:
-    while True:
-        # Get frameset of color and depth
-        frames = pipeline.wait_for_frames()
-        # frames.get_depth_frame() is a 640x360 depth image
-
-        # Align the depth frame to color frame
-        aligned_frames = align.process(frames)
-
-        # Get aligned frames
-        aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
-        color_frame = aligned_frames.get_color_frame()
-
-        # Validate that both frames are valid
-        if not aligned_depth_frame or not color_frame:
-            continue
-
-        depth_image = np.asanyarray(aligned_depth_frame.get_data())
-        color_image = np.asanyarray(color_frame.get_data())
-
-        # Remove background - Set pixels further than clipping_distance to grey
-        grey_color = 153
-        depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
-        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
-
-        # Render images:
-        #   depth align to color on left
-        #   depth on right
-        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
-        images = np.hstack((bg_removed, depth_colormap))
-
-        cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
-        cv2.imshow('Align Example', images)
-        key = cv2.waitKey(1)
-        # Press esc or 'q' to close the image window
-        if key & 0xFF == ord('q') or key == 27:
-            cv2.destroyAllWindows()
-            break
-finally:
-    pipeline.stop()
+def main():
+    # Create a pipeline
+    pipeline = rs.pipeline()
+
+    # Create a config and configure the pipeline to stream
+    # different resolutions of color and depth streams
+    config = rs.config()
+
+    # Get device product line for setting a supporting resolution
+    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
+    pipeline_profile = config.resolve(pipeline_wrapper)
+    device = pipeline_profile.get_device()
+    device_product_line = str(device.get_info(rs.camera_info.product_line))
+
+    found_rgb = False
+    for s in device.sensors:
+        if s.get_info(rs.camera_info.name) == 'RGB Camera':
+            found_rgb = True
+            break
+    if not found_rgb:
+        print("The demo requires Depth camera with Color sensor")
+        exit(0)
+
+    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
+    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
+
+    # Start streaming
+    profile = pipeline.start(config)
+
+    # Getting the depth sensor's depth scale (see rs-align example for explanation)
+    depth_sensor = profile.get_device().first_depth_sensor()
+    depth_scale = depth_sensor.get_depth_scale()
+    print("Depth Scale is: " , depth_scale)
+
+    # We will be removing the background of objects more than
+    # clipping_distance_in_meters meters away
+    clipping_distance_in_meters = 1 #1 meter
+    clipping_distance = clipping_distance_in_meters / depth_scale
+
+    # Create an align object
+    # rs.align allows us to perform alignment of depth frames to other frames
+    # The "align_to" is the stream type to which we plan to align depth frames.
+    align_to = rs.stream.color
+    align = rs.align(align_to)
+
+    # Streaming loop
+    try:
+        while True:
+            # Get frameset of color and depth
+            frames = pipeline.wait_for_frames()
+            # frames.get_depth_frame() is a 640x480 depth image
+
+            # Align the depth frame to color frame
+            aligned_frames = align.process(frames)
+
+            # Get aligned frames
+            aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
+            color_frame = aligned_frames.get_color_frame()
+
+            # Validate that both frames are valid
+            if not aligned_depth_frame or not color_frame:
+                continue
+
+            depth_image = np.asanyarray(aligned_depth_frame.get_data())
+            color_image = np.asanyarray(color_frame.get_data())
+
+            # Remove background - Set pixels further than clipping_distance to grey
+            grey_color = 153
+            depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
+            bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
+
+            # Render images:
+            #   depth align to color on left
+            #   depth on right
+            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
+            images = np.hstack((bg_removed, depth_colormap))
+
+            cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
+            cv2.imshow('Align Example', images)
+            key = cv2.waitKey(1)
+            # Press esc or 'q' to close the image window
+            if key & 0xFF == ord('q') or key == 27:
+                cv2.destroyAllWindows()
+                break
+    finally:
+        pipeline.stop()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/wrappers/python/examples/export_ply_example.py b/wrappers/python/examples/export_ply_example.py
index 65448bbe7a..77e6fd44f6 100644
--- a/wrappers/python/examples/export_ply_example.py
+++ b/wrappers/python/examples/export_ply_example.py
@@ -8,41 +8,44 @@
 # First import the library
 import pyrealsense2 as rs
-
-# Declare pointcloud object, for calculating pointclouds and texture mappings
-pc = rs.pointcloud()
-# We want the points object to be persistent so we can display the last cloud when a frame drops
-points = rs.points()
-
-# Declare RealSense pipeline, encapsulating the actual device and sensors
-pipe = rs.pipeline()
-config = rs.config()
-# Enable depth stream
-config.enable_stream(rs.stream.depth)
-
-# Start streaming with chosen configuration
-pipe.start(config)
-
-# We'll use the colorizer to generate texture for our PLY
-# (alternatively, texture can be obtained from color or infrared stream)
-colorizer = rs.colorizer()
-
-try:
-    # Wait for the next set of frames from the camera
-    frames = pipe.wait_for_frames()
-    colorized = colorizer.process(frames)
-
-    # Create save_to_ply object
-    ply = rs.save_to_ply("1.ply")
-
-    # Set options to the desired values
-    # In this example we'll generate a textual PLY with normals (mesh is already created by default)
-    ply.set_option(rs.save_to_ply.option_ply_binary, False)
-    ply.set_option(rs.save_to_ply.option_ply_normals, True)
-
-    print("Saving to 1.ply...")
-    # Apply the processing block to the frameset which contains the depth frame and the texture
-    ply.process(colorized)
-    print("Done")
-finally:
-    pipe.stop()
+def main():
+    # Declare pointcloud object, for calculating pointclouds and texture mappings
+    pc = rs.pointcloud()
+    # We want the points object to be persistent so we can display the last cloud when a frame drops
+    points = rs.points()
+
+    # Declare RealSense pipeline, encapsulating the actual device and sensors
+    pipe = rs.pipeline()
+    config = rs.config()
+    # Enable depth stream
+    config.enable_stream(rs.stream.depth)
+
+    # Start streaming with chosen configuration
+    pipe.start(config)
+
+    # We'll use the colorizer to generate texture for our PLY
+    # (alternatively, texture can be obtained from color or infrared stream)
+    colorizer = rs.colorizer()
+
+    try:
+        # Wait for the next set of frames from the camera
+        frames = pipe.wait_for_frames()
+        colorized = colorizer.process(frames)
+
+        # Create save_to_ply object
+        ply = rs.save_to_ply("1.ply")
+
+        # Set options to the desired values
+        # In this example we'll generate a textual PLY with normals (mesh is already created by default)
+        ply.set_option(rs.save_to_ply.option_ply_binary, False)
+        ply.set_option(rs.save_to_ply.option_ply_normals, True)
+
+        print("Saving to 1.ply...")
+        # Apply the processing block to the frameset which contains the depth frame and the texture
+        ply.process(colorized)
+        print("Done")
+    finally:
+        pipe.stop()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/wrappers/python/examples/opencv_viewer_example.py b/wrappers/python/examples/opencv_viewer_example.py
index f68b832c70..261182491a 100644
--- a/wrappers/python/examples/opencv_viewer_example.py
+++ b/wrappers/python/examples/opencv_viewer_example.py
@@ -9,64 +9,68 @@
 import numpy as np
 import cv2
 
-# Configure depth and color streams
-pipeline = rs.pipeline()
-config = rs.config()
-
-# Get device product line for setting a supporting resolution
-pipeline_wrapper = rs.pipeline_wrapper(pipeline)
-pipeline_profile = config.resolve(pipeline_wrapper)
-device = pipeline_profile.get_device()
-device_product_line = str(device.get_info(rs.camera_info.product_line))
-
-found_rgb = False
-for s in device.sensors:
-    if s.get_info(rs.camera_info.name) == 'RGB Camera':
-        found_rgb = True
-        break
-if not found_rgb:
-    print("The demo requires Depth camera with Color sensor")
-    exit(0)
-
-config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
-config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
-
-# Start streaming
-pipeline.start(config)
-
-try:
-    while True:
-
-        # Wait for a coherent pair of frames: depth and color
-        frames = pipeline.wait_for_frames()
-        depth_frame = frames.get_depth_frame()
-        color_frame = frames.get_color_frame()
-        if not depth_frame or not color_frame:
-            continue
-
-        # Convert images to numpy arrays
-        depth_image = np.asanyarray(depth_frame.get_data())
-        color_image = np.asanyarray(color_frame.get_data())
-
-        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
-        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
-
-        depth_colormap_dim = depth_colormap.shape
-        color_colormap_dim = color_image.shape
-
-        # If depth and color resolutions are different, resize color image to match depth image for display
-        if depth_colormap_dim != color_colormap_dim:
-            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
-            images = np.hstack((resized_color_image, depth_colormap))
-        else:
-            images = np.hstack((color_image, depth_colormap))
-
-        # Show images
-        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
-        cv2.imshow('RealSense', images)
-        cv2.waitKey(1)
-
-finally:
-
-    # Stop streaming
-    pipeline.stop()
+def main():
+    # Configure depth and color streams
+    pipeline = rs.pipeline()
+    config = rs.config()
+
+    # Get device product line for setting a supporting resolution
+    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
+    pipeline_profile = config.resolve(pipeline_wrapper)
+    device = pipeline_profile.get_device()
+    device_product_line = str(device.get_info(rs.camera_info.product_line))
+
+    found_rgb = False
+    for s in device.sensors:
+        if s.get_info(rs.camera_info.name) == 'RGB Camera':
+            found_rgb = True
+            break
+    if not found_rgb:
+        print("The demo requires Depth camera with Color sensor")
+        exit(0)
+
+    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
+    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
+
+    # Start streaming
+    pipeline.start(config)
+
+    try:
+        while True:
+
+            # Wait for a coherent pair of frames: depth and color
+            frames = pipeline.wait_for_frames()
+            depth_frame = frames.get_depth_frame()
+            color_frame = frames.get_color_frame()
+            if not depth_frame or not color_frame:
+                continue
+
+            # Convert images to numpy arrays
+            depth_image = np.asanyarray(depth_frame.get_data())
+            color_image = np.asanyarray(color_frame.get_data())
+
+            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
+            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
+
+            depth_colormap_dim = depth_colormap.shape
+            color_colormap_dim = color_image.shape
+
+            # If depth and color resolutions are different, resize color image to match depth image for display
+            if depth_colormap_dim != color_colormap_dim:
+                resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
+                images = np.hstack((resized_color_image, depth_colormap))
+            else:
+                images = np.hstack((color_image, depth_colormap))
+
+            # Show images
+            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
+            cv2.imshow('RealSense', images)
+            cv2.waitKey(1)
+
+    finally:
+
+        # Stop streaming
+        pipeline.stop()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/wrappers/python/examples/python-rs400-advanced-mode-example.py b/wrappers/python/examples/python-rs400-advanced-mode-example.py
index bee72ef461..c29bcfbd6c 100644
--- a/wrappers/python/examples/python-rs400-advanced-mode-example.py
+++ b/wrappers/python/examples/python-rs400-advanced-mode-example.py
@@ -22,66 +22,69 @@ def find_device_that_supports_advanced_mode() :
                 print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
             return dev
     raise Exception("No D400 product line device that supports advanced mode was found")
-
-try:
-    dev = find_device_that_supports_advanced_mode()
-    advnc_mode = rs.rs400_advanced_mode(dev)
-    print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
-
-    # Loop until we successfully enable advanced mode
-    while not advnc_mode.is_enabled():
-        print("Trying to enable advanced mode...")
-        advnc_mode.toggle_advanced_mode(True)
-        # At this point the device will disconnect and re-connect.
-        print("Sleeping for 5 seconds...")
-        time.sleep(5)
-        # The 'dev' object will become invalid and we need to initialize it again
-        dev = find_device_that_supports_advanced_mode()
-        advnc_mode = rs.rs400_advanced_mode(dev)
-        print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
-
-    # Get each control's current value
-    print("Depth Control: \n", advnc_mode.get_depth_control())
-    print("RSM: \n", advnc_mode.get_rsm())
-    print("RAU Support Vector Control: \n", advnc_mode.get_rau_support_vector_control())
-    print("Color Control: \n", advnc_mode.get_color_control())
-    print("RAU Thresholds Control: \n", advnc_mode.get_rau_thresholds_control())
-    print("SLO Color Thresholds Control: \n", advnc_mode.get_slo_color_thresholds_control())
-    print("SLO Penalty Control: \n", advnc_mode.get_slo_penalty_control())
-    print("HDAD: \n", advnc_mode.get_hdad())
-    print("Color Correction: \n", advnc_mode.get_color_correction())
-    print("Depth Table: \n", advnc_mode.get_depth_table())
-    print("Auto Exposure Control: \n", advnc_mode.get_ae_control())
-    print("Census: \n", advnc_mode.get_census())
-
-    #To get the minimum and maximum value of each control use the mode value:
-    query_min_values_mode = 1
-    query_max_values_mode = 2
-    current_std_depth_control_group = advnc_mode.get_depth_control()
-    min_std_depth_control_group = advnc_mode.get_depth_control(query_min_values_mode)
-    max_std_depth_control_group = advnc_mode.get_depth_control(query_max_values_mode)
-    print("Depth Control Min Values: \n ", min_std_depth_control_group)
-    print("Depth Control Max Values: \n ", max_std_depth_control_group)
-
-    # Set some control with a new (median) value
-    current_std_depth_control_group.scoreThreshA = int((max_std_depth_control_group.scoreThreshA - min_std_depth_control_group.scoreThreshA) / 2)
-    advnc_mode.set_depth_control(current_std_depth_control_group)
-    print("After Setting new value, Depth Control: \n", advnc_mode.get_depth_control())
-
-    # Serialize all controls to a Json string
-    serialized_string = advnc_mode.serialize_json()
-    print("Controls as JSON: \n", serialized_string)
-    as_json_object = json.loads(serialized_string)
-
-    # We can also load controls from a json string
-    # For Python 2, the values in 'as_json_object' dict need to be converted from unicode object to utf-8
-    if type(next(iter(as_json_object))) != str:
-        as_json_object = {k.encode('utf-8'): v.encode("utf-8") for k, v in as_json_object.items()}
-    # The C++ JSON parser requires double-quotes for the json object so we need
-    # to replace the single quote of the pythonic json to double-quotes
-    json_string = str(as_json_object).replace("'", '\"')
-    advnc_mode.load_json(json_string)
-
-except Exception as e:
-    print(e)
-    pass
+def main():
+    try:
+        dev = find_device_that_supports_advanced_mode()
+        advnc_mode = rs.rs400_advanced_mode(dev)
+        print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
+
+        # Loop until we successfully enable advanced mode
+        while not advnc_mode.is_enabled():
+            print("Trying to enable advanced mode...")
+            advnc_mode.toggle_advanced_mode(True)
+            # At this point the device will disconnect and re-connect.
+            print("Sleeping for 5 seconds...")
+            time.sleep(5)
+            # The 'dev' object will become invalid and we need to initialize it again
+            dev = find_device_that_supports_advanced_mode()
+            advnc_mode = rs.rs400_advanced_mode(dev)
+            print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")
+
+        # Get each control's current value
+        print("Depth Control: \n", advnc_mode.get_depth_control())
+        print("RSM: \n", advnc_mode.get_rsm())
+        print("RAU Support Vector Control: \n", advnc_mode.get_rau_support_vector_control())
+        print("Color Control: \n", advnc_mode.get_color_control())
+        print("RAU Thresholds Control: \n", advnc_mode.get_rau_thresholds_control())
+        print("SLO Color Thresholds Control: \n", advnc_mode.get_slo_color_thresholds_control())
+        print("SLO Penalty Control: \n", advnc_mode.get_slo_penalty_control())
+        print("HDAD: \n", advnc_mode.get_hdad())
+        print("Color Correction: \n", advnc_mode.get_color_correction())
+        print("Depth Table: \n", advnc_mode.get_depth_table())
+        print("Auto Exposure Control: \n", advnc_mode.get_ae_control())
+        print("Census: \n", advnc_mode.get_census())
+
+        #To get the minimum and maximum value of each control use the mode value:
+        query_min_values_mode = 1
+        query_max_values_mode = 2
+        current_std_depth_control_group = advnc_mode.get_depth_control()
+        min_std_depth_control_group = advnc_mode.get_depth_control(query_min_values_mode)
+        max_std_depth_control_group = advnc_mode.get_depth_control(query_max_values_mode)
+        print("Depth Control Min Values: \n ", min_std_depth_control_group)
+        print("Depth Control Max Values: \n ", max_std_depth_control_group)
+
+        # Set some control with a new (median) value
+        current_std_depth_control_group.scoreThreshA = int((max_std_depth_control_group.scoreThreshA - min_std_depth_control_group.scoreThreshA) / 2)
+        advnc_mode.set_depth_control(current_std_depth_control_group)
+        print("After Setting new value, Depth Control: \n", advnc_mode.get_depth_control())
+
+        # Serialize all controls to a Json string
+        serialized_string = advnc_mode.serialize_json()
+        print("Controls as JSON: \n", serialized_string)
+        as_json_object = json.loads(serialized_string)
+
+        # We can also load controls from a json string
+        # For Python 2, the values in 'as_json_object' dict need to be converted from unicode object to utf-8
+        if type(next(iter(as_json_object))) != str:
+            as_json_object = {k.encode('utf-8'): v.encode("utf-8") for k, v in as_json_object.items()}
+        # The C++ JSON parser requires double-quotes for the json object so we need
+        # to replace the single quote of the pythonic json to double-quotes
+        json_string = str(as_json_object).replace("'", '\"')
+        advnc_mode.load_json(json_string)
+
+    except Exception as e:
+        print(e)
+        pass
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/wrappers/python/examples/python-tutorial-1-depth.py b/wrappers/python/examples/python-tutorial-1-depth.py
index 8ca438b66e..61f7869236 100644
--- a/wrappers/python/examples/python-tutorial-1-depth.py
+++ b/wrappers/python/examples/python-tutorial-1-depth.py
@@ -8,44 +8,49 @@
 # First import the library
 import pyrealsense2 as rs
 
-try:
-    # Create a context object. This object owns the handles to all connected realsense devices
-    pipeline = rs.pipeline()
-
-    # Configure streams
-    config = rs.config()
-    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
-
-    # Start streaming
-    pipeline.start(config)
-
-    while True:
-        # This call waits until a new coherent set of frames is available on a device
-        # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called
-        frames = pipeline.wait_for_frames()
-        depth = frames.get_depth_frame()
-        if not depth: continue
-
-        # Print a simple text-based representation of the image, by breaking it into 10x20 pixel regions and approximating the coverage of pixels within one meter
-        coverage = [0]*64
-        for y in range(480):
-            for x in range(640):
-                dist = depth.get_distance(x, y)
-                if 0 < dist and dist < 1:
-                    coverage[x//10] += 1
-
-            if y%20 is 19:
-                line = ""
-                for c in coverage:
-                    line += " .:nhBXWW"[c//25]
-                coverage = [0]*64
-                print(line)
-    exit(0)
-#except rs.error as e:
-#    # Method calls agaisnt librealsense objects may throw exceptions of type pylibrs.error
-#    print("pylibrs.error was thrown when calling %s(%s):\n", % (e.get_failed_function(), e.get_failed_args()))
-#    print("    %s\n", e.what())
-#    exit(1)
-except Exception as e:
-    print(e)
-    pass
+def main():
+    try:
+        # Create a context object. This object owns the handles to all connected realsense devices
+        pipeline = rs.pipeline()
+
+        # Configure streams
+        config = rs.config()
+        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
+
+        # Start streaming
+        pipeline.start(config)
+
+        while True:
+            # This call waits until a new coherent set of frames is available on a device
+            # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called
+            frames = pipeline.wait_for_frames()
+            depth = frames.get_depth_frame()
+            if not depth: continue
+
+            # Print a simple text-based representation of the image, by breaking it into 10x20 pixel regions and approximating the coverage of pixels within one meter
+            coverage = [0]*64
+            for y in range(480):
+                for x in range(640):
+                    dist = depth.get_distance(x, y)
+                    if 0 < dist and dist < 1:
+                        coverage[x//10] += 1
+
+                if y%20 == 19:
+                    line = ""
+                    for c in coverage:
+                        line += " .:nhBXWW"[c//25]
+                    coverage = [0]*64
+                    print(line)
+        exit(0)
+    #except rs.error as e:
+    #    # Method calls against librealsense objects may throw exceptions of type pylibrs.error
+    #    print("pylibrs.error was thrown when calling %s(%s):\n", % (e.get_failed_function(), e.get_failed_args()))
+    #    print("    %s\n", e.what())
+    #    exit(1)
+    except Exception as e:
+        print(e)
+        pass
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/wrappers/python/hatch_build.py b/wrappers/python/hatch_build.py
new file mode 100644
index 0000000000..dac3b8ab79
--- /dev/null
+++ b/wrappers/python/hatch_build.py
@@ -0,0 +1,7 @@
+from hatchling.builders.hooks.plugin.interface import BuildHookInterface
+
+# With infer_tag = True, Hatchling will automatically infer the tag
+# based on the current environment (the platform, Python version, etc.)
+class CustomBuildHook(BuildHookInterface):
+    def initialize(self, version, build_data):
+        build_data['infer_tag'] = True
\ No newline at end of file
diff --git a/wrappers/python/pyproject.toml b/wrappers/python/pyproject.toml
new file mode 100644
index 0000000000..e9506611fa
--- /dev/null
+++ b/wrappers/python/pyproject.toml
@@ -0,0 +1,61 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "pyrealsense2"
+dynamic = ["version"]
+description = "Python Wrapper for Intel Realsense SDK 2.0."
+readme = {file = "README.rst", content-type = "text/x-rst", charset = "utf-8"}
+authors = [
+    { name = "Intel(R) RealSense(TM)", email = "realsense@intel.com" },
+]
+license = {text = "Apache License, Version 2.0"}
+requires-python = ">=3.9"
+classifiers = [
+    "Development Status :: 3 - Alpha",
+    "Environment :: Console",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Education",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: Microsoft :: Windows",
+    "Operating System :: POSIX",
+    "Operating System :: Unix",
+    "Programming Language :: Python",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Multimedia :: Video",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Human Machine Interfaces",
+    "Topic :: Scientific/Engineering :: Image Recognition",
+    "Topic :: Software Development",
+    "Topic :: Software Development :: Libraries :: Application Frameworks"
+]
+
+[tool.hatch.version]
+path = "pyrealsense2/_version.py"
+
+[project.urls]
+homepage = "https://github.com/IntelRealSense/librealsense"
+
+[project.scripts]
+align-depth2color = "examples.align_depth2color:main"
+export_ply_example = "examples.export_ply:main"
+opencv_viewer_example = "examples.opencv_viewer:main"
+python-rs400-advanced-mode-example = "examples.rs400_advanced_mode:main"
+python-tutorial-1-depth = "examples.tutorial_1_depth:main"
+
+[tool.hatch.build]
+artifacts = [
+    "pyrealsense2/*.so",
+    "pyrealsense2/*.pyd",
+    "pyrealsense2/*.dll"
+]
+packages = ["pyrealsense2"]
+
+[tool.hatch.build.targets.wheel.hooks.custom]
+path = "hatch_build.py"
\ No newline at end of file
diff --git a/wrappers/python/setup.py b/wrappers/python/setup.py
deleted file mode 100644
index 187d2acdc6..0000000000
--- a/wrappers/python/setup.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from setuptools import setup, find_packages
-from setuptools.dist import Distribution
-
-# _version.py should be generated by running find_librs_version.py and copied to pyrealsense2 folder
-from pyrealsense2._version import __version__
-
-import os
-import io
-
-package_name = "pyrealsense2"
-package_data = {}
-
-print("version = ", __version__)
-
-def load_readme():
-    with io.open('README.rst', encoding="utf-8") as f:
-        return f.read()
-
-if os.name == 'posix':
-    package_data[package_name] = ['*.so']
-else:
-    package_data[package_name] = ['*.pyd', '*.dll']
-
-
-# This creates a list which is empty but returns a length of 1.
-# Should make the wheel a binary distribution and platlib compliant.
-class EmptyListWithLength(list):
-    def __len__(self):
-        return 1
-
-
-setup(
-    name=package_name,
-    version=__version__,
-    author='Intel(R) RealSense(TM)',
-    author_email='realsense@intel.com',
-    url='https://github.com/IntelRealSense/librealsense',
-    scripts=['examples/align-depth2color.py',
-             'examples/export_ply_example.py',
-             'examples/opencv_viewer_example.py',
-             'examples/python-rs400-advanced-mode-example.py',
-             'examples/python-tutorial-1-depth.py'
-             ],
-    license='Apache License, Version 2.0',
-    description='Python Wrapper for Intel Realsense SDK 2.0.',
-    long_description=load_readme(),
-    install_requires=[],
-    classifiers=[
-        'Development Status :: 3 - Alpha',
-        'Environment :: Console',
-        'Intended Audience :: Developers',
-        'Intended Audience :: Education',
-        'Intended Audience :: Science/Research',
-        'License :: OSI Approved :: Apache Software License',
-        'Operating System :: Microsoft :: Windows',
-        'Operating System :: POSIX',
-        'Operating System :: Unix',
-        'Programming Language :: Python',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.9',
-        'Programming Language :: Python :: 3.10',
-        'Programming Language :: Python :: 3.11',
-        'Programming Language :: Python :: 3.12',
-        'Topic :: Multimedia :: Video',
-        'Topic :: Scientific/Engineering',
-        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
-        'Topic :: Scientific/Engineering :: Image Recognition',
-        'Topic :: Software Development',
-        'Topic :: Software Development :: Libraries :: Application Frameworks'
-    ],
-    packages=find_packages(exclude=['third_party', 'docs', 'examples']),
-    include_package_data=True,
-    ext_modules=EmptyListWithLength(),
-    package_data=package_data
-)
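Because every example is now wrapped in a main() function, importing an example module no longer starts streaming as a side effect; the camera is only touched when main() is called. A minimal usage sketch of that pattern, not part of the patch above, assuming the wrappers/python directory (which now contains examples/__init__.py) is on the Python path and that the chosen example file has an import-safe, non-hyphenated name such as opencv_viewer_example.py:

# Minimal sketch: invoke a converted example programmatically.
# Assumes wrappers/python is on sys.path; hyphenated filenames such as
# "align-depth2color.py" cannot be imported this way.
from examples import opencv_viewer_example  # import alone has no side effects now

if __name__ == "__main__":
    # Streaming starts only here, when the example's entry point is called.
    opencv_viewer_example.main()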