std::cout << "Setting up visualization" << std::endl;
Zivid::Visualization::Visualizer visualizer;
std::cout << "Visualizing point cloud" << std::endl;
visualizer.showMaximized();
visualizer.show(frame);
visualizer.resetToFit();
std::cout << "Running visualizer. Blocking until window closes." << std::endl;
visualizer.run();
Console.WriteLine("Setting up visualization");
using (var visualizer = new Zivid.NET.Visualization.Visualizer())
Console.WriteLine("Visualizing point cloud");
visualizer.Show(frame);
visualizer.ShowMaximized();
visualizer.ResetToFit();
Console.WriteLine("Running visualizer. Blocking until window closes.");
visualizer.Run();
std::cout << "Getting point cloud from frame" << std::endl;
auto pointCloud = frame.pointCloud();
std::cout << "Setting up visualization" << std::endl;
Zivid::Visualization::Visualizer visualizer;
std::cout << "Visualizing point cloud" << std::endl;
visualizer.showMaximized();
visualizer.show(pointCloud);
visualizer.resetToFit();
std::cout << "Running visualizer. Blocking until window closes." << std::endl;
visualizer.run();
Console.WriteLine("Getting point cloud from frame");
var pointCloud = frame.PointCloud;
Console.WriteLine("Setting up visualization");
var visualizer = new Zivid.NET.Visualization.Visualizer();
Console.WriteLine("Visualizing point cloud");
visualizer.Show(pointCloud);
visualizer.ShowMaximized();
visualizer.ResetToFit();
Console.WriteLine("Running visualizer. Blocking until window closes.");
visualizer.Run();
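Zivid-Python does not provide a built-in visualizer, so the Python snippets below copy the point cloud into NumPy arrays and display it with Open3D. As a minimal sketch of how the frame used below can be obtained, assuming a saved capture (the .zdf file name is a placeholder):

import zivid

app = zivid.Application()
frame = zivid.Frame("frame.zdf")  # load a saved capture; a frame can also come from camera.capture(...)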
point_cloud = frame.point_cloud()
xyz = point_cloud.copy_data("xyz")
rgba = point_cloud.copy_data("rgba")
print("Visualizing point cloud")
display_pointcloud(xyz, rgba[:, :, 0:3])
def display_pointcloud(xyz: np.ndarray, rgb: np.ndarray, normals: Optional[np.ndarray] = None) -> None:
    """Display point cloud provided from 'xyz' with colors from 'rgb'.

    Args:
        rgb: RGB image
        xyz: A numpy array of X, Y and Z point cloud coordinates
        normals: Ordered array of normal vectors, mapped to xyz

    """
    xyz = np.nan_to_num(xyz).reshape(-1, 3)
    if normals is not None:
        normals = np.nan_to_num(normals).reshape(-1, 3)
    rgb = rgb.reshape(-1, 3)

    point_cloud_open3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz))
    point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb / 255)
    if normals is not None:
        point_cloud_open3d.normals = o3d.utility.Vector3dVector(normals)
        print("Open 3D controls:")
        print(" n: for normals")
        print(" 9: for point cloud colored by normals")
        print(" h: for all controls")

    visualizer = o3d.visualization.Visualizer()  # pylint: disable=no-member
    visualizer.create_window()
    visualizer.add_geometry(point_cloud_open3d)

    if normals is None:
        visualizer.get_render_option().background_color = (0, 0, 0)
    visualizer.get_render_option().point_size = 1
    visualizer.get_render_option().show_coordinate_frame = True
    visualizer.get_view_control().set_front([0, 0, -1])
    visualizer.get_view_control().set_up([0, -1, 0])

    visualizer.run()
    visualizer.destroy_window()
Since the Zivid SDK and Zivid-Python do not support 2D color image visualization, we have implemented it using third-party libraries: OpenCV in C++ and Python, and Matplotlib in Python.
First, we convert the point cloud to an OpenCV color image.
std::cout << "Converting point cloud to BGRA image in OpenCV format" << std::endl;
cv::Mat bgra = pointCloudToCvBGRA(pointCloud);
auto bgra = cv::Mat(pointCloud.height(), pointCloud.width(), CV_8UC4);
pointCloud.copyData(&(*bgra.begin<Zivid::ColorBGRA>()));
return bgra;
It is also possible to get the OpenCV color image directly from the Zivid 2D color image.
We can now visualize the color image.
cv::namedWindow("BGR image", cv::WINDOW_AUTOSIZE);
cv::imshow("BGR image", bgra);
cv::waitKey(0);
def _point_cloud_to_cv_bgr(point_cloud: zivid.PointCloud) -> np.ndarray:
    """Get BGR image from point cloud.

    Args:
        point_cloud: Zivid point cloud

    Returns:
        bgr: BGR image (HxWx3 ndarray)

    """
    bgra = point_cloud.copy_data("bgra")

    return bgra[:, :, :3]
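As noted above, the BGRA image can also be obtained directly from a Zivid 2D capture instead of from the point cloud. A minimal sketch of that route, assuming a connected camera (the exact capture API may differ between SDK versions):

import zivid

camera = zivid.Application().connect_camera()
settings_2d = zivid.Settings2D()
settings_2d.acquisitions.append(zivid.Settings2D.Acquisition())
frame_2d = camera.capture(settings_2d)
bgra = frame_2d.image_bgra().copy_data()  # HxWx4 uint8 ndarray, usable directly with OpenCV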
_visualize_and_save_image(bgr, bgr_image_file, "BGR image")

def _visualize_and_save_image(image: np.ndarray, image_file: str, title: str) -> None:
    """Visualize and save image to file.

    Args:
        image: BGR image (HxWx3 ndarray)
        image_file: File name
        title: OpenCV window name

    """
    display_bgr(image, title)
    cv2.imwrite(image_file, image)
def display_rgb(rgb: np.ndarray, title: str = "RGB image", block: bool = True) -> None:
    """Display RGB image.

    Args:
        rgb: RGB image (HxWx3 ndarray)
        title: Image title
        block: Stops the running program until the window is closed

    """
    plt.figure()
    plt.imshow(rgb)
    plt.title(title)
    plt.show(block=block)
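Note that OpenCV uses BGR channel order while Matplotlib expects RGB, so the channels must be reversed before calling display_rgb. A minimal usage sketch, assuming the point_cloud handle and the _point_cloud_to_cv_bgr helper from above:

bgr = _point_cloud_to_cv_bgr(point_cloud)
rgb = bgr[:, :, ::-1]  # reverse channel order: BGR -> RGB
display_rgb(rgb, title="RGB image", block=True)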
Since the Zivid SDK and Zivid-Python do not support depth map visualization, we have implemented it using third-party libraries: OpenCV in C++ and Python, and Matplotlib in Python.
First, we convert the point cloud to an OpenCV depth map.
std::cout << "Converting to Depth map in OpenCV format" << std::endl;
cv::Mat zColorMap = pointCloudToCvZ(pointCloud);
cv::Mat pointCloudToCvZ(const Zivid::PointCloud &pointCloud)
cv::Mat z(pointCloud.height(), pointCloud.width(), CV_8UC1, cv::Scalar(0)); // NOLINT(hicpp-signed-bitwise)
const auto points = pointCloud.copyPointsZ();
// Getting min and max values for X, Y, Z images
const auto *maxZ = std::max_element(points.data(), points.data() + pointCloud.size(), isLesserOrNan);
const auto *minZ = std::max_element(points.data(), points.data() + pointCloud.size(), isGreaterOrNaN);
// Filling in OpenCV matrix with the cloud data
for(size_t i = 0; i < pointCloud.height(); i++)
for(size_t j = 0; j < pointCloud.width(); j++)
if(std::isnan(points(i, j).z))
z.at<uchar>(i, j) = 0;
z.at<uchar>(i, j) =
static_cast<unsigned char>((255.0F * (points(i, j).z - minZ->z) / (maxZ->z - minZ->z)));
// Applying color map
cv::Mat zColorMap;
cv::applyColorMap(z, zColorMap, cv::COLORMAP_VIRIDIS);
// Setting invalid points (nan) to black
for(size_t i = 0; i < pointCloud.height(); i++)
for(size_t j = 0; j < pointCloud.width(); j++)
if(std::isnan(points(i, j).z))
auto &zRGB = zColorMap.at<cv::Vec3b>(i, j);
zRGB[0] = 0;
zRGB[1] = 0;
zRGB[2] = 0;
return zColorMap;
cv::namedWindow("Depth map", cv::WINDOW_AUTOSIZE);
cv::imshow("Depth map", zColorMap);
cv::waitKey(0);
def _point_cloud_to_cv_z(point_cloud: zivid.PointCloud) -> np.ndarray:
    """Get depth map from point cloud.

    Args:
        point_cloud: Zivid point cloud

    Returns:
        depth_map_color_map: Depth map (HxWx3 ndarray)

    """
    depth_map = point_cloud.copy_data("z")
    depth_map_uint8 = ((depth_map - np.nanmin(depth_map)) / (np.nanmax(depth_map) - np.nanmin(depth_map)) * 255).astype(
        np.uint8
    )

    depth_map_color_map = cv2.applyColorMap(depth_map_uint8, cv2.COLORMAP_VIRIDIS)

    # Setting NaNs to black
    depth_map_color_map[np.isnan(depth_map)[:, :]] = 0

    return depth_map_color_map
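A minimal usage sketch tying the conversion to the display and save helper above (the depth_map.png file name is a placeholder of our choosing):

depth_map_color_map = _point_cloud_to_cv_z(point_cloud)
_visualize_and_save_image(depth_map_color_map, "depth_map.png", "Depth map")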
def display_depthmap(xyz: np.ndarray, block: bool = True) -> None:
    """Create and display depthmap.

    Args:
        xyz: A numpy array of X, Y and Z point cloud coordinates
        block: Stops the running program until the window is closed

    """
    plt.figure()
    plt.imshow(
        xyz[:, :, 2],
        vmin=np.nanmin(xyz[:, :, 2]),
        vmax=np.nanmax(xyz[:, :, 2]),
        cmap="viridis",
    )
    plt.colorbar()
    plt.title("Depth map")
    plt.show(block=block)
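A minimal usage sketch, assuming the xyz array copied from the point cloud as in the earlier snippets:

xyz = point_cloud.copy_data("xyz")
display_depthmap(xyz)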
Since the Zivid SDK does not support visualization of normals, we have implemented it using third-party libraries: PCL in C++ and Open3D in Python.
We can visualize normals as follows.
std::cout << "Visualizing normals" << std::endl;
visualizePointCloudAndNormalsPCL(pointCloudPCL.makeShared(), pointCloudWithNormalsPCL.makeShared());
void visualizePointCloudAndNormalsPCL(
const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr &pointCloud,
const pcl::PointCloud<pcl::PointXYZRGBNormal>::ConstPtr &pointCloudWithNormals)
auto viewer = pcl::visualization::PCLVisualizer("Viewer");
int viewRgb(0);
viewer.createViewPort(0.0, 0.0, 0.5, 1.0, viewRgb);
viewer.addText("Cloud RGB", 0, 0, "RGBText", viewRgb);
viewer.addPointCloud<pcl::PointXYZRGB>(pointCloud, "cloud", viewRgb);
const int normalsSkipped = 10;
std::cout << "Note! 1 out of " << normalsSkipped << " normals are visualized" << std::endl;
int viewNormals(0);
viewer.createViewPort(0.5, 0.0, 1.0, 1.0, viewNormals);
viewer.addText("Cloud Normals", 0, 0, "NormalsText", viewNormals);
viewer.addPointCloud<pcl::PointXYZRGBNormal>(pointCloudWithNormals, "cloudNormals", viewNormals);
viewer.addPointCloudNormals<pcl::PointXYZRGBNormal>(
pointCloudWithNormals, normalsSkipped, 1, "normals", viewNormals);
viewer.setCameraPosition(0, 0, -100, 0, -1, 0);
std::cout << "Press r to centre and zoom the viewer so that the entire cloud is visible" << std::endl;
std::cout << "Press q to exit the viewer application" << std::endl;
while(!viewer.wasStopped())
viewer.spinOnce(100);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
print("Visualizing normals in 2D")
display_rgb(rgb=rgba[:, :, :3], title="RGB image", block=False)
display_rgb(rgb=normals_colormap, title="Colormapped normals", block=True)
print("Visualizing normals in 3D")
display_pointcloud_with_downsampled_normals(point_cloud, zivid.PointCloud.Downsampling.by4x4)
def display_pointcloud_with_downsampled_normals(
    point_cloud: PointCloud,
    downsampling: PointCloud.Downsampling,
) -> None:
    """Display point cloud with downsampled normals.

    Args:
        point_cloud: A Zivid point cloud handle
        downsampling: A valid Zivid downsampling factor to apply to normals

    """
    rgb = point_cloud.copy_data("rgba")[:, :, :3]
    xyz = point_cloud.copy_data("xyz")
    point_cloud.downsample(downsampling)
    normals = point_cloud.copy_data("normals")

    display_pointcloud(xyz=xyz, rgb=rgb, normals=normals)
This tutorial has shown how to use the Zivid SDK to visualize point clouds in C++ and C#, and how to use third-party libraries to visualize them in Python.
Third-party libraries are also used to visualize color images, depth maps, and normals in C++, C#, and Python.