Merge pull request #1011 from luxonis/tof_docs
Updated ToF docs
Showing 7 changed files with 262 additions and 60 deletions.
@@ -1,52 +1,119 @@

The first script below, built around the old tofConfig.depthParams fields, is the previous version of the ToF depth example; the updated example that replaces it follows.
#!/usr/bin/env python3

import time
import cv2
import depthai as dai
import numpy as np

pipeline = dai.Pipeline()

cam_a = pipeline.create(dai.node.Camera)
# We assume the ToF camera sensor is on port CAM_A
cam_a.setBoardSocket(dai.CameraBoardSocket.CAM_A)

tof = pipeline.create(dai.node.ToF)

# Configure the ToF node
tofConfig = tof.initialConfig.get()
# tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN
tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MAX
tofConfig.depthParams.avgPhaseShuffle = False
tofConfig.depthParams.minimumAmplitude = 3.0
tof.initialConfig.set(tofConfig)
# Link the ToF sensor to the ToF node
cam_a.raw.link(tof.input)

xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("depth")
tof.depth.link(xout.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    print('Connected cameras:', device.getConnectedCameraFeatures())
    q = device.getOutputQueue(name="depth")

    while True:
        imgFrame = q.get()  # blocking call, waits until new data has arrived
        depth_map = imgFrame.getFrame()

        # Colorize the depth frame with the jet colormap
        depth_downscaled = depth_map[::4]
        non_zero_depth = depth_downscaled[depth_downscaled != 0]  # Remove invalid depth values
        if len(non_zero_depth) == 0:
            min_depth, max_depth = 0, 0
        else:
            min_depth = np.percentile(non_zero_depth, 3)
            max_depth = np.percentile(non_zero_depth, 97)
        depth_colorized = np.interp(depth_map, (min_depth, max_depth), (0, 255)).astype(np.uint8)
        depth_colorized = cv2.applyColorMap(depth_colorized, cv2.COLORMAP_JET)

        cv2.imshow("Colorized depth", depth_colorized)

        if cv2.waitKey(1) == ord('q'):
            break
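The example above only displays the depth map, but the raw values can also be read numerically. Below is a minimal sketch (not part of the diff), assuming the ToF depth frame is a uint16 array in millimeters with 0 marking invalid pixels, which matches how both examples treat it:

import numpy as np

# Hypothetical helper: median distance in a small window around the image center.
# Assumes a uint16 depth frame in millimeters where 0 means "no valid measurement".
def center_distance_mm(depth_map, window=10):
    h, w = depth_map.shape
    roi = depth_map[h // 2 - window:h // 2 + window, w // 2 - window:w // 2 + window]
    valid = roi[roi != 0]  # drop invalid (zero) readings
    return float(np.median(valid)) if valid.size else None

Calling, for example, print(center_distance_mm(imgFrame.getFrame())) inside either display loop would print the distance to whatever is in front of the sensor.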
The updated example that replaces it switches to the newer per-feature ToF configuration fields and adds runtime reconfiguration: a tofConfig XLinkIn queue feeds tof.inputConfig, and keyboard shortcuts toggle the individual correction stages, the phase-unwrapping level, and the median filter.

print(dai.__version__)

cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET)
cvColorMap[0] = [0, 0, 0]


def create_pipeline():
    pipeline = dai.Pipeline()

    tof = pipeline.create(dai.node.ToF)

    # Configure the ToF node
    tofConfig = tof.initialConfig.get()

    # Optional. Best accuracy, but adds motion blur.
    # See the ToF node docs on how to reduce/eliminate motion blur.
    tofConfig.enableOpticalCorrection = True
    tofConfig.enablePhaseShuffleTemporalFilter = True
    tofConfig.phaseUnwrappingLevel = 4
    tofConfig.phaseUnwrapErrorThreshold = 300

    tofConfig.enableTemperatureCorrection = False  # Not yet supported

    xinTofConfig = pipeline.create(dai.node.XLinkIn)
    xinTofConfig.setStreamName("tofConfig")
    xinTofConfig.out.link(tof.inputConfig)

    tof.initialConfig.set(tofConfig)

    cam_tof = pipeline.create(dai.node.Camera)
    cam_tof.setFps(60)  # ToF node will produce depth frames at 1/2 of this rate
    cam_tof.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    cam_tof.raw.link(tof.input)

    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName("depth")
    tof.depth.link(xout.input)

    tofConfig = tof.initialConfig.get()

    return pipeline, tofConfig


if __name__ == '__main__':
    pipeline, tofConfig = create_pipeline()

    with dai.Device(pipeline) as device:
        print('Connected cameras:', device.getConnectedCameraFeatures())
        qDepth = device.getOutputQueue(name="depth")

        tofConfigInQueue = device.getInputQueue("tofConfig")

        counter = 0
        while True:
            start = time.time()
            key = cv2.waitKey(1)
            if key == ord('f'):
                tofConfig.enableFPPNCorrection = not tofConfig.enableFPPNCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('o'):
                tofConfig.enableOpticalCorrection = not tofConfig.enableOpticalCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('w'):
                tofConfig.enableWiggleCorrection = not tofConfig.enableWiggleCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('t'):
                tofConfig.enableTemperatureCorrection = not tofConfig.enableTemperatureCorrection
                tofConfigInQueue.send(tofConfig)
            elif key == ord('q'):
                break
            elif key == ord('0'):
                tofConfig.enablePhaseUnwrapping = False
                tofConfig.phaseUnwrappingLevel = 0
                tofConfigInQueue.send(tofConfig)
            elif key == ord('1'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 1
                tofConfigInQueue.send(tofConfig)
            elif key == ord('2'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 2
                tofConfigInQueue.send(tofConfig)
            elif key == ord('3'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 3
                tofConfigInQueue.send(tofConfig)
            elif key == ord('4'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 4
                tofConfigInQueue.send(tofConfig)
            elif key == ord('5'):
                tofConfig.enablePhaseUnwrapping = True
                tofConfig.phaseUnwrappingLevel = 5
                tofConfigInQueue.send(tofConfig)
            elif key == ord('m'):
                medianSettings = [dai.MedianFilter.MEDIAN_OFF, dai.MedianFilter.KERNEL_3x3,
                                  dai.MedianFilter.KERNEL_5x5, dai.MedianFilter.KERNEL_7x7]
                currentMedian = tofConfig.median
                nextMedian = medianSettings[(medianSettings.index(currentMedian) + 1) % len(medianSettings)]
                print(f"Changing median to {nextMedian.name} from {currentMedian.name}")
                tofConfig.median = nextMedian
                tofConfigInQueue.send(tofConfig)

            imgFrame = qDepth.get()  # blocking call, waits until new data has arrived
            depth_map = imgFrame.getFrame()
            max_depth = (tofConfig.phaseUnwrappingLevel + 1) * 1500  # 100 MHz modulation freq.
            depth_colorized = np.interp(depth_map, (0, max_depth), (0, 255)).astype(np.uint8)
            depth_colorized = cv2.applyColorMap(depth_colorized, cvColorMap)

            cv2.imshow("Colorized depth", depth_colorized)
            counter += 1

        device.close()
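The colorization in the main loop scales the frame to max_depth = (phaseUnwrappingLevel + 1) * 1500, i.e. about 1.5 m of unambiguous range per wrap at the 100 MHz modulation frequency noted in the comment. A minimal sketch of the ranges this rule implies for the keyboard-selectable levels (an illustration only, not part of the diff):

# Approximate maximum distance for each phase-unwrapping level, following the
# same (level + 1) * 1500 mm rule the example uses for colorization.
# Assumption: 100 MHz modulation, ~1.5 m of unambiguous range per phase wrap.
for level in range(6):  # levels 0-5, matching keys '0'..'5' above
    print(f"phaseUnwrappingLevel={level}: up to ~{(level + 1) * 1.5:.1f} m")

Higher levels extend the measurable range but make unwrapping mistakes more likely, which is presumably what the phaseUnwrapErrorThreshold setting above is meant to catch.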