Merge branch 'release_v2.18.0.0' into main
themarpe committed Oct 20, 2022
2 parents 07e7d03 + 8d9a1c6 commit 3aea75a
Showing 40 changed files with 1,079 additions and 110 deletions.
2 changes: 2 additions & 0 deletions CMakeLists.txt
@@ -100,6 +100,7 @@ pybind11_add_module(${TARGET_NAME}
src/pipeline/AssetManagerBindings.cpp
src/openvino/OpenVINOBindings.cpp
src/log/LogBindings.cpp
src/VersionBindings.cpp

src/pipeline/node/NodeBindings.cpp

@@ -124,6 +125,7 @@ pybind11_add_module(${TARGET_NAME}
src/pipeline/node/FeatureTrackerBindings.cpp
src/pipeline/node/AprilTagBindings.cpp
src/pipeline/node/DetectionParserBindings.cpp
src/pipeline/node/WarpBindings.cpp

src/pipeline/datatype/ADatatypeBindings.cpp
src/pipeline/datatype/AprilTagConfigBindings.cpp
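
The two new source files register this release's additions with pybind11. A minimal sketch of what they surface on the Python side, inferred from the changes elsewhere in this commit (the warp-mesh example below switches to dai.node.Warp); the exact API bound by VersionBindings.cpp is not shown in this diff, so only the package version string is queried:

import depthai as dai

pipeline = dai.Pipeline()
warp = pipeline.create(dai.node.Warp)  # node registered by WarpBindings.cpp

print(dai.__version__)  # e.g. '2.18.0.0' for this release
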
9 changes: 8 additions & 1 deletion ci/Dockerfile
@@ -1,11 +1,18 @@
FROM python:3.9-bullseye

RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgtk2.0-dev libgtk-3-dev libatlas-base-dev gfortran git
RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgtk2.0-dev libgtk-3-dev libatlas-base-dev gfortran git libopencv-dev

ADD ci/docker_dependencies.sh .
RUN ./docker_dependencies.sh

RUN pip install -U pip && pip install --extra-index-url https://www.piwheels.org/simple/ --prefer-binary opencv-python

# Copy over the files
COPY . /depthai-python

# Install C++ library
RUN cmake -S /depthai-python/depthai-core -B /build -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=ON -D CMAKE_INSTALL_PREFIX=/usr/local
RUN cmake --build /build --parallel 4 --config Release --target install

# Install Python library
RUN cd /depthai-python && python3 -m pip install .
2 changes: 1 addition & 1 deletion depthai-core
Submodule depthai-core updated 48 files
+3 −1 CMakeLists.txt
+2 −2 cmake/Depthai/DepthaiBootloaderConfig.cmake
+1 −1 cmake/Depthai/DepthaiDeviceSideConfig.cmake
+1 −1 docs/Doxyfile.in
+7 −2 examples/CMakeLists.txt
+5 −0 examples/ColorCamera/rgb_preview.cpp
+2 −2 examples/IMU/imu_firmware_update.cpp
+2 −2 examples/IMU/imu_gyroscope_accelerometer.cpp
+1 −1 examples/IMU/imu_rotation_vector.cpp
+5 −5 examples/Script/script_change_pipeline_flow.cpp
+2 −3 examples/SpatialDetection/spatial_calculator_multi_roi.cpp
+11 −21 examples/SpatialDetection/spatial_mobilenet.cpp
+11 −20 examples/SpatialDetection/spatial_mobilenet_mono.cpp
+11 −21 examples/SpatialDetection/spatial_tiny_yolo.cpp
+74 −0 examples/Warp/warp_mesh.cpp
+1 −1 examples/bootloader/bootloader_version.cpp
+50 −0 examples/bootloader/flash_user_bootloader.cpp
+2 −2 examples/calibration/calibration_factory_reset.cpp
+0 −1 examples/host_side/device_information.cpp
+18 −3 include/depthai/common/CameraBoardSocket.hpp
+25 −22 include/depthai/device/CalibrationHandler.hpp
+1 −1 include/depthai/device/Device.hpp
+52 −1 include/depthai/device/DeviceBase.hpp
+22 −35 include/depthai/device/DeviceBootloader.hpp
+43 −0 include/depthai/device/Version.hpp
+97 −19 include/depthai/pipeline/datatype/CameraControl.hpp
+21 −0 include/depthai/pipeline/datatype/ImgFrame.hpp
+10 −0 include/depthai/pipeline/node/ColorCamera.hpp
+20 −0 include/depthai/pipeline/node/MonoCamera.hpp
+6 −4 include/depthai/pipeline/node/Script.hpp
+99 −0 include/depthai/pipeline/node/Warp.hpp
+1 −0 include/depthai/pipeline/nodes.hpp
+3 −1 include/depthai/xlink/XLinkConnection.hpp
+1 −1 shared/depthai-bootloader-shared
+1 −1 shared/depthai-shared
+85 −81 src/device/CalibrationHandler.cpp
+40 −0 src/device/DeviceBase.cpp
+147 −65 src/device/DeviceBootloader.cpp
+74 −0 src/device/Version.cpp
+56 −0 src/pipeline/datatype/CameraControl.cpp
+12 −0 src/pipeline/datatype/ImgFrame.cpp
+68 −4 src/pipeline/node/ColorCamera.cpp
+1 −0 src/pipeline/node/DetectionNetwork.cpp
+22 −1 src/pipeline/node/MonoCamera.cpp
+6 −2 src/pipeline/node/Script.cpp
+98 −0 src/pipeline/node/Warp.cpp
+14 −0 src/utility/Initialization.cpp
+11 −21 tests/src/stability_stress_test.cpp
13 changes: 13 additions & 0 deletions examples/ColorCamera/rgb_camera_control.py
@@ -32,6 +32,8 @@
For the 'Select control: ...' options, use these keys to modify the value:
'-' or '_' to decrease
'+' or '=' to increase
'/' to toggle showing camera settings: exposure, ISO, lens position, color temperature
"""

import depthai as dai
@@ -117,6 +119,7 @@ def clamp(num, v0, v1):
luma_denoise = 0
chroma_denoise = 0
control = 'none'
show = False

awb_mode = cycle([item for name, item in vars(dai.CameraControl.AutoWhiteBalanceMode).items() if name.isupper()])
anti_banding_mode = cycle([item for name, item in vars(dai.CameraControl.AntiBandingMode).items() if name.isupper()])
@@ -129,6 +132,13 @@ def clamp(num, v0, v1):

ispFrames = ispQueue.tryGetAll()
for ispFrame in ispFrames:
if show:
txt = f"[{ispFrame.getSequenceNum()}] "
txt += f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
txt += f"ISO: {ispFrame.getSensitivity()}, "
txt += f"Lens position: {ispFrame.getLensPosition()}, "
txt += f"Color temp: {ispFrame.getColorTemperature()} K"
print(txt)
cv2.imshow('isp', ispFrame.getCvFrame())

# Send new cfg to camera
@@ -150,6 +160,9 @@ def clamp(num, v0, v1):
key = cv2.waitKey(1)
if key == ord('q'):
break
elif key == ord('/'):
show = not show
if not show: print("Printing camera settings: OFF")
elif key == ord('c'):
ctrl = dai.CameraControl()
ctrl.setCaptureStill(True)
3 changes: 3 additions & 0 deletions examples/ColorCamera/rgb_preview.py
@@ -26,6 +26,9 @@
print('Connected cameras: ', device.getConnectedCameras())
# Print out usb speed
print('Usb speed: ', device.getUsbSpeed().name)
# Bootloader version
if device.getBootloaderVersion() is not None:
print('Bootloader version: ', device.getBootloaderVersion())

# Output queue will be used to get the rgb frames from the output defined above
qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
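
A minimal standalone sketch of the same query, using only the Device API shown in this diff (the is-not-None guard above suggests getBootloaderVersion() returns None when no bootloader is flashed — an assumption, not verified here):

import depthai as dai

with dai.Device() as device:
    blVersion = device.getBootloaderVersion()
    if blVersion is not None:
        print('Bootloader version:', blVersion)
    else:
        print('No bootloader flashed')
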
88 changes: 88 additions & 0 deletions examples/ColorCamera/rgb_undistort.py
@@ -0,0 +1,88 @@
import cv2
import depthai as dai
import numpy as np

camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P
camSocket = dai.CameraBoardSocket.RGB
ispScale = (1,2)

def getMesh(calibData, ispSize):
M1 = np.array(calibData.getCameraIntrinsics(camSocket, ispSize[0], ispSize[1]))
d1 = np.array(calibData.getDistortionCoefficients(camSocket))
R1 = np.identity(3)
mapX, mapY = cv2.initUndistortRectifyMap(M1, d1, R1, M1, ispSize, cv2.CV_32FC1)

meshCellSize = 16
mesh0 = []
    # Creates a subsampled mesh which will be loaded onto the device to undistort the image
for y in range(mapX.shape[0] + 1): # iterating over height of the image
if y % meshCellSize == 0:
rowLeft = []
for x in range(mapX.shape[1]): # iterating over width of the image
if x % meshCellSize == 0:
if y == mapX.shape[0] and x == mapX.shape[1]:
rowLeft.append(mapX[y - 1, x - 1])
rowLeft.append(mapY[y - 1, x - 1])
elif y == mapX.shape[0]:
rowLeft.append(mapX[y - 1, x])
rowLeft.append(mapY[y - 1, x])
elif x == mapX.shape[1]:
rowLeft.append(mapX[y, x - 1])
rowLeft.append(mapY[y, x - 1])
else:
rowLeft.append(mapX[y, x])
rowLeft.append(mapY[y, x])
if (mapX.shape[1] % meshCellSize) % 2 != 0:
rowLeft.append(0)
rowLeft.append(0)

mesh0.append(rowLeft)

mesh0 = np.array(mesh0)
meshWidth = mesh0.shape[1] // 2
meshHeight = mesh0.shape[0]
mesh0.resize(meshWidth * meshHeight, 2)

mesh = list(map(tuple, mesh0))

return mesh, meshWidth, meshHeight

def create_pipeline(calibData):
pipeline = dai.Pipeline()

cam = pipeline.create(dai.node.ColorCamera)
cam.setIspScale(ispScale)
cam.setBoardSocket(camSocket)
cam.setResolution(camRes)

manip = pipeline.create(dai.node.ImageManip)
mesh, meshWidth, meshHeight = getMesh(calibData, cam.getIspSize())
manip.setWarpMesh(mesh, meshWidth, meshHeight)
manip.setMaxOutputFrameSize(cam.getIspWidth() * cam.getIspHeight() * 3 // 2)
cam.isp.link(manip.inputImage)

cam_xout = pipeline.create(dai.node.XLinkOut)
cam_xout.setStreamName("Undistorted")
manip.out.link(cam_xout.input)

dist_xout = pipeline.create(dai.node.XLinkOut)
dist_xout.setStreamName("Distorted")
cam.isp.link(dist_xout.input)

return pipeline

with dai.Device() as device:

calibData = device.readCalibration()
pipeline = create_pipeline(calibData)
device.startPipeline(pipeline)

queues = [device.getOutputQueue(name, 4, False) for name in ['Undistorted', 'Distorted']]

while True:
for q in queues:
frame = q.get().getCvFrame()
cv2.imshow(q.getName(), frame)

if cv2.waitKey(1) == ord('q'):
break
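
As a sanity check on getMesh() above: THE_1080_P downscaled by ispScale = (1,2) gives a 960×540 ISP output, and with meshCellSize = 16 the subsampled mesh comes out to 60×34 points (960 is a multiple of 16, so the zero-padding branch is never taken). A short sketch of that arithmetic, assuming this exact configuration:

# Expected mesh dimensions for the configuration above (assumption: 1080P
# input with ispScale (1,2) -> 960x540 ISP output, meshCellSize = 16).
ispWidth, ispHeight = 1920 // 2, 1080 // 2           # 960 x 540
meshCellSize = 16

meshWidth = len(range(0, ispWidth, meshCellSize))    # 60 columns
meshHeight = len(range(0, ispHeight, meshCellSize))  # 34 rows
print(meshWidth, meshHeight)                         # -> 60 34
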
4 changes: 2 additions & 2 deletions examples/IMU/imu_firmware_update.py
@@ -55,8 +55,8 @@ def timeDeltaToMilliS(delta) -> float:
acceleroValues = imuPacket.acceleroMeter
gyroValues = imuPacket.gyroscope

acceleroTs = acceleroValues.timestamp.get()
gyroTs = gyroValues.timestamp.get()
acceleroTs = acceleroValues.getTimestampDevice()
gyroTs = gyroValues.getTimestampDevice()
if baseTs is None:
baseTs = acceleroTs if acceleroTs < gyroTs else gyroTs
acceleroTs = timeDeltaToMilliS(acceleroTs - baseTs)
4 changes: 2 additions & 2 deletions examples/IMU/imu_gyroscope_accelerometer.py
@@ -46,8 +46,8 @@ def timeDeltaToMilliS(delta) -> float:
acceleroValues = imuPacket.acceleroMeter
gyroValues = imuPacket.gyroscope

acceleroTs = acceleroValues.timestamp.get()
gyroTs = gyroValues.timestamp.get()
acceleroTs = acceleroValues.getTimestampDevice()
gyroTs = gyroValues.getTimestampDevice()
if baseTs is None:
baseTs = acceleroTs if acceleroTs < gyroTs else gyroTs
acceleroTs = timeDeltaToMilliS(acceleroTs - baseTs)
2 changes: 1 addition & 1 deletion examples/IMU/imu_rotation_vector.py
@@ -43,7 +43,7 @@ def timeDeltaToMilliS(delta) -> float:
for imuPacket in imuPackets:
rVvalues = imuPacket.rotationVector

rvTs = rVvalues.timestamp.get()
rvTs = rVvalues.getTimestampDevice()
if baseTs is None:
baseTs = rvTs
rvTs = rvTs - baseTs
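
The same one-line change recurs across all three IMU examples: the timestamp.get() accessor is replaced by getTimestampDevice(), whose return value still supports subtraction and total_seconds(), so the surrounding delta arithmetic is untouched. A minimal sketch of the updated pattern, mirroring the example code above (queue setup elided):

def timeDeltaToMilliS(delta) -> float:
    return delta.total_seconds() * 1000

baseTs = None
for imuPacket in imuData.packets:  # imuData: an IMUData message from the queue
    ts = imuPacket.acceleroMeter.getTimestampDevice()
    if baseTs is None:
        baseTs = ts
    print(f"Accelerometer t: {timeDeltaToMilliS(ts - baseTs):.3f} ms")
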
2 changes: 1 addition & 1 deletion examples/ImageManip/image_manip_warp_mesh.py
@@ -12,7 +12,7 @@
maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3

# Warp preview frame 1
manip1 = pipeline.create(dai.node.ImageManip)
manip1 = pipeline.create(dai.node.Warp)
# Create a custom warp mesh
tl = dai.Point2f(20, 20)
tr = dai.Point2f(460, 20)
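
For context, a minimal sketch of this example after the switch to the dedicated Warp node, assuming setWarpMesh, setOutputSize, and setMaxOutputFrameSize carry over as suggested by the new Warp.hpp header and the mesh points already defined in the file:

import depthai as dai

pipeline = dai.Pipeline()
camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(496, 496)

warp = pipeline.create(dai.node.Warp)
# 2x2 warp mesh: top-left, top-right, bottom-left, bottom-right
tl, tr = dai.Point2f(20, 20), dai.Point2f(460, 20)
bl, br = dai.Point2f(20, 460), dai.Point2f(460, 460)
warp.setWarpMesh([tl, tr, bl, br], 2, 2)
warp.setOutputSize(512, 512)
warp.setMaxOutputFrameSize(512 * 512 * 3)
camRgb.preview.link(warp.inputImage)

xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName('warped')
warp.out.link(xout.input)
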
116 changes: 116 additions & 0 deletions examples/MonoCamera/mono_preview_alternate_pro.py
@@ -0,0 +1,116 @@
#!/usr/bin/env python3

import cv2
import depthai as dai

if 1: # PoE config
fps = 30
res = dai.MonoCameraProperties.SensorResolution.THE_400_P
poolSize = 24 # default 3, increased to prevent desync
else: # USB
fps = 30
res = dai.MonoCameraProperties.SensorResolution.THE_720_P
poolSize = 8 # default 3, increased to prevent desync

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoL = pipeline.create(dai.node.MonoCamera)
monoR = pipeline.create(dai.node.MonoCamera)

monoL.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoL.setResolution(res)
monoL.setFps(fps)
monoL.setNumFramesPool(poolSize)
monoR.setBoardSocket(dai.CameraBoardSocket.RIGHT)
monoR.setResolution(res)
monoR.setFps(fps)
monoR.setNumFramesPool(poolSize)

xoutDotL = pipeline.create(dai.node.XLinkOut)
xoutDotR = pipeline.create(dai.node.XLinkOut)
xoutFloodL = pipeline.create(dai.node.XLinkOut)
xoutFloodR = pipeline.create(dai.node.XLinkOut)

xoutDotL.setStreamName('dot-left')
xoutDotR.setStreamName('dot-right')
xoutFloodL.setStreamName('flood-left')
xoutFloodR.setStreamName('flood-right')
streams = ['dot-left', 'dot-right', 'flood-left', 'flood-right']

# Script node for frame routing and IR dot/flood alternate
script = pipeline.create(dai.node.Script)
script.setProcessor(dai.ProcessorType.LEON_CSS)
script.setScript("""
dotBright = 500 # Note: recommended to not exceed 765, for max duty cycle
floodBright = 200
LOGGING = False # Set `True` for latency/timings debugging
node.warn(f'IR drivers detected: {str(Device.getIrDrivers())}')
flagDot = False
while True:
# Wait first for a frame event, received at MIPI start-of-frame
event = node.io['event'].get()
if LOGGING: tEvent = Clock.now()
# Immediately reconfigure the IR driver.
# Note the logic is inverted, as it applies for next frame
Device.setIrLaserDotProjectorBrightness(0 if flagDot else dotBright)
Device.setIrFloodLightBrightness(floodBright if flagDot else 0)
if LOGGING: tIrSet = Clock.now()
# Wait for the actual frames (after MIPI capture and ISP proc is done)
frameL = node.io['frameL'].get()
if LOGGING: tLeft = Clock.now()
frameR = node.io['frameR'].get()
if LOGGING: tRight = Clock.now()
if LOGGING:
latIR = (tIrSet - tEvent ).total_seconds() * 1000
latEv = (tEvent - event.getTimestamp() ).total_seconds() * 1000
latProcL = (tLeft - event.getTimestamp() ).total_seconds() * 1000
diffRecvRL = (tRight - tLeft ).total_seconds() * 1000
node.warn(f'T[ms] latEv:{latEv:5.3f} latIR:{latIR:5.3f} latProcL:{latProcL:6.3f} '
+ f' diffRecvRL:{diffRecvRL:5.3f}')
# Sync checks
diffSeq = frameL.getSequenceNum() - event.getSequenceNum()
diffTsEv = (frameL.getTimestamp() - event.getTimestamp()).total_seconds() * 1000
diffTsRL = (frameR.getTimestamp() - frameL.getTimestamp()).total_seconds() * 1000
if diffSeq or diffTsEv or (abs(diffTsRL) > 0.8):
node.error(f'frame/event desync! Fr-Ev: {diffSeq} frames,'
+ f' {diffTsEv:.3f} ms; R-L: {diffTsRL:.3f} ms')
# Route the frames to their respective outputs
node.io['dotL' if flagDot else 'floodL'].send(frameL)
node.io['dotR' if flagDot else 'floodR'].send(frameR)
flagDot = not flagDot
""")

# Linking
monoL.frameEvent.link(script.inputs['event'])
monoL.out.link(script.inputs['frameL'])
monoR.out.link(script.inputs['frameR'])

script.outputs['dotL'].link(xoutDotL.input)
script.outputs['dotR'].link(xoutDotR.input)
script.outputs['floodL'].link(xoutFloodL.input)
script.outputs['floodR'].link(xoutFloodR.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
queues = [device.getOutputQueue(name=s, maxSize=4, blocking=False) for s in streams]

while True:
for q in queues:
pkt = q.tryGet()
if pkt is not None:
name = q.getName()
frame = pkt.getCvFrame()
cv2.imshow(name, frame)

if cv2.waitKey(5) == ord('q'):
break
Empty file modified examples/SpatialDetection/spatial_calculator_multi_roi.py (mode changed 100644 → 100755)
