-
Notifications
You must be signed in to change notification settings - Fork 193
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'release_v2.18.0.0' into main
- Loading branch information
Showing 40 changed files with 1,079 additions and 110 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,11 +1,18 @@ | ||
FROM python:3.9-bullseye

# Build prerequisites for OpenCV and the depthai C++ core library
RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgtk2.0-dev libgtk-3-dev libatlas-base-dev gfortran git libopencv-dev

ADD ci/docker_dependencies.sh .
RUN ./docker_dependencies.sh

RUN pip install -U pip && pip install --extra-index-url https://www.piwheels.org/simple/ --prefer-binary opencv-python

# Copy over the files
COPY . /depthai-python

# Install C++ library
RUN cmake -S /depthai-python/depthai-core -B /build -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=ON -D CMAKE_INSTALL_PREFIX=/usr/local
# Fix: "--config Relase" -> "--config Release" (typo; the misspelled
# configuration name is not what the Release build above produced)
RUN cmake --build /build --parallel 4 --config Release --target install

# Install Python library
RUN cd /depthai-python && python3 -m pip install .
Submodule depthai-core
updated
48 files
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,88 @@ | ||
import cv2 | ||
import depthai as dai | ||
import numpy as np | ||
|
||
# Camera configuration shared by getMesh() and create_pipeline().
camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P  # full 1080p sensor readout
camSocket = dai.CameraBoardSocket.RGB  # board socket of the color camera
ispScale = (1,2)  # ISP (numerator, denominator) downscale -> half resolution output
|
||
def getMesh(calibData, ispSize):
    """Build a subsampled undistortion warp mesh for the color camera.

    Parameters
    ----------
    calibData : dai.CalibrationHandler
        Device calibration; provides intrinsics and distortion coefficients.
    ispSize : (int, int)
        ISP output (width, height) the mesh should cover.

    Returns
    -------
    (mesh, meshWidth, meshHeight) where mesh is a flat list of (x, y)
    tuples sampled every meshCellSize pixels, suitable for
    ImageManip.setWarpMesh().
    """
    M1 = np.array(calibData.getCameraIntrinsics(camSocket, ispSize[0], ispSize[1]))
    d1 = np.array(calibData.getDistortionCoefficients(camSocket))
    R1 = np.identity(3)
    mapX, mapY = cv2.initUndistortRectifyMap(M1, d1, R1, M1, ispSize, cv2.CV_32FC1)

    meshCellSize = 16
    mesh0 = []
    # Creates subsampled mesh which will be loaded on to device to undistort the image
    for y in range(mapX.shape[0] + 1):  # iterating over height of the image
        if y % meshCellSize == 0:
            rowLeft = []
            # Fix: iterate one past the last column (shape[1] + 1), matching the
            # row loop above; otherwise the `x == mapX.shape[1]` edge branches
            # below were unreachable and the right image border was never sampled.
            for x in range(mapX.shape[1] + 1):  # iterating over width of the image
                if x % meshCellSize == 0:
                    # Clamp reads at the bottom/right image edges
                    if y == mapX.shape[0] and x == mapX.shape[1]:
                        rowLeft.append(mapX[y - 1, x - 1])
                        rowLeft.append(mapY[y - 1, x - 1])
                    elif y == mapX.shape[0]:
                        rowLeft.append(mapX[y - 1, x])
                        rowLeft.append(mapY[y - 1, x])
                    elif x == mapX.shape[1]:
                        rowLeft.append(mapX[y, x - 1])
                        rowLeft.append(mapY[y, x - 1])
                    else:
                        rowLeft.append(mapX[y, x])
                        rowLeft.append(mapY[y, x])
            # Pad so each row holds an even number of values
            if (mapX.shape[1] % meshCellSize) % 2 != 0:
                rowLeft.append(0)
                rowLeft.append(0)

            mesh0.append(rowLeft)

    mesh0 = np.array(mesh0)
    meshWidth = mesh0.shape[1] // 2
    meshHeight = mesh0.shape[0]
    mesh0.resize(meshWidth * meshHeight, 2)

    mesh = list(map(tuple, mesh0))

    return mesh, meshWidth, meshHeight
|
||
def create_pipeline(calibData):
    """Build a pipeline streaming both the raw ISP output ("Distorted")
    and a mesh-undistorted copy ("Undistorted") of the color camera."""
    pipeline = dai.Pipeline()

    # Color camera source
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setIspScale(ispScale)
    cam.setBoardSocket(camSocket)
    cam.setResolution(camRes)

    # ImageManip applies the undistortion warp mesh on-device
    manip = pipeline.create(dai.node.ImageManip)
    warpMesh, meshW, meshH = getMesh(calibData, cam.getIspSize())
    manip.setWarpMesh(warpMesh, meshW, meshH)
    # NV12 frame size: width * height * 3/2 bytes
    manip.setMaxOutputFrameSize(cam.getIspWidth() * cam.getIspHeight() * 3 // 2)
    cam.isp.link(manip.inputImage)

    # One XLinkOut per stream: undistorted (via manip) and raw ISP
    for streamName, src in (("Undistorted", manip.out), ("Distorted", cam.isp)):
        xout = pipeline.create(dai.node.XLinkOut)
        xout.setStreamName(streamName)
        src.link(xout.input)

    return pipeline
|
||
with dai.Device() as device:
    # Read the device calibration, then build and start the pipeline
    calibData = device.readCalibration()
    pipeline = create_pipeline(calibData)
    device.startPipeline(pipeline)

    streamNames = ['Undistorted', 'Distorted']
    queues = [device.getOutputQueue(name, 4, False) for name in streamNames]

    while True:
        # Display the latest frame of each stream in its own window
        for q in queues:
            cv2.imshow(q.getName(), q.get().getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,116 @@ | ||
#!/usr/bin/env python3 | ||
|
||
import cv2 | ||
import depthai as dai | ||
|
||
# Capture configuration. The truthy literal selects the PoE settings;
# change it to 0 for the USB variant.
POE = 1
fps = 30  # same frame rate for both connection types
if POE:
    res = dai.MonoCameraProperties.SensorResolution.THE_400_P
    poolSize = 24  # default 3, increased to prevent desync
else:  # USB
    res = dai.MonoCameraProperties.SensorResolution.THE_720_P
    poolSize = 8  # default 3, increased to prevent desync
|
||
# Create pipeline
pipeline = dai.Pipeline()

# Define sources: one mono camera per side, identically configured
monoL = pipeline.create(dai.node.MonoCamera)
monoR = pipeline.create(dai.node.MonoCamera)

for mono, socket in ((monoL, dai.CameraBoardSocket.LEFT),
                     (monoR, dai.CameraBoardSocket.RIGHT)):
    mono.setBoardSocket(socket)
    mono.setResolution(res)
    mono.setFps(fps)
    mono.setNumFramesPool(poolSize)

# One XLinkOut per (illumination mode, camera side) combination
xoutDotL = pipeline.create(dai.node.XLinkOut)
xoutDotR = pipeline.create(dai.node.XLinkOut)
xoutFloodL = pipeline.create(dai.node.XLinkOut)
xoutFloodR = pipeline.create(dai.node.XLinkOut)

streams = ['dot-left', 'dot-right', 'flood-left', 'flood-right']
for xout, streamName in zip((xoutDotL, xoutDotR, xoutFloodL, xoutFloodR), streams):
    xout.setStreamName(streamName)
|
||
# Script node for frame routing and IR dot/flood alternate.
# Runs on the device (LEON_CSS core) so the IR driver can be reconfigured
# within a frame time: on each start-of-frame event it toggles between the
# dot projector and flood LED, then routes the resulting L/R frame pair to
# the output matching the illumination that was active.
# NOTE(review): original indentation of the embedded script was lost in
# extraction and has been reconstructed — verify against the upstream example.
script = pipeline.create(dai.node.Script)
script.setProcessor(dai.ProcessorType.LEON_CSS)
script.setScript("""
dotBright = 500 # Note: recommended to not exceed 765, for max duty cycle
floodBright = 200
LOGGING = False # Set `True` for latency/timings debugging

node.warn(f'IR drivers detected: {str(Device.getIrDrivers())}')

flagDot = False
while True:
    # Wait first for a frame event, received at MIPI start-of-frame
    event = node.io['event'].get()
    if LOGGING: tEvent = Clock.now()

    # Immediately reconfigure the IR driver.
    # Note the logic is inverted, as it applies for next frame
    Device.setIrLaserDotProjectorBrightness(0 if flagDot else dotBright)
    Device.setIrFloodLightBrightness(floodBright if flagDot else 0)
    if LOGGING: tIrSet = Clock.now()

    # Wait for the actual frames (after MIPI capture and ISP proc is done)
    frameL = node.io['frameL'].get()
    if LOGGING: tLeft = Clock.now()
    frameR = node.io['frameR'].get()
    if LOGGING: tRight = Clock.now()

    if LOGGING:
        latIR      = (tIrSet - tEvent               ).total_seconds() * 1000
        latEv      = (tEvent - event.getTimestamp() ).total_seconds() * 1000
        latProcL   = (tLeft  - event.getTimestamp() ).total_seconds() * 1000
        diffRecvRL = (tRight - tLeft                ).total_seconds() * 1000
        node.warn(f'T[ms] latEv:{latEv:5.3f} latIR:{latIR:5.3f} latProcL:{latProcL:6.3f} '
                + f' diffRecvRL:{diffRecvRL:5.3f}')

    # Sync checks
    diffSeq  = frameL.getSequenceNum() - event.getSequenceNum()
    diffTsEv = (frameL.getTimestamp() - event.getTimestamp()).total_seconds() * 1000
    diffTsRL = (frameR.getTimestamp() - frameL.getTimestamp()).total_seconds() * 1000
    if diffSeq or diffTsEv or (abs(diffTsRL) > 0.8):
        node.error(f'frame/event desync! Fr-Ev: {diffSeq} frames,'
                 + f' {diffTsEv:.3f} ms; R-L: {diffTsRL:.3f} ms')

    # Route the frames to their respective outputs
    node.io['dotL' if flagDot else 'floodL'].send(frameL)
    node.io['dotR' if flagDot else 'floodR'].send(frameR)
    flagDot = not flagDot
""")
|
||
# Linking: frame-event + both mono streams feed the script node...
monoL.frameEvent.link(script.inputs['event'])
monoL.out.link(script.inputs['frameL'])
monoR.out.link(script.inputs['frameR'])

# ...and the script's four routed outputs feed the XLinkOuts
for outName, xout in (('dotL', xoutDotL), ('dotR', xoutDotR),
                      ('floodL', xoutFloodL), ('floodR', xoutFloodR)):
    script.outputs[outName].link(xout.input)
|
||
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    queues = [device.getOutputQueue(name=s, maxSize=4, blocking=False) for s in streams]

    while True:
        # Non-blocking poll: show whichever streams have a fresh frame
        for q in queues:
            pkt = q.tryGet()
            if pkt is None:
                continue
            cv2.imshow(q.getName(), pkt.getCvFrame())

        if cv2.waitKey(5) == ord('q'):
            break
Empty file.
Oops, something went wrong.