Skip to content

Commit

Permalink
Fix direct Depthwise convolution implementation (#367)
Browse files Browse the repository at this point in the history
* Fix direct Depthwise convolution implementation

There was a silly typo bug in the direct depthwise convolution algorithm
that broke multi-channel testsets.  Perhaps more disturbingly, our
default test set did not catch this, because multi-channel workloads are
not run unless the fuzzing tests are run.

* Update test/conv.jl

Co-authored-by: Carlo Lucibello <carlo.lucibello@unibocconi.it>

Co-authored-by: Dhairya Gandhi <dhairya@juliacomputing.com>
Co-authored-by: Carlo Lucibello <carlo.lucibello@unibocconi.it>
  • Loading branch information
3 people authored Dec 5, 2021
1 parent 063992b commit 2436b32
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 12 deletions.
6 changes: 2 additions & 4 deletions src/impl/depthwiseconv_direct.jl
Original file line number Diff line number Diff line change
Expand Up @@ -81,12 +81,10 @@ function depthwiseconv_direct!(y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
h_idx in h_region,
w_idx in w_region

# Probe for out-of-bounds accesses on `x` and `continue` if we hit one
dotprod = yT(0)
c_out = (c_in - 1)*channel_multiplier(cdims) + c_mult
for c_in in 1:channels_in(cdims),
kd in 1:kernel_d

for kd in 1:kernel_d
# Probe for out-of-bounds accesses on `x` and `continue` if we hit one
x_d = project(d_idx, stride_d, pad_d_lo) + (kd - 1)*dil_d
if x_d <= 0 || x_d > depth
continue
Expand Down
23 changes: 15 additions & 8 deletions test/conv.jl
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,9 @@ conv_answer_dict = Dict(
),
)

# A "drop channels and batch dimension" helper
ddims(x) = dropdims(x, dims=(ndims(x)-1, ndims(x)))

@testset "Dense Convolution" begin
# Start with some easy-to-debug cases that we have worked through and _know_ work
for rank in (1,2,3)
Expand Down Expand Up @@ -272,9 +275,6 @@ conv_answer_dict = Dict(
x = reshape(Float64[1:prod(size(dx));], size(dx)..., 1, 1)
w = reshape(Float64[1:prod(size(dw));], size(dw)..., 1, 1)

# A "drop channels and batch dimension" helper
ddims(x) = dropdims(x, dims=(rank+1, rank+2))

convs = [NNlib.conv, NNlib.conv_im2col, NNlib.conv_direct,]
NNlib.is_nnpack_available() && push!(convs, NNlib.conv_nnpack)
for conv in convs
Expand Down Expand Up @@ -450,8 +450,10 @@ else
end

@testset "Depthwise Convolution" begin
# Start with some easy-to-debug cases that we have worked through and _know_ work
for rank in (1,) #2,3)
# Start with some easy-to-debug cases that we have worked through and _know_ work.
# NOTE: these examples are all single-channel... which doesn't really stress test
# the important parts of depthwise convolution!
for rank in (1,2,3)
@testset "depthwiseconv$(rank)d" begin
# Pull out known-good answers for y = depthwiseconv(x, w)
y_pad = conv_answer_dict[rank]["y_pad"]
Expand Down Expand Up @@ -479,9 +481,6 @@ end
x = reshape(Float64[1:prod(size(dx));], size(dx)..., 1, 1)
w = reshape(Float64[1:prod(size(dw));], size(dw)..., 1, 1)

# A "drop channels and batch dimension" helper
ddims(x) = dropdims(x, dims=(rank+1, rank+2))

for conv in (NNlib.depthwiseconv, NNlib.depthwiseconv_im2col, NNlib.depthwiseconv_direct)
@testset "$(conv)" begin
# First, your basic convolution with no parameters
Expand Down Expand Up @@ -557,6 +556,14 @@ end
end
end
end

# Do some real depthwise convolution tests
x = Float64.(reshape(1:2, (1,2,1)))
w = Float64.(reshape(1:6, (3,1,2)))
cdims = DepthwiseConvDims(x, w; padding=1)
for conv in (NNlib.depthwiseconv, NNlib.depthwiseconv_im2col, NNlib.depthwiseconv_direct)
@test conv(x, w, cdims)[:] ≈ [2, 10] rtol=1e-7
end
end


Expand Down

0 comments on commit 2436b32

Please sign in to comment.