Skip to content

Commit

Permalink
Merge branch 'master' into ns/benchmarks
Browse files — browse the repository at this point in the history
  • Loading branch information
nirmal-suthar committed Aug 2, 2020
2 parents 8f63ba8 + 829e15d commit 2560f1c
Show file tree
Hide file tree
Showing 39 changed files with 1,617 additions and 972 deletions.
36 changes: 36 additions & 0 deletions .github/workflows/format_pr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# Nightly workflow: runs JuliaFormatter over the repo and opens a PR with any changes.
name: format-pr

on:
  schedule:
    # Once per day at midnight UTC.
    - cron: '0 0 * * *'
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        julia-version: [1.4.2]
        julia-arch: [x86]
        os: [ubuntu-latest]
    steps:
      - uses: julia-actions/setup-julia@latest
        with:
          version: ${{ matrix.julia-version }}

      - uses: actions/checkout@v2
      - name: Install JuliaFormatter and format
        run: |
          julia -e 'import Pkg; Pkg.add("JuliaFormatter")'
          julia -e 'using JuliaFormatter; format(".")'
      # https://github.com/marketplace/actions/create-pull-request
      - name: Create Pull Request
        # BUG FIX: the "Check outputs" step below reads steps.cpr.outputs.pr_number,
        # but no step had `id: cpr`, so the expression always expanded to "".
        id: cpr
        uses: peter-evans/create-pull-request@v2
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: Format .jl files
          title: 'Automatic JuliaFormatter.jl run'
          branch: auto-juliaformatter-pr
          labels: formatting, automated pr, no changelog
      - name: Check outputs
        run: |
          echo 'Pull Request Number - ${{ env.PULL_REQUEST_NUMBER }}'
          echo 'Pull Request Number - ${{ steps.cpr.outputs.pr_number }}'
4 changes: 4 additions & 0 deletions benchmarks/plot.jl
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ function read_bm_transforms(fname, framework)
)
for line in eachline(fname)
raw = split(line)
<<<<<<< HEAD
push!(
data,
(
Expand All @@ -21,6 +22,9 @@ function read_bm_transforms(fname, framework)
parse(Float64, raw[5]),
),
)
=======
push!(data, (raw[1], raw[2], parse(Int, raw[3]), parse(Float64, raw[4])))
>>>>>>> master
end
data[!, :framework] .= framework
return data
Expand Down
64 changes: 64 additions & 0 deletions benchmarks/transforms.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,12 +38,15 @@ function realign_point_cloud(npoints)
return rot(pc)
end

<<<<<<< HEAD
function realign_trimesh(_npoints)
pc = generate_trimesh(_npoints)
rot = RotateTriMesh(-ROT_MATRIX)
return rot(pc)
end

=======
>>>>>>> master
ROT_MATRIX = [
1.0 2.0 3.0
0.2 0.5 0.9
Expand Down Expand Up @@ -75,6 +78,7 @@ gpu_bm_trimesh = setup_benchmark_record(names)

println("DEVICE: CPU")
for _npoints in npoint_arr
<<<<<<< HEAD
pcloud_arr = [
(ScalePointCloud(0.5; inplace = false), "ScalePointCloud"),
(RotatePointCloud(ROT_MATRIX; inplace = false), "RotatePointCloud"),
Expand All @@ -85,6 +89,25 @@ for _npoints in npoint_arr
RotatePointCloud(ROT_MATRIX; inplace = false),
ReAlignPointCloud(realign_point_cloud(_npoints);inplace = false,),
NormalizePointCloud()),"Chain",)
=======
arr = [
(ScalePointCloud(0.5; inplace = false), "ScalePointCloud"),
(RotatePointCloud(ROT_MATRIX; inplace = false), "RotatePointCloud"),
(
ReAlignPointCloud(realign_point_cloud(_npoints); inplace = false),
"RealignPointCloud",
),
(NormalizePointCloud(inplace = false), "NormalizePointCloud"),
(
Chain(
ScalePointCloud(0.5; inplace = false),
RotatePointCloud(ROT_MATRIX; inplace = false),
ReAlignPointCloud(realign_point_cloud(_npoints); inplace = false),
NormalizePointCloud(),
),
"Chain",
),
>>>>>>> master
]

trimesh_arr = [
Expand Down Expand Up @@ -123,6 +146,7 @@ if has_cuda()
println("CUDA is on. Running GPU Benchmarks")
println("DEVICE: GPU")
for _npoints in npoint_arr
<<<<<<< HEAD
pcloud_arr = [
(ScalePointCloud(0.5; inplace = false), "ScalePointCloud"),
(RotatePointCloud(ROT_MATRIX; inplace = false), "RotatePointCloud"),
Expand Down Expand Up @@ -162,26 +186,66 @@ if has_cuda()
generate_trimesh,
(op, pc) -> (CUDA.@sync op(pc)),
gpu
=======
arr = [
(ScalePointCloud(0.5; inplace = false), "ScalePointCloud"),
(RotatePointCloud(ROT_MATRIX; inplace = false), "RotatePointCloud"),
(
ReAlignPointCloud(realign_point_cloud(_npoints); inplace = false),
"RealignPointCloud",
),
(NormalizePointCloud(inplace = false), "NormalizePointCloud"),
(
Chain(
ScalePointCloud(0.5; inplace = false),
RotatePointCloud(ROT_MATRIX; inplace = false),
ReAlignPointCloud(realign_point_cloud(_npoints); inplace = false),
NormalizePointCloud(inplace = false),
),
"Chain",
),
]

println("Running benchmarks for npoints = $_npoints")
run_benchmarks!(
gpu_benchmarks,
arr,
_npoints,
(op, pc) -> (CuArrays.@sync op(pc)),
gpu,
>>>>>>> master
)
println()
end
end

<<<<<<< HEAD
function save_bm(fname, rep, cpu_benchmarks, gpu_benchmarks)
=======
function save_bm(fname, cpu_benchmarks, gpu_benchmarks)
>>>>>>> master
open(fname, "w") do io
device = "cpu"
for (key, values) in cpu_benchmarks
for (p, v) in zip(npoint_arr, values)
<<<<<<< HEAD
Printf.@printf(io, "%s %s %s %d %f ms\n",
rep, device, key, p, v)
=======
Printf.@printf(io, "%s %s %d %f ms\n", device, key, p, v)
>>>>>>> master
end
end

device = "gpu"
for (key, values) in gpu_benchmarks
for (p, v) in zip(npoint_arr, values)
<<<<<<< HEAD
Printf.@printf(io, "%s %s %s %d %f ms\n",
rep, device, key, p, v)
=======
Printf.@printf(io, "%s %s %d %f ms\n", device, key, p, v)
>>>>>>> master
end
end
end
Expand Down
6 changes: 3 additions & 3 deletions benchmarks/triangle_mesh.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,13 @@ using Flux3D, BenchmarkTools
# mesh = load_trimesh(joinpath(@__DIR__, "../test/meshes/teapot.obj"))
mesh = load_trimesh("./test/meshes/teapot.obj")

trail = @benchmark $get_edges_packed($mesh; refresh=true)
trail = @benchmark $get_edges_packed($mesh; refresh = true)
minimum(trail.times) * 1.0e-6

trail = @benchmark $get_faces_to_edges_packed($mesh; refresh=true)
trail = @benchmark $get_faces_to_edges_packed($mesh; refresh = true)
minimum(trail.times) * 1.0e-6

trail = @benchmark $get_laplacian_packed($mesh; refresh=true)
trail = @benchmark $get_laplacian_packed($mesh; refresh = true)
minimum(trail.times) * 1.0e-6

trail = @benchmark $compute_verts_normals_packed($mesh)
Expand Down
3 changes: 0 additions & 3 deletions bors.toml

This file was deleted.

24 changes: 10 additions & 14 deletions docs/make.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,20 +2,16 @@ using Flux3D
using Documenter

makedocs(;
modules=[Flux3D],
authors="Nirmal P. Suthar <nirmalps@iitk.ac.in>",
repo="https://github.com/nirmal-suthar/Flux3D.jl/blob/{commit}{path}#L{line}",
sitename="Flux3D.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://nirmal-suthar.github.io/Flux3D.jl",
assets=String[],
modules = [Flux3D],
authors = "Nirmal P. Suthar <nirmalps@iitk.ac.in>",
repo = "https://github.com/nirmal-suthar/Flux3D.jl/blob/{commit}{path}#L{line}",
sitename = "Flux3D.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://nirmal-suthar.github.io/Flux3D.jl",
assets = String[],
),
pages=[
"Home" => "index.md",
],
pages = ["Home" => "index.md"],
)

deploydocs(;
repo="github.com/nirmal-suthar/Flux3D.jl.git",
)
deploydocs(; repo = "github.com/nirmal-suthar/Flux3D.jl.git")
70 changes: 47 additions & 23 deletions examples/dgcnn_classification.jl
Original file line number Diff line number Diff line change
Expand Up @@ -25,33 +25,56 @@ end
function get_processed_data(args)
# Fetching the train and validation data and getting them into proper shape
if args.num_classes == 10
dset = ModelNet10.dataset(;mode=:pointcloud, npoints=args.npoints, transform=NormalizePointCloud())
dset = ModelNet10.dataset(;
mode = :pointcloud,
npoints = args.npoints,
transform = NormalizePointCloud(),
)
elseif args.num_classes == 40
dset = ModelNet40.dataset(;mode=:pointcloud, npoints=args.npoints, transform=NormalizePointCloud())
dset = ModelNet40.dataset(;
mode = :pointcloud,
npoints = args.npoints,
transform = NormalizePointCloud(),
)
else
error("ModelNet dataset with $(args.num_classes) is not supported.
Currently supported num_classes for ModelNet dataset is {10,40}")
end

data = [dset[i].data.points for i in 1:length(dset)]
labels = onehotbatch([dset[i].ground_truth for i in 1:length(dset)],1:args.num_classes)
data = [dset[i].data.points for i = 1:length(dset)]
labels = onehotbatch([dset[i].ground_truth for i = 1:length(dset)], 1:args.num_classes)

#onehot encode labels of batch
train = [(cat(data[i]..., dims = 3), labels[:,i]) for i in partition(1:length(data), args.batch_size)] .|> args.device

train =
[
(cat(data[i]..., dims = 3), labels[:, i])
for i in partition(1:length(data), args.batch_size)
] .|> args.device

if args.num_classes == 10
VAL = ModelNet10.dataset(;mode=:pointcloud, train=false, npoints=args.npoints, transform=NormalizePointCloud())
VAL = ModelNet10.dataset(;
mode = :pointcloud,
train = false,
npoints = args.npoints,
transform = NormalizePointCloud(),
)
elseif args.num_classes == 40
VAL = ModelNet40.dataset(;mode=:pointcloud, train=false, npoints=args.npoints, transform=NormalizePointCloud())
VAL = ModelNet40.dataset(;
mode = :pointcloud,
train = false,
npoints = args.npoints,
transform = NormalizePointCloud(),
)
else
error("ModelNet dataset with $(args.num_classes) is not supported.
Currently supported num_classes for ModelNet dataset is {10,40}")
end

valX = cat([VAL[i].data.points for i in 1:length(VAL)]..., dims=3) |> args.device
valY = onehotbatch([VAL[i].ground_truth for i in 1:length(VAL)], 1:args.num_classes) |> args.device
valX = cat([VAL[i].data.points for i = 1:length(VAL)]..., dims = 3) |> args.device
valY =
onehotbatch([VAL[i].ground_truth for i = 1:length(VAL)], 1:args.num_classes) |> args.device

val = (valX,valY)
val = (valX, valY)
return train, val
end

Expand All @@ -67,17 +90,18 @@ function train(; kwargs...)
args.device = cpu
@info("Training on CPU")
end

@info("Loading Data...")
# Load the train, validation data
train,val = get_processed_data(args)
train, val = get_processed_data(args)

@info("Initializing Model...")
@info("Initializing Model...")
# Defining the loss and accuracy functions
m = DGCNN(args.num_classes, args.K, args.npoints) |> args.device

loss(x, y) = crossentropy(m(x), y)
accuracy(x, y) = mean(onecold(cpu(m(x)), 1:args.num_classes) .== onecold(cpu(y), 1:args.num_classes))
accuracy(x, y) =
mean(onecold(cpu(m(x)), 1:args.num_classes) .== onecold(cpu(y), 1:args.num_classes))

## Training
opt = ADAM(args.lr)
Expand All @@ -90,18 +114,18 @@ end

function custom_train!(loss, ps, data, opt, epochs, (valX, valY), accuracy)
ps = Zygote.Params(ps)
for epoch in 1:epochs
for epoch = 1:epochs
running_loss = 0
for d in data
gs = gradient(ps) do
training_loss = loss(d...)
running_loss += training_loss
return training_loss
end
Flux.update!(opt, ps, gs)
gs = gradient(ps) do
training_loss = loss(d...)
running_loss += training_loss
return training_loss
end
Flux.update!(opt, ps, gs)
end
print("Epoch: $(epoch), epoch_loss: $(running_loss), accuracy: $(accuracy(valX, valY))\n")
end
end

m = train()
m = train()
Loading

0 comments on commit 2560f1c

Please sign in to comment.