Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Moving more linalg prims to raft #4567

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
221d87c
Pointing to custom RAFT
vinaydes Feb 1, 2022
6843958
Doing the modifications needed to work with new RAFT RNG
vinaydes Feb 1, 2022
2d8f486
cuml updates for lap, label, cluster, and spectral apis
cjnolet Feb 1, 2022
5902b07
Updating raft pin
cjnolet Feb 1, 2022
fc41d8f
Updating copyrights
cjnolet Feb 1, 2022
f5acca5
Merge branch 'branch-22.04' into for-raft-rng-improvements
vinaydes Feb 2, 2022
9a7633b
Converting to ceil division
vinaydes Feb 3, 2022
d4c2dd1
Fixing the issue with RF text dump test
vinaydes Feb 3, 2022
fc58e51
Increasing the number of SVD components to make the test work with ot…
vinaydes Feb 3, 2022
a34953a
Fixing SVD tests by introducing some redundancy
vinaydes Feb 7, 2022
ddf80fb
Fixing format
vinaydes Feb 7, 2022
4e740d5
Merge remote-tracking branch 'rapidsai/branch-22.04' into imp-2204-la…
cjnolet Feb 8, 2022
ec44e7c
Fixing raft::sparse calls
cjnolet Feb 9, 2022
bb3fad4
Reverting raft pin
cjnolet Feb 9, 2022
10ce2a6
Merge branch 'branch-22.04' into for-raft-rng-improvements
vinaydes Feb 9, 2022
6b7217b
Merge remote-tracking branch 'cjnolet/imp-2204-lap_label_cluster_spec…
vinaydes Feb 9, 2022
50b6d99
Fixing the random number generator initialization
vinaydes Feb 9, 2022
91b6f89
Update RAFT commit id
vinaydes Feb 9, 2022
c609f7a
Merge branch 'branch-22.04' into for-raft-rng-improvements
cjnolet Feb 9, 2022
898c3a7
Merge branch 'branch-22.04' into for-raft-rng-improvements
cjnolet Feb 9, 2022
7d98f03
Moving more linalg prims to raft
cjnolet Feb 9, 2022
ea6bb6d
Updating raft pin
cjnolet Feb 9, 2022
e705441
Updating style
cjnolet Feb 9, 2022
9d3b82f
Merge branch 'branch-22.04' into imp-2204-moving_remaining_linalg
cjnolet Feb 10, 2022
64b552e
using proper raft pin
cjnolet Feb 10, 2022
37802f9
more prims
cjnolet Feb 10, 2022
12c0057
Ols
cjnolet Feb 10, 2022
eba1382
Updates
cjnolet Feb 10, 2022
b95d203
More updates
cjnolet Feb 10, 2022
92e6929
Raising threshold for the time being
cjnolet Feb 10, 2022
fa872d6
Updating copyright
cjnolet Feb 10, 2022
510626f
Reverting raft pin
cjnolet Feb 10, 2022
4ea4d06
Merge remote-tracking branch 'rapidsai/branch-22.04' into imp-2204-mo…
cjnolet Feb 10, 2022
f66bd5f
Fixing style
cjnolet Feb 11, 2022
85da579
Reverting raft pin
cjnolet Feb 11, 2022
d127221
Merge branch 'for-raft-rng-improvements' into imp-2204-moving_remaini…
cjnolet Feb 11, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 9 additions & 19 deletions cpp/bench/prims/rng.cu
Original file line number Diff line number Diff line change
Expand Up @@ -89,25 +89,15 @@ static std::vector<Params<T>> getInputs()
{32 * 1024 * 1024 + 1, RNG_Uniform, GenPhilox, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 1, RNG_Uniform, GenPhilox, T(-1.0), T(1.0)},

{1024 * 1024, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{32 * 1024 * 1024, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{1024 * 1024 * 1024, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{1024 * 1024 + 2, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{32 * 1024 * 1024 + 2, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 2, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{1024 * 1024 + 1, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{32 * 1024 * 1024 + 1, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 1, RNG_Uniform, GenTaps, T(-1.0), T(1.0)},

{1024 * 1024, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{32 * 1024 * 1024, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{1024 * 1024 * 1024, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{1024 * 1024 + 2, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{32 * 1024 * 1024 + 2, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 2, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{1024 * 1024 + 1, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{32 * 1024 * 1024 + 1, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 1, RNG_Uniform, GenKiss99, T(-1.0), T(1.0)},
{1024 * 1024, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{32 * 1024 * 1024, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{1024 * 1024 * 1024, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{1024 * 1024 + 2, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{32 * 1024 * 1024 + 2, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 2, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{1024 * 1024 + 1, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{32 * 1024 * 1024 + 1, RNG_Uniform, GenPC, T(-1.0), T(1.0)},
{1024 * 1024 * 1024 + 1, RNG_Uniform, GenPC, T(-1.0), T(1.0)},

{1024 * 1024, RNG_Fill, GenPhilox, T(-1.0), T(1.0)},
{32 * 1024 * 1024, RNG_Fill, GenPhilox, T(-1.0), T(1.0)},
Expand Down
2 changes: 0 additions & 2 deletions cpp/src/glm/glm.cu
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,6 @@ class handle_t;
namespace ML {
namespace GLM {

using namespace MLCommon;

void olsFit(const raft::handle_t& handle,
float* input,
int n_rows,
Expand Down
14 changes: 7 additions & 7 deletions cpp/src/glm/ols.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@

#pragma once

#include <linalg/lstsq.cuh>
#include <raft/linalg/add.hpp>
#include <raft/linalg/gemv.hpp>
#include <raft/linalg/lstsq.hpp>
#include <raft/linalg/norm.hpp>
#include <raft/linalg/subtract.hpp>
#include <raft/matrix/math.hpp>
Expand All @@ -34,8 +34,6 @@
namespace ML {
namespace GLM {

using namespace MLCommon;

/**
* @brief fit an ordinary least squares model
* @param handle cuml handle
Expand Down Expand Up @@ -97,10 +95,12 @@ void olsFit(const raft::handle_t& handle,

raft::common::nvtx::push_range("ML::GLM::olsFit/algo-%d", selectedAlgo);
switch (selectedAlgo) {
case 0: LinAlg::lstsqSvdJacobi(handle, input, n_rows, n_cols, labels, coef, stream); break;
case 1: LinAlg::lstsqEig(handle, input, n_rows, n_cols, labels, coef, stream); break;
case 2: LinAlg::lstsqQR(handle, input, n_rows, n_cols, labels, coef, stream); break;
case 3: LinAlg::lstsqSvdQR(handle, input, n_rows, n_cols, labels, coef, stream); break;
case 0:
raft::linalg::lstsqSvdJacobi(handle, input, n_rows, n_cols, labels, coef, stream);
break;
case 1: raft::linalg::lstsqEig(handle, input, n_rows, n_cols, labels, coef, stream); break;
case 2: raft::linalg::lstsqQR(handle, input, n_rows, n_cols, labels, coef, stream); break;
case 3: raft::linalg::lstsqSvdQR(handle, input, n_rows, n_cols, labels, coef, stream); break;
default:
ASSERT(false, "olsFit: no algorithm with this id (%d) has been implemented", algo);
break;
Expand Down
2 changes: 0 additions & 2 deletions cpp/src/glm/preprocess.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,6 @@
namespace ML {
namespace GLM {

using namespace MLCommon;

/**
* @brief Center and scale the data, depending on the flags fit_intercept and normalize
*
Expand Down
4 changes: 2 additions & 2 deletions cpp/src/glm/qn/simple_mat/dense.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,11 @@
#include <vector>

#include "base.hpp"
#include <linalg/ternary_op.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <raft/linalg/add.hpp>
#include <raft/linalg/ternary_op.cuh>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/map_then_reduce.hpp>
Expand Down Expand Up @@ -209,7 +209,7 @@ struct SimpleDenseMat : SimpleMat<T> {
ASSERT(ord == other2.ord, "SimpleDenseMat::assign_ternary: Storage orders must match");
ASSERT(ord == other3.ord, "SimpleDenseMat::assign_ternary: Storage orders must match");

MLCommon::LinAlg::ternaryOp(data, other1.data, other2.data, other3.data, len, f, stream);
raft::linalg::ternaryOp(data, other1.data, other2.data, other3.data, len, f, stream);
}

inline void fill(const T val, cudaStream_t stream)
Expand Down
2 changes: 1 addition & 1 deletion cpp/src/glm/qn/simple_mat/sparse.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,10 @@
#include <vector>

#include "base.hpp"
#include <linalg/ternary_op.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <raft/linalg/ternary_op.cuh>

#include <raft/linalg/add.hpp>
#include <raft/linalg/map_then_reduce.hpp>
Expand Down
2 changes: 0 additions & 2 deletions cpp/src/glm/ridge.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,6 @@
namespace ML {
namespace GLM {

using namespace MLCommon;

template <typename math_t>
void ridgeSolve(const raft::handle_t& handle,
math_t* S,
Expand Down
8 changes: 2 additions & 6 deletions cpp/src/kmeans/common.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@

#include <common/tensor.hpp>

#include <linalg/reduce_cols_by_key.cuh>
#include <linalg/reduce_rows_by_key.cuh>
#include <matrix/gather.cuh>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <random/permute.cuh>

#include <raft/comms/comms.hpp>
Expand All @@ -40,7 +40,6 @@
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include <random/permute.cuh>
#include <random>

#include <thrust/equal.h>
Expand All @@ -55,9 +54,6 @@
#include <cuml/cluster/kmeans_mg.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/metrics/metrics.hpp>
#include <linalg/reduce_cols_by_key.cuh>
#include <linalg/reduce_rows_by_key.cuh>
#include <matrix/gather.cuh>

#include <fstream>
#include <numeric>
Expand Down
22 changes: 11 additions & 11 deletions cpp/src/kmeans/kmeans_mg_impl.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -526,19 +526,19 @@ void fit(const raft::handle_t& handle,

// Calculates weighted sum of all the samples assigned to cluster-i and
// store the result in newCentroids[i]
MLCommon::LinAlg::reduce_rows_by_key(X.data(),
X.getSize(1),
itr,
weight.data(),
workspace.data(),
X.getSize(0),
X.getSize(1),
n_clusters,
newCentroids.data(),
stream);
raft::linalg::reduce_rows_by_key(X.data(),
X.getSize(1),
itr,
weight.data(),
workspace.data(),
X.getSize(0),
X.getSize(1),
n_clusters,
newCentroids.data(),
stream);

// Reduce weights by key to compute weight in each cluster
MLCommon::LinAlg::reduce_cols_by_key(
raft::linalg::reduce_cols_by_key(
weight.data(), itr, wtInCluster.data(), 1, weight.getSize(0), n_clusters, stream);

// merge the local histogram from all ranks
Expand Down
22 changes: 11 additions & 11 deletions cpp/src/kmeans/sg_impl.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -135,19 +135,19 @@ void fit(const raft::handle_t& handle,

// Calculates weighted sum of all the samples assigned to cluster-i and store the
// result in newCentroids[i]
MLCommon::LinAlg::reduce_rows_by_key(X.data(),
X.getSize(1),
itr,
weight.data(),
workspace.data(),
X.getSize(0),
X.getSize(1),
n_clusters,
newCentroids.data(),
stream);
raft::linalg::reduce_rows_by_key(X.data(),
X.getSize(1),
itr,
weight.data(),
workspace.data(),
X.getSize(0),
X.getSize(1),
n_clusters,
newCentroids.data(),
stream);

// Reduce weights by key to compute weight in each cluster
MLCommon::LinAlg::reduce_cols_by_key(
raft::linalg::reduce_cols_by_key(
weight.data(), itr, wtInCluster.data(), 1, weight.getSize(0), n_clusters, stream);

// Computes newCentroids[i] = newCentroids[i]/wtInCluster[i] where
Expand Down
2 changes: 1 addition & 1 deletion cpp/src/randomforest/randomforest.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ class RandomForest {
auto rs = DT::fnv1a32_basis;
rs = DT::fnv1a32(rs, rf_params.seed);
rs = DT::fnv1a32(rs, tree_id);
raft::random::Rng rng(rs, raft::random::GeneratorType::GenKiss99);
raft::random::Rng rng(rs, raft::random::GenPhilox);
if (rf_params.bootstrap) {
// Use bootstrapped sample set
rng.uniformInt<int>(selected_rows->data(), selected_rows->size(), 0, n_rows, stream);
Expand Down
2 changes: 0 additions & 2 deletions cpp/src/tsvd/tsvd.cu
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,6 @@

namespace ML {

using namespace MLCommon;

void tsvdFit(raft::handle_t& handle,
float* input,
float* components,
Expand Down
36 changes: 17 additions & 19 deletions cpp/src/tsvd/tsvd.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,13 @@
#pragma once

#include <cuml/decomposition/params.hpp>
#include <linalg/rsvd.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <raft/linalg/add.hpp>
#include <raft/linalg/eig.hpp>
#include <raft/linalg/eltwise.hpp>
#include <raft/linalg/gemm.hpp>
#include <raft/linalg/rsvd.cuh>
#include <raft/linalg/transpose.hpp>
#include <raft/matrix/math.hpp>
#include <raft/matrix/matrix.hpp>
Expand All @@ -38,8 +38,6 @@

namespace ML {

using namespace MLCommon;

template <typename math_t>
void calCompExpVarsSvd(const raft::handle_t& handle,
math_t* in,
Expand Down Expand Up @@ -69,22 +67,22 @@ void calCompExpVarsSvd(const raft::handle_t& handle,

rmm::device_uvector<math_t> components_temp(prms.n_cols * prms.n_components, stream);
math_t* left_eigvec = nullptr;
LinAlg::rsvdFixedRank(handle,
in,
prms.n_rows,
prms.n_cols,
singular_vals,
left_eigvec,
components_temp.data(),
prms.n_components,
p,
true,
false,
true,
false,
(math_t)prms.tol,
prms.n_iterations,
stream);
raft::linalg::rsvdFixedRank(handle,
in,
prms.n_rows,
prms.n_cols,
singular_vals,
left_eigvec,
components_temp.data(),
prms.n_components,
p,
true,
false,
true,
false,
(math_t)prms.tol,
prms.n_iterations,
stream);

raft::linalg::transpose(
handle, components_temp.data(), components, prms.n_cols, prms.n_components, stream);
Expand Down
2 changes: 1 addition & 1 deletion cpp/src/umap/optimize.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,11 @@
#include <cuml/common/logger.hpp>
#include <cuml/manifold/umapparams.h>

#include <linalg/power.cuh>
#include <raft/cudart_utils.h>
#include <raft/linalg/add.hpp>
#include <raft/linalg/eltwise.hpp>
#include <raft/linalg/multiply.hpp>
#include <raft/linalg/power.cuh>
#include <raft/linalg/unary_op.hpp>
#include <raft/matrix/math.hpp>
#include <raft/stats/mean.hpp>
Expand Down
Loading