vendor/knncolle/Kmknn/Kmknn.hpp in umappp-0.1.6 vs vendor/knncolle/Kmknn/Kmknn.hpp in umappp-0.2.0
- old
+ new
@@ -14,10 +14,16 @@
#ifdef DEBUG
#include <iostream>
#endif
+#ifndef KMEANS_CUSTOM_PARALLEL
+#ifdef KNNCOLLE_CUSTOM_PARALLEL
+#define KMEANS_CUSTOM_PARALLEL KNNCOLLE_CUSTOM_PARALLEL
+#endif
+#endif
+
/**
* @file Kmknn.hpp
*
* @brief Implements the k-means with k-nearest neighbors (KMKNN) algorithm.
*/
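
The new preprocessor block forwards an application-defined KNNCOLLE_CUSTOM_PARALLEL hook to the bundled kmeans library, so both dependencies share one parallelization scheme. Below is a minimal, self-contained sketch of how that forwarding behaves; it does not include the real knncolle/kmeans headers, and "my_parallel_for" is an illustrative placeholder rather than an API from either library.

    // Stand-alone demonstration of the macro forwarding added above.
    #include <iostream>

    // An application would define this before including the knncolle/kmeans headers;
    // "my_parallel_for" is purely illustrative.
    #define KNNCOLLE_CUSTOM_PARALLEL my_parallel_for

    // Same forwarding logic as in the patch:
    #ifndef KMEANS_CUSTOM_PARALLEL
    #ifdef KNNCOLLE_CUSTOM_PARALLEL
    #define KMEANS_CUSTOM_PARALLEL KNNCOLLE_CUSTOM_PARALLEL
    #endif
    #endif

    #define KMKNN_STR2(x) #x
    #define KMKNN_STR(x) KMKNN_STR2(x)

    int main() {
        // Prints "my_parallel_for": the kmeans hook now expands to the knncolle one.
        std::cout << KMKNN_STR(KMEANS_CUSTOM_PARALLEL) << "\n";
        return 0;
    }
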
@@ -73,15 +79,16 @@
* @param nobs Number of observations.
* @param vals Pointer to an array of length `ndim * nobs`, corresponding to a dimension-by-observation matrix in column-major format,
* i.e., contiguous elements belong to the same observation.
* @param power Power of `nobs` to define the number of cluster centers.
* By default, a square root is performed.
+ * @param nthreads Number of threads to use for the k-means clustering.
*
* @tparam INPUT_t Floating-point type of the input data.
*/
template<typename INPUT_t>
- Kmknn(INDEX_t ndim, INDEX_t nobs, const INPUT_t* vals, double power = 0.5) :
+ Kmknn(INDEX_t ndim, INDEX_t nobs, const INPUT_t* vals, double power = 0.5, int nthreads = 1) :
num_dim(ndim),
num_obs(nobs),
data(ndim * nobs),
sizes(std::ceil(std::pow(num_obs, power))),
offsets(sizes.size()),
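
This hunk adds an `nthreads` argument (defaulting to 1) to the `Kmknn` constructor so the internal k-means clustering can be parallelized. A hedged usage sketch follows; the include path, the `knncolle` namespace and the defaulted template arguments (`Kmknn<>`) are assumptions not visible in this diff, while the `(ndim, nobs, vals, power, nthreads)` argument list comes from the signature above.

    #include "knncolle/Kmknn/Kmknn.hpp" // assumed include path for the vendored header
    #include <vector>

    int main() {
        int ndim = 5, nobs = 1000;
        std::vector<double> vals(ndim * nobs); // column-major: dimension-by-observation
        // ... fill 'vals' with the dataset ...

        // Default power of 0.5 (~sqrt(nobs) cluster centers), k-means run on 4 threads.
        knncolle::Kmknn<> searcher(ndim, nobs, vals.data(), /* power = */ 0.5, /* nthreads = */ 4);
        return 0;
    }
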
@@ -101,9 +108,12 @@
host = vals;
} else {
std::copy(vals, vals + data.size(), data.data());
host = data.data();
}
+
+ kmeans::Kmeans<INTERNAL_t, int> krunner;
+ krunner.set_num_threads(nthreads);
- auto output = kmeans::Kmeans<INTERNAL_t, int>().run(ndim, nobs, host, ncenters, centers.data(), clusters.data());
+ auto output = krunner.run(ndim, nobs, host, ncenters, centers.data(), clusters.data());
std::swap(sizes, output.sizes);
// In case there were some duplicate points, we just resize this a bit.
if (ncenters != sizes.size()) {
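
The k-means step is now driven through a named `kmeans::Kmeans` runner so that `set_num_threads()` can be applied before `run()`. The sketch below isolates that pattern; the construction, `set_num_threads()` and `run(ndim, nobs, data, ncenters, centers, clusters)` calls mirror the patch, while the include path and the concrete template arguments (`double` data, `int` cluster IDs) are assumptions.

    #include "kmeans/Kmeans.hpp" // assumed include path for the bundled kmeans library
    #include <cmath>
    #include <vector>

    int main() {
        int ndim = 5, nobs = 1000;
        int ncenters = std::ceil(std::pow(nobs, 0.5)); // matches Kmknn's default power of 0.5
        std::vector<double> data(ndim * nobs);          // column-major input, as in Kmknn
        // ... fill 'data' with the dataset ...
        std::vector<double> centers(ndim * ncenters);   // receives the final centroids
        std::vector<int> clusters(nobs);                // receives per-observation assignments

        kmeans::Kmeans<double, int> krunner;
        krunner.set_num_threads(4); // the knob that Kmknn now forwards via its 'nthreads' argument
        auto output = krunner.run(ndim, nobs, data.data(), ncenters, centers.data(), clusters.data());
        // 'output.sizes' holds per-cluster counts, which Kmknn swaps into its own 'sizes' vector.
        return 0;
    }
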