Skip to content

Commit

Permalink
Use rapids-logger to generate the cuml logger (#6162)
Browse files Browse the repository at this point in the history
This PR replaces cuml's logger implementation with one generated using https://github.com/rapidsai/rapids-logger. This approach allows us to centralize the logger definition across different RAPIDS projects, while still allowing each project to vendor its own copy with a suitable set of macros and default logger objects. The common logger also handles the more complex packaging problems involved in fully isolating our spdlog dependency so that none of its symbols leak, which allows our libraries to be safely installed in a much broader set of environments.

This PR requires rapidsai/rapids-logger#1

Contributes to rapidsai/build-planning#104

Authors:
  - Vyas Ramasubramani (https://github.com/vyasr)
  - William Hicks (https://github.com/wphicks)

Approvers:
  - Dante Gama Dessavre (https://github.com/dantegd)

URL: #6162
  • Loading branch information
vyasr authored Jan 3, 2025
1 parent 91496b3 commit 51e6851
Show file tree
Hide file tree
Showing 83 changed files with 672 additions and 891 deletions.
28 changes: 25 additions & 3 deletions cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#=============================================================================
# Copyright (c) 2018-2024, NVIDIA CORPORATION.
# Copyright (c) 2018-2025, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -109,6 +109,17 @@ set(RMM_LOGGING_LEVEL "INFO" CACHE STRING "Choose the logging level.")
set_property(CACHE RMM_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL" "OFF")
message(VERBOSE "CUML_CPP: RMM_LOGGING_LEVEL = '${RMM_LOGGING_LEVEL}'.")

# Set logging level
set(LIBCUML_LOGGING_LEVEL
"DEBUG"
CACHE STRING "Choose the logging level."
)
set_property(
CACHE LIBCUML_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL"
"OFF"
)
message(VERBOSE "CUML: LIBCUML_LOGGING_LEVEL = '${LIBCUML_LOGGING_LEVEL}'.")

if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS)
# Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to
# have different values for the `Threads::Threads` target. Setting this flag ensures
Expand Down Expand Up @@ -220,6 +231,15 @@ endif()
rapids_cpm_init()
rapids_cmake_install_lib_dir(lib_dir)

# Not using rapids-cmake since we never want to find, always download.
CPMAddPackage(
NAME rapids_logger GITHUB_REPOSITORY rapidsai/rapids-logger GIT_SHALLOW FALSE GIT_TAG
4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6 VERSION 4df3ee70c6746fd1b6c0dc14209dae2e2d4378c6
)
rapids_make_logger(
ML EXPORT_SET cuml-exports LOGGER_HEADER_DIR include/cuml/common/ LOGGER_MACRO_PREFIX CUML LOGGER_TARGET cuml_logger
)

if(BUILD_CUML_TESTS OR BUILD_PRIMS_TESTS)
find_package(Threads)
endif()
Expand Down Expand Up @@ -291,8 +311,7 @@ if(BUILD_CUML_CPP_LIBRARY)

# single GPU components
# common components
add_library(${CUML_CPP_TARGET}
src/common/logger.cpp)
add_library(${CUML_CPP_TARGET})
if (CUML_ENABLE_GPU)
target_compile_definitions(${CUML_CPP_TARGET} PUBLIC CUML_ENABLE_GPU)
endif()
Expand Down Expand Up @@ -564,6 +583,7 @@ if(BUILD_CUML_CPP_LIBRARY)
PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${CUML_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${CUML_CUDA_FLAGS}>"
)
target_compile_definitions(${CUML_CPP_TARGET} PUBLIC "CUML_LOG_ACTIVE_LEVEL=CUML_LOG_LEVEL_${LIBCUML_LOGGING_LEVEL}")

target_include_directories(${CUML_CPP_TARGET}
PUBLIC
Expand Down Expand Up @@ -604,6 +624,7 @@ if(BUILD_CUML_CPP_LIBRARY)
raft::raft
rmm::rmm_logger_impl
raft::raft_logger_impl
cuml_logger_impl
$<TARGET_NAME_IF_EXISTS:GPUTreeShap::GPUTreeShap>
$<$<BOOL:${LINK_CUFFT}>:CUDA::cufft${_ctk_fft_static_suffix}>
${TREELITE_LIBS}
Expand All @@ -630,6 +651,7 @@ if(BUILD_CUML_CPP_LIBRARY)
target_link_libraries(${CUML_CPP_TARGET}
PUBLIC rmm::rmm rmm::rmm_logger ${CUVS_LIB}
${_cuml_cpp_public_libs}
cuml_logger
PRIVATE ${_cuml_cpp_private_libs}
)

Expand Down
7 changes: 4 additions & 3 deletions cpp/bench/sg/svc.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -100,8 +100,9 @@ std::vector<SvcParams<D>> getInputs()
p.blobs.seed = 12345ULL;

// SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity})
p.svm_param = ML::SVM::SvmParameter{1, 200, 100, 100, 1e-3, CUML_LEVEL_INFO, 0, ML::SVM::C_SVC};
p.model = ML::SVM::SvmModel<D>{0, 0, 0, nullptr, {}, nullptr, 0, nullptr};
p.svm_param =
ML::SVM::SvmParameter{1, 200, 100, 100, 1e-3, ML::level_enum::info, 0, ML::SVM::C_SVC};
p.model = ML::SVM::SvmModel<D>{0, 0, 0, nullptr, {}, nullptr, 0, nullptr};

std::vector<Triplets> rowcols = {{50000, 2, 2}, {2048, 100000, 2}, {50000, 1000, 2}};

Expand Down
4 changes: 2 additions & 2 deletions cpp/bench/sg/svr.cu
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -102,7 +102,7 @@ std::vector<SvrParams<D>> getInputs()
// SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity,
// epsilon, svmType})
p.svm_param =
ML::SVM::SvmParameter{1, 200, 200, 100, 1e-3, CUML_LEVEL_INFO, 0.1, ML::SVM::EPSILON_SVR};
ML::SVM::SvmParameter{1, 200, 200, 100, 1e-3, ML::level_enum::info, 0.1, ML::SVM::EPSILON_SVR};
p.model = new ML::SVM::SvmModel<D>{0, 0, 0, 0};

std::vector<Triplets> rowcols = {{50000, 2, 2}, {1024, 10000, 10}, {3000, 200, 200}};
Expand Down
4 changes: 2 additions & 2 deletions cpp/examples/dbscan/dbscan_example.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
* Copyright (c) 2019-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -209,7 +209,7 @@ int main(int argc, char* argv[])
nullptr,
max_bytes_per_batch,
ML::Dbscan::EpsNnMethod::BRUTE_FORCE,
false);
ML::level_enum::off);
CUDA_RT_CALL(cudaMemcpyAsync(
h_labels.data(), d_labels, nRows * sizeof(int), cudaMemcpyDeviceToHost, stream));
CUDA_RT_CALL(cudaStreamSynchronize(stream));
Expand Down
12 changes: 6 additions & 6 deletions cpp/include/cuml/cluster/dbscan.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2024, NVIDIA CORPORATION.
* Copyright (c) 2018-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -16,7 +16,7 @@

#pragma once

#include <cuml/common/log_levels.hpp>
#include <cuml/common/logger.hpp>

#include <cuvs/distance/distance.hpp>

Expand Down Expand Up @@ -73,7 +73,7 @@ void fit(const raft::handle_t& handle,
float* sample_weight = nullptr,
size_t max_bytes_per_batch = 0,
EpsNnMethod eps_nn_method = BRUTE_FORCE,
int verbosity = CUML_LEVEL_INFO,
level_enum verbosity = ML::level_enum::info,
bool opg = false);
void fit(const raft::handle_t& handle,
double* input,
Expand All @@ -87,7 +87,7 @@ void fit(const raft::handle_t& handle,
double* sample_weight = nullptr,
size_t max_bytes_per_batch = 0,
EpsNnMethod eps_nn_method = BRUTE_FORCE,
int verbosity = CUML_LEVEL_INFO,
level_enum verbosity = ML::level_enum::info,
bool opg = false);

void fit(const raft::handle_t& handle,
Expand All @@ -102,7 +102,7 @@ void fit(const raft::handle_t& handle,
float* sample_weight = nullptr,
size_t max_bytes_per_batch = 0,
EpsNnMethod eps_nn_method = BRUTE_FORCE,
int verbosity = CUML_LEVEL_INFO,
level_enum verbosity = ML::level_enum::info,
bool opg = false);
void fit(const raft::handle_t& handle,
double* input,
Expand All @@ -116,7 +116,7 @@ void fit(const raft::handle_t& handle,
double* sample_weight = nullptr,
size_t max_bytes_per_batch = 0,
EpsNnMethod eps_nn_method = BRUTE_FORCE,
int verbosity = CUML_LEVEL_INFO,
level_enum verbosity = ML::level_enum::info,
bool opg = false);

/** @} */
Expand Down
4 changes: 1 addition & 3 deletions cpp/include/cuml/cluster/kmeans.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
* Copyright (c) 2019-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -16,8 +16,6 @@

#pragma once

#include <cuml/common/log_levels.hpp>

#include <cuvs/cluster/kmeans.hpp>

namespace raft {
Expand Down
37 changes: 0 additions & 37 deletions cpp/include/cuml/common/log_levels.hpp

This file was deleted.

Loading

0 comments on commit 51e6851

Please sign in to comment.