diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..ddff440 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[build] +rustflags = ["-C", "target-cpu=native"] diff --git a/Cargo.toml b/Cargo.toml index 574870d..b589cfe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,14 +1,15 @@ [package] name = "ffsvm" -description="A libSVM compatible support vector machine, but up to 10x faster, for games or VR." -version = "0.10.1" +description = "A libSVM compatible support vector machine, but up to 10x faster, for games or VR." +version = "0.11.0" repository = "https://github.com/ralfbiedert/ffsvm-rust" authors = ["Ralf Biedert "] readme = "README.md" categories = ["science", "algorithms"] keywords = ["svm", "libsvm", "machine-learning"] license = "MIT" -edition = "2018" +edition = "2021" +rust-version = "1.83" exclude = [ "docs/*", ] @@ -16,11 +17,11 @@ exclude = [ [lib] name = "ffsvm" path = "src/lib.rs" -crate-type = [ "rlib" ] +crate-type = ["rlib"] [dependencies] -simd_aligned = "0.4" -#simd_aligned = { path = "../simd_aligned_rust" } +#simd_aligned = "0.5" +simd_aligned = { path = "../simd_aligned" } [dev-dependencies] rand = "0.8.5" diff --git a/README.md b/README.md index 86fd2b4..2f9107e 100644 --- a/README.md +++ b/README.md @@ -6,81 +6,89 @@ ## In One Sentence -You trained a SVM using [libSVM](https://github.com/cjlin1/libsvm), now you want the highest possible performance during (real-time) classification, like games or VR. - +You trained an SVM using [libSVM](https://github.com/cjlin1/libsvm); now you want the highest possible performance +during (real-time) classification, like games or VR. ## Highlights -* loads almost all [libSVM](https://github.com/cjlin1/libsvm) types (C-SVC, ν-SVC, ε-SVR, ν-SVR) and kernels (linear, poly, RBF and sigmoid) +* loads almost all [libSVM](https://github.com/cjlin1/libsvm) types (C-SVC, ν-SVC, ε-SVR, ν-SVR) and kernels (linear, + poly, RBF and sigmoid) * produces practically the same classification results as libSVM -* optimized for [SIMD](https://github.com/rust-lang/rfcs/pull/2366) and can be mixed seamlessly with [Rayon](https://github.com/rayon-rs/rayon) +* optimized for [SIMD](https://github.com/rust-lang/rfcs/pull/2366) and can be mixed seamlessly + with [Rayon](https://github.com/rayon-rs/rayon) * written in 100% Rust * allocation-free during classification for dense SVMs * **2.5x - 14x faster than libSVM for dense SVMs** * extremely low classification times for small models (e.g., 128 SV, 16 dense attributes, linear ~ 500ns) * successfully used in **Unity and VR** projects (Windows & Android) - -Note: Currently **requires Rust nightly** (March 2019 and later), because we depend on RFC 2366 (portable SIMD). Once that stabilizes we'll also go stable. - ## Usage -Train with [libSVM](https://github.com/cjlin1/libsvm) (e.g., using the tool `svm-train`), then classify with `ffsvm-rust`. +Train with [libSVM](https://github.com/cjlin1/libsvm) (e.g., using the tool `svm-train`), then classify with +`ffsvm-rust`. From Rust: ```rust // Replace `SAMPLE_MODEL` with a `&str` to your model.
let svm = DenseSVM::try_from(SAMPLE_MODEL)?; -let mut problem = Problem::from(&svm); +let mut fv = FeatureVector::from(&svm); -let features = problem.features(); +let features = fv.features(); features[0] = 0.55838; features[1] = -0.157895; features[2] = 0.581292; features[3] = -0.221184; -svm.predict_value(&mut problem)?; +svm.predict_value(&mut fv)?; -assert_eq!(problem.solution(), Solution::Label(42)); +assert_eq!(fv.label(), Label::Class(42)); ``` ## Status + +* **December 14, 2024**: **After 7+ years, finally ported to stable**.🎉🎉🎉 * **March 10, 2023**: Reactivated for latest Rust nightly. * **June 7, 2019**: Gave up on 'no `unsafe`', but gained runtime SIMD selection. * **March 10, 2019**: As soon as we can move away from nightly we'll go beta. * **Aug 5, 2018**: Still in alpha, but finally on crates.io. * **May 27, 2018**: We're in alpha. Successfully used internally on Windows, Mac, Android and Linux -on various machines and devices. Once SIMD stabilizes and we can cross-compile to WASM -we'll move to beta. + on various machines and devices. Once SIMD stabilizes and we can cross-compile to WASM + we'll move to beta. * **December 16, 2017**: We're in pre-alpha. It will probably not even work on your machine. - ## Performance ![performance](https://raw.githubusercontent.com/ralfbiedert/ffsvm-rust/master/docs/performance_relative.v3.png) -All performance numbers reported for the `DenseSVM`. We also have support for `SparseSVM`s, which are slower for "mostly dense" models, and faster for "mostly sparse" models (and generally on the performance level of libSVM). +All performance numbers are reported for the `DenseSVM`. We also have support for `SparseSVM`s, which are slower for "mostly +dense" models, and faster for "mostly sparse" models (and generally on the performance level of libSVM). [See here for details.](https://github.com/ralfbiedert/ffsvm-rust/blob/master/docs/performance.md) - #### Tips +* Compile your project with `target-cpu=native` for a massive speed boost (see our `.cargo/config.toml` for how you + can easily do that for your project). Due to how Rust works, this only takes effect when building applications + (or dynamic FFI libraries), not library crates wrapping us. -* For an x-fold performance increase, create a number of `Problem` structures, and process them with [Rayon's](https://docs.rs/rayon/1.0.3/rayon/) `par_iter`. +* For an x-fold performance increase, create a number of `FeatureVector` structures, and process them + with [Rayon's](https://docs.rs/rayon/1.0.3/rayon/) `par_iter`, as sketched below.
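A minimal sketch of that tip, assuming `rayon` is added as a dependency; the batch size and feature values are placeholders:

```rust
use ffsvm::{DenseFeatures, DenseSVM, Error, Predict, SAMPLE_MODEL};
use rayon::prelude::*;

fn main() -> Result<(), Error> {
    let svm = DenseSVM::try_from(SAMPLE_MODEL)?;

    // One reusable `FeatureVector` per work item keeps classification allocation-free.
    let mut batch: Vec<DenseFeatures> = (0..128).map(|_| DenseFeatures::from(&svm)).collect();

    // Set features on each vector, then classify the whole batch in parallel.
    for fv in &mut batch {
        fv.features()[0] = 0.55838;
    }

    batch.par_iter_mut().try_for_each(|fv| svm.predict_value(fv))
}
```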
## FAQ [See here for details.](https://github.com/ralfbiedert/ffsvm-rust/blob/master/docs/FAQ.md) [Latest Version]: https://img.shields.io/crates/v/ffsvm.svg + [crates.io]: https://crates.io/crates/ffsvm + [MIT]: https://img.shields.io/badge/license-MIT-blue.svg + [docs]: https://docs.rs/ffsvm/badge.svg + [docs.rs]: https://docs.rs/ffsvm/ + [deps]: https://deps.rs/repo/github/ralfbiedert/ffsvm-rust + [deps.svg]: https://deps.rs/repo/github/ralfbiedert/ffsvm-rust/status.svg diff --git a/benches/svm_dense.rs b/benches/svm_dense.rs index 1b6f2e9..e9522a4 100644 --- a/benches/svm_dense.rs +++ b/benches/svm_dense.rs @@ -9,7 +9,7 @@ mod util; mod svm_dense { use crate::test::Bencher; - use ffsvm::{DenseSVM, Predict, Problem}; + use ffsvm::{DenseSVM, FeatureVector, Predict}; use std::convert::TryFrom; /// Produces a test case run for benchmarking @@ -17,8 +17,8 @@ mod svm_dense { fn produce_testcase(svm_type: &str, kernel_type: &str, total_sv: u32, num_attributes: u32) -> impl FnMut() { let raw_model = super::util::random_dense(svm_type, kernel_type, total_sv, num_attributes); let svm = DenseSVM::try_from(&raw_model).unwrap(); - let mut problem = Problem::from(&svm); - let problem_mut = problem.features().as_slice_mut(); + let mut problem = FeatureVector::from(&svm); + let problem_mut = problem.features(); for i in 0 .. num_attributes { problem_mut[i as usize] = i as f32; @@ -70,5 +70,4 @@ mod svm_dense { #[bench] fn predict_sigmoid_sv1024_attr1024(b: &mut Bencher) { b.iter(produce_testcase("c_svc", "sigmoid", 1024, 1024)); } - } diff --git a/benches/svm_sparse.rs b/benches/svm_sparse.rs index 093e1e1..f53be60 100644 --- a/benches/svm_sparse.rs +++ b/benches/svm_sparse.rs @@ -9,7 +9,7 @@ mod util; mod svm_sparse { use crate::test::Bencher; - use ffsvm::{Predict, Problem, SparseSVM}; + use ffsvm::{FeatureVector, Predict, SparseSVM}; use std::convert::TryFrom; /// Produces a test case run for benchmarking @@ -17,7 +17,7 @@ mod svm_sparse { fn produce_testcase(svm_type: &str, kernel_type: &str, total_sv: u32, num_attributes: u32) -> impl FnMut() { let raw_model = super::util::random_dense(svm_type, kernel_type, total_sv, num_attributes); let svm = SparseSVM::try_from(&raw_model).unwrap(); - let mut problem = Problem::from(&svm); + let mut problem = FeatureVector::from(&svm); let problem_mut = problem.features(); for i in 0 .. num_attributes { diff --git a/benches/util.rs b/benches/util.rs index 56b92a0..f32dd97 100644 --- a/benches/util.rs +++ b/benches/util.rs @@ -4,31 +4,32 @@ use rand::Rng; pub fn random_dense<'b>(svm_type: &'b str, kernel_type: &'b str, total_sv: u32, attr: u32) -> ModelFile<'b> { let mut rng = rand::thread_rng(); - ModelFile { - header: Header { - svm_type, - kernel_type, - total_sv, - gamma: Some(rng.gen::<f32>()), - coef0: Some(rng.gen::<f32>()), - degree: Some(rng.gen_range(1..10)), - nr_class: 2, - rho: vec![rng.gen::<f64>()], - label: vec![0, 1], - prob_a: Some(vec![rng.gen::<f64>(), rng.gen::<f64>()]), - prob_b: Some(vec![rng.gen::<f64>(), rng.gen::<f64>()]), - nr_sv: vec![total_sv / 2, total_sv / 2], - }, - vectors: (0 .. total_sv) - .map(|_| SupportVector { - coefs: vec![rng.gen::<f32>()], - features: (0 .. attr) - .map(|i| Attribute { - index: i, - value: rng.gen::<f32>(), - }) - .collect(), - }) - .collect(), - } + let header = Header { + svm_type, + kernel_type, + total_sv, + gamma: Some(rng.gen::<f32>()), + coef0: Some(rng.gen::<f32>()), + degree: Some(rng.gen_range(1 .. 10)), + nr_class: 2, + rho: vec![rng.gen::<f64>()], + label: vec![0, 1], + prob_a: Some(vec![rng.gen::<f64>(), rng.gen::<f64>()]), + prob_b: Some(vec![rng.gen::<f64>(), rng.gen::<f64>()]), + nr_sv: vec![total_sv / 2, total_sv / 2], + }; + + let vectors = (0 .. total_sv) + .map(|_| SupportVector { + coefs: vec![rng.gen::<f32>()], + features: (0 .. attr) + .map(|i| Attribute { + index: i, + value: rng.gen::<f32>(), + }) + .collect(), + }) + .collect(); + + ModelFile::new(header, vectors) } diff --git a/examples/basic.rs b/examples/basic.rs index ec43633..4afe5d1 100644 --- a/examples/basic.rs +++ b/examples/basic.rs @@ -4,17 +4,17 @@ use std::convert::TryFrom; fn main() -> Result<(), Error> { let svm = DenseSVM::try_from(SAMPLE_MODEL)?; - let mut problem = Problem::from(&svm); - let features = problem.features(); + let mut fv = FeatureVector::from(&svm); + let features = fv.features(); features[0] = 0.558_382; features[1] = -0.157_895; features[2] = 0.581_292; features[3] = -0.221_184; - svm.predict_value(&mut problem)?; + svm.predict_value(&mut fv)?; - assert_eq!(problem.solution(), Solution::Label(42)); + assert_eq!(fv.label(), Label::Class(42)); Ok(()) } diff --git a/src/errors.rs b/src/errors.rs index a80e44f..483eef3 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,11 +1,9 @@ -use std::{ - num::{ParseFloatError, ParseIntError}, -}; +use std::num::{ParseFloatError, ParseIntError}; /// Possible error types when classifying with one of the SVMs.
#[derive(Debug)] pub enum Error { - /// This can be emitted when creating a SVM from a [`ModelFile`](crate::ModelFile). For models generated by + /// This can be emitted when creating an SVM from a [`ModelFile`](crate::ModelFile). For models generated by /// libSVM's `svm-train`, the most common reason this occurs is skipping attributes. /// All attributes must be in sequential order 0, 1, 2, ..., n. If they are not, this /// error will be emitted. For more details see the documentation provided in [`ModelFile`](crate::ModelFile). diff --git a/src/lib.rs b/src/lib.rs index c20ca83..d66a0d4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,24 +6,19 @@ //! //! # In One Sentence //! -//! You trained a SVM using [libSVM](https://github.com/cjlin1/libsvm), now you want the highest possible performance during (real-time) classification, like games or VR. -//! +//! You trained an SVM using [libSVM](https://github.com/cjlin1/libsvm); now you want the highest possible performance during (real-time) classification, like games or VR. //! //! # Highlights //! //! * loads almost all [libSVM](https://github.com/cjlin1/libsvm) types (C-SVC, ν-SVC, ε-SVR, ν-SVR) and kernels (linear, poly, RBF and sigmoid) //! * produces practically the same classification results as libSVM //! * optimized for [SIMD](https://github.com/rust-lang/rfcs/pull/2366) and can be mixed seamlessly with [Rayon](https://github.com/rayon-rs/rayon) -//! * written in 100% Rust +//! * written in 100% safe Rust //! * allocation-free during classification for dense SVMs //! * **2.5x - 14x faster than libSVM for dense SVMs** //! * extremely low classification times for small models (e.g., 128 SV, 16 dense attributes, linear ~ 500ns) //! * successfully used in **Unity and VR** projects (Windows & Android) //! -//! -//! Note: Currently **requires Rust nightly** (March 2019 and later), because we depend on RFC 2366 (portable SIMD). Once that stabilizes we'll also go stable. -//! -//! //! # Usage //! //! Train with [libSVM](https://github.com/cjlin1/libsvm) (e.g., using the tool `svm-train`), then classify with `ffsvm-rust`. //! //! From Rust: //! //! ```rust //! # use std::convert::TryFrom; -//! # use ffsvm::{DenseSVM, Predict, Problem, SAMPLE_MODEL, Solution}; +//! # use ffsvm::{DenseSVM, FeatureVector, Label, Predict, SAMPLE_MODEL}; //! # fn main() -> Result<(), ffsvm::Error> { //! // Replace `SAMPLE_MODEL` with a `&str` to your model. //! let svm = DenseSVM::try_from(SAMPLE_MODEL)?; //! -//! let mut problem = Problem::from(&svm); -//! let features = problem.features(); +//! let mut fv = FeatureVector::from(&svm); +//! let features = fv.features(); //! //! features[0] = 0.55838; //! features[1] = -0.157895; //! features[2] = 0.581292; //! features[3] = -0.221184; //! -//! svm.predict_value(&mut problem)?; +//! svm.predict_value(&mut fv)?; //! -//! assert_eq!(problem.solution(), Solution::Label(42)); +//! assert_eq!(fv.label(), Label::Class(42)); //! # Ok(()) //! # } -//! //! ``` //! //! # Status +//! * **December 14, 2024**: **After 7+ years, finally ported to stable**.🎉🎉🎉 //! * **March 10, 2023**: Reactivated for latest Rust nightly. //! * **June 7, 2019**: Gave up on 'no `unsafe`', but gained runtime SIMD selection. //! * **March 10, 2019**: As soon as we can move away from nightly we'll go beta. //! * **Aug 5, 2018**: Still in alpha, but finally on crates.io. //! * **May 27, 2018**: We're in alpha. Successfully used internally on Windows, Mac, Android and Linux -//! on various machines and devices.
Once SIMD stabilizes and we can cross-compile to WASM -//! we'll move to beta. +//! on various machines and devices. Once SIMD stabilizes and we can cross-compile to WASM +//! we'll move to beta. //! * **December 16, 2017**: We're in pre-alpha. It will probably not even work on your machine. //! //! @@ -68,16 +63,19 @@ //! //! ![performance](https://raw.githubusercontent.com/ralfbiedert/ffsvm-rust/master/docs/performance_relative.v3.png) //! -//! All performance numbers reported for the `DenseSVM`. We also have support for `SparseSVM`s, which are slower for "mostly dense" models, and faster for "mostly sparse" models (and generally on the performance level of libSVM). +//! All performance numbers are reported for the `DenseSVM`. We also have support for `SparseSVM`s, which are slower +//! for "mostly dense" models, and faster for "mostly sparse" models (and generally on the performance level of libSVM). //! //! [See here for details.](https://github.com/ralfbiedert/ffsvm-rust/blob/master/docs/performance.md) //! //! //! ### Tips //! +//! * Compile your project with `target-cpu=native` for a massive speed boost (e.g., see our `.cargo/config.toml` for +//! how you can easily do that for your project). Note that due to how Rust works, this only takes effect when building +//! applications (or dynamic FFI libraries), not library crates wrapping us. //! * For an x-fold performance increase, create a number of `FeatureVector` structures, and process them with [Rayon's](https://docs.rs/rayon/1.0.3/rayon/) `par_iter`. //! -//! //! # FAQ //! //! [See here for details.](https://github.com/ralfbiedert/ffsvm-rust/blob/master/docs/FAQ.md) //! @@ -89,8 +87,6 @@ //! [docs.rs]: https://docs.rs/ffsvm/ //! [deps]: https://deps.rs/repo/github/ralfbiedert/ffsvm-rust //! [deps.svg]: https://deps.rs/repo/github/ralfbiedert/ffsvm-rust/status.svg - -#![feature(portable_simd)] #![warn(clippy::all)] // Enable ALL the warnings ... #![warn(clippy::nursery)] #![warn(clippy::pedantic)] @@ -108,16 +104,6 @@ mod svm; mod util; mod vectors; -// Set float types to largest width we support instructions sets -// (important to make sure we get max alignment of target_feature) when selecting -// dynamically. -#[allow(non_camel_case_types)] -#[doc(hidden)] -pub type f32s = simd_aligned::arch::x256::f32s; -#[doc(hidden)] -#[allow(non_camel_case_types)] -pub type f64s = simd_aligned::arch::x256::f64s; - #[doc(hidden)] pub static SAMPLE_MODEL: &str = include_str!("sample.model"); pub use crate::{ errors::Error, parser::{Attribute, Header, ModelFile, SupportVector}, svm::{ + features::{DenseFeatures, FeatureVector, Label, SparseFeatures}, kernel::{KernelDense, KernelSparse, Linear, Poly, Rbf, Sigmoid}, predict::Predict, - problem::{DenseProblem, Problem, Solution, SparseProblem}, DenseSVM, SVMType, SparseSVM, }, }; diff --git a/src/parser.rs b/src/parser.rs index a768ce9..0dce1a7 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -3,28 +3,28 @@ use std::{convert::TryFrom, str}; /// Parsing result of a model file used to instantiate a [`DenseSVM`](`crate::DenseSVM`) or [`SparseSVM`](`crate::SparseSVM`). /// -/// # Obtaining a model +/// # Obtaining Models /// A model file is produced by [libSVM](https://github.com/cjlin1/libsvm). For details on /// how to produce a model see the top-level [FFSVM](index.html#creating-a-libsvm-model) /// documentation.
/// -/// # Loading a model +/// # Loading Models /// -/// Model are generally produced by parsing a `&str` using the `ModelFile::try_from` function: +/// Models are generally produced by parsing a [`&str`] using the [`ModelFile::try_from`] function: /// /// ```rust -/// use ffsvm::*; -/// use std::convert::TryFrom; +/// use ffsvm::ModelFile; +/// # use ffsvm::SAMPLE_MODEL; /// /// let model_result = ModelFile::try_from(SAMPLE_MODEL); /// ``` /// /// Should anything be wrong with the model format, an [`Error`] will be returned. Once you have -/// your model, you can use it to create a SVM, for example by invoking `DenseSVM::try_from(model)`. +/// your model, you can use it to create an SVM, for example by invoking `DenseSVM::try_from(model)`. /// -/// # Model format +/// # Model Format /// -/// For FFSVM to load a model, it needs to approximately look like below. Note that you cannot +/// For FFSVM to load a model, it needs to look approximately like below. Note that you cannot /// reasonably create this model by hand, it needs to come from [libSVM](https://github.com/cjlin1/libsvm). /// /// ```text @@ -54,8 +54,22 @@ use std::{convert::TryFrom, str}; /// `2:`, ... `n:` and not, say, `0:`, `1:`, `4:`, ... `n:`. #[derive(Clone, Debug, Default)] pub struct ModelFile<'a> { - pub header: Header<'a>, - pub vectors: Vec, + header: Header<'a>, + vectors: Vec, +} + +impl<'a> ModelFile<'a> { + #[doc(hidden)] + #[must_use] + pub const fn new(header: Header<'a>, vectors: Vec) -> Self { Self { header, vectors } } + + #[doc(hidden)] + #[must_use] + pub const fn header(&self) -> &Header { &self.header } + + #[doc(hidden)] + #[must_use] + pub fn vectors(&self) -> &[SupportVector] { self.vectors.as_slice() } } #[doc(hidden)] @@ -92,7 +106,7 @@ pub struct SupportVector { impl<'a> TryFrom<&'a str> for ModelFile<'a> { type Error = Error; - /// Parses a string into a SVM model + /// Parses a string into an SVM model #[allow(clippy::similar_names)] fn try_from(input: &str) -> Result, Error> { let mut svm_type = Option::None; diff --git a/src/svm/class.rs b/src/svm/class.rs index a2d8964..52db997 100644 --- a/src/svm/class.rs +++ b/src/svm/class.rs @@ -1,5 +1,5 @@ -use crate::{f32s, f64s, sparse::SparseMatrix}; -use simd_aligned::{MatrixD, Rows}; +use crate::{sparse::SparseMatrix}; +use simd_aligned::{f32x8, f64x4, MatD, Rows}; /// Represents one class of the SVM model. #[derive(Clone, Debug)] @@ -12,19 +12,19 @@ pub(crate) struct Class { // pub(crate) num_support_vectors: usize, /// Coefficients between this class and n-1 other classes. - pub(crate) coefficients: MatrixD, + pub(crate) coefficients: MatD, /// All support vectors in this class. pub(crate) support_vectors: M32, } -impl Class> { +impl Class> { /// Creates a new class with the given parameters. 
/// -/// # Model format +/// # Model Format /// -/// For FFSVM to load a model, it needs to approximately look like below. Note that you cannot +/// For FFSVM to load a model, it needs to look approximately like the example below. Note that you cannot /// reasonably create this model by hand; it needs to come from [libSVM](https://github.com/cjlin1/libsvm). /// /// ```text @@ -54,8 +54,22 @@ use std::{convert::TryFrom, str}; /// `2:`, ... `n:` and not, say, `0:`, `1:`, `4:`, ... `n:`. #[derive(Clone, Debug, Default)] pub struct ModelFile<'a> { - pub header: Header<'a>, - pub vectors: Vec<SupportVector>, + header: Header<'a>, + vectors: Vec<SupportVector>, } +impl<'a> ModelFile<'a> { + #[doc(hidden)] + #[must_use] + pub const fn new(header: Header<'a>, vectors: Vec<SupportVector>) -> Self { Self { header, vectors } } + + #[doc(hidden)] + #[must_use] + pub const fn header(&self) -> &Header { &self.header } + + #[doc(hidden)] + #[must_use] + pub fn vectors(&self) -> &[SupportVector] { self.vectors.as_slice() } } #[doc(hidden)] @@ -92,7 +106,7 @@ pub struct SupportVector { impl<'a> TryFrom<&'a str> for ModelFile<'a> { type Error = Error; - /// Parses a string into a SVM model + /// Parses a string into an SVM model #[allow(clippy::similar_names)] fn try_from(input: &str) -> Result<ModelFile<'a>, Error> { let mut svm_type = Option::None; diff --git a/src/svm/class.rs b/src/svm/class.rs index a2d8964..52db997 100644 --- a/src/svm/class.rs +++ b/src/svm/class.rs @@ -1,5 +1,5 @@ -use crate::{f32s, f64s, sparse::SparseMatrix}; -use simd_aligned::{MatrixD, Rows}; +use crate::sparse::SparseMatrix; +use simd_aligned::{f32x8, f64x4, MatD, Rows}; /// Represents one class of the SVM model. #[derive(Clone, Debug)] pub(crate) struct Class<M32> { /// The label of this class. pub(crate) label: i32, // pub(crate) num_support_vectors: usize, /// Coefficients between this class and n-1 other classes. - pub(crate) coefficients: MatrixD<f64s, Rows>, + pub(crate) coefficients: MatD<f64x4, Rows>, /// All support vectors in this class. pub(crate) support_vectors: M32, } -impl Class<MatrixD<f32s, Rows>> { +impl Class<MatD<f32x8, Rows>> { /// Creates a new class with the given parameters. pub fn with_parameters(classes: usize, support_vectors: usize, attributes: usize, label: i32) -> Self { Self { label, - coefficients: MatrixD::with_dimension(classes - 1, support_vectors), - support_vectors: MatrixD::with_dimension(support_vectors, attributes), + coefficients: MatD::with_dimension(classes - 1, support_vectors), + support_vectors: MatD::with_dimension(support_vectors, attributes), } } } @@ -34,7 +34,7 @@ impl Class<SparseMatrix<f32>> { pub fn with_parameters(classes: usize, support_vectors: usize, _attributes: usize, label: i32) -> Self { Self { label, - coefficients: MatrixD::with_dimension(classes - 1, support_vectors), + coefficients: MatD::with_dimension(classes - 1, support_vectors), support_vectors: SparseMatrix::with(support_vectors), } } diff --git a/src/svm/core/dense.rs b/src/svm/core/dense.rs index df5438f..e15f9ae 100644 --- a/src/svm/core/dense.rs +++ b/src/svm/core/dense.rs @@ -1,31 +1,29 @@ -use simd_aligned::{MatrixD, Rows, VectorD}; +use simd_aligned::{f32x8, traits::Simd, MatD, Rows, VecD}; use std::convert::TryFrom; use crate::{ errors::Error, - f32s, f64s, parser::ModelFile, svm::{ class::Class, + features::{FeatureVector, Label}, kernel::{KernelDense, Linear, Poly, Rbf, Sigmoid}, predict::Predict, - problem::{Problem, Solution}, Probabilities, SVMType, }, util::{find_max_index, set_all, sigmoid_predict}, vectors::Triangular, }; -/// A SVM using [SIMD](https://en.wikipedia.org/wiki/SIMD) intrinsics optimized for speed. +/// An SVM using [SIMD](https://en.wikipedia.org/wiki/SIMD) intrinsics optimized for speed. /// /// -/// # Creating a SVM +/// # Creating an SVM /// -/// This SVM can be created by passing a [`ModelFile`](crate::ModelFile) into `try_from`, or a `&str`: +/// This SVM can be created by passing a [`ModelFile`](crate::ModelFile) or [`&str`] into [`DenseSVM::try_from`]: /// /// ``` -/// use ffsvm::*; -/// use std::convert::TryFrom; +/// use ffsvm::DenseSVM; /// /// let svm = DenseSVM::try_from("..."); /// ``` @@ -46,7 +44,7 @@ pub struct DenseSVM { pub(crate) kernel: Box<dyn KernelDense>, /// All classes - pub(crate) classes: Vec<Class<MatrixD<f32s, Rows>>>, + pub(crate) classes: Vec<Class<MatD<f32x8, Rows>>>, } impl DenseSVM { @@ -56,12 +54,12 @@ /// /// This method takes a `label` as defined in the libSVM training model /// and returns the internal `index` where this label resides. The index - /// equals [`Problem::probabilities`] index where that label's + /// equals [`FeatureVector::probabilities`] index where that label's /// probability can be found. /// /// # Returns /// - /// If the label was found its index returned in the [`Option`]. Otherwise `None` + /// If the label was found, its index is returned in the [`Option`], otherwise `None` /// is returned. pub fn class_index_for_label(&self, label: i32) -> Option<usize> { for (i, class) in self.classes.iter().enumerate() { @@ -80,12 +78,12 @@ impl DenseSVM { /// # Description /// /// The inverse of [`DenseSVM::class_index_for_label`], this function returns the class label - /// associated with a certain internal index. The index equals the [`Problem::probabilities`] + /// associated with a certain internal index. The index equals the [`FeatureVector::probabilities`] /// index where a label's probability can be found. /// /// # Returns /// - /// If the index was found it is returned in the [`Option`]. Otherwise `None` + /// If the index was found, it is returned in the [`Option`], otherwise `None` /// is returned.
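///
/// For example, with the bundled `SAMPLE_MODEL` (which contains a class labeled `42`), label and
/// index round-trip (a sketch):
///
/// ```
/// use ffsvm::{DenseSVM, SAMPLE_MODEL};
///
/// let svm = DenseSVM::try_from(SAMPLE_MODEL).unwrap();
/// let index = svm.class_index_for_label(42).unwrap();
/// assert_eq!(svm.class_label_for_index(index), Some(42));
/// ```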
pub fn class_label_for_index(&self, index: usize) -> Option<i32> { if index >= self.classes.len() { None @@ -96,7 +94,7 @@ } /// Computes the kernel values for this problem - pub(crate) fn compute_kernel_values(&self, problem: &mut Problem<VectorD<f32s>>) { + pub(crate) fn compute_kernel_values(&self, problem: &mut FeatureVector<VecD<f32x8>>) { // Get current problem and decision values array let features = &problem.features; let kernel_values = &mut problem.kernel_values; @@ -105,7 +103,7 @@ for (i, class) in self.classes.iter().enumerate() { let kvalues = kernel_values.row_as_flat_mut(i); - self.kernel.compute(&class.support_vectors, features.as_raw(), kvalues); + self.kernel.compute(&class.support_vectors, features, kvalues); } } @@ -114,14 +112,13 @@ // based on Method 2 from the paper "Probability Estimates for Multi-class // Classification by Pairwise Coupling", Journal of Machine Learning Research 5 (2004) 975-1005, // by Ting-Fan Wu, Chih-Jen Lin and Ruby C. Weng. - pub(crate) fn compute_multiclass_probabilities(&self, problem: &mut Problem<VectorD<f32s>>) -> Result<(), Error> { compute_multiclass_probabilities_impl!(self, problem) } + pub(crate) fn compute_multiclass_probabilities(&self, problem: &mut FeatureVector<VecD<f32x8>>) -> Result<(), Error> { compute_multiclass_probabilities_impl!(self, problem) } /// Based on kernel values, computes the decision values for this problem. - pub(crate) fn compute_classification_values(&self, problem: &mut Problem<VectorD<f32s>>) { compute_classification_values_impl!(self, problem) } + pub(crate) fn compute_classification_values(&self, problem: &mut FeatureVector<VecD<f32x8>>) { compute_classification_values_impl!(self, problem) } /// Based on kernel values, computes the decision values for this problem. - pub(crate) fn compute_regression_values(&self, problem: &mut Problem<VectorD<f32s>>) { - use simd_aligned::SimdExt; + pub(crate) fn compute_regression_values(&self, problem: &mut FeatureVector<VecD<f32x8>>) { let class = &self.classes[0]; let coef = class.coefficients.row(0); let kvalues = problem.kernel_values.row(0); @@ -130,7 +127,7 @@ sum -= self.rho[0]; - problem.result = Solution::Value(sum as f32); + problem.result = Label::Value(sum as f32); } /// Returns number of attributes, reflecting the libSVM model. @@ -140,33 +137,33 @@ pub fn classes(&self) -> usize { self.classes.len() } } -impl Predict<VectorD<f32s>, VectorD<f64s>> for DenseSVM { - fn predict_probability(&self, problem: &mut Problem<VectorD<f32s>>) -> Result<(), Error> { predict_probability_impl!(self, problem) } - +impl Predict<VecD<f32x8>> for DenseSVM { // Predict the value for one problem.
- fn predict_value(&self, problem: &mut Problem<VectorD<f32s>>) -> Result<(), Error> { + fn predict_value(&self, fv: &mut FeatureVector<VecD<f32x8>>) -> Result<(), Error> { match self.svm_type { SVMType::CSvc | SVMType::NuSvc => { // Compute kernel, decision values and eventually the label - self.compute_kernel_values(problem); - self.compute_classification_values(problem); + self.compute_kernel_values(fv); + self.compute_classification_values(fv); - // Compute highest vote - let highest_vote = find_max_index(&problem.vote); - problem.result = Solution::Label(self.classes[highest_vote].label); + // Compute the highest vote + let highest_vote = find_max_index(&fv.vote); + fv.result = Label::Class(self.classes[highest_vote].label); Ok(()) } SVMType::ESvr | SVMType::NuSvr => { - self.compute_kernel_values(problem); - self.compute_regression_values(problem); + self.compute_kernel_values(fv); + self.compute_regression_values(fv); Ok(()) } } } + + fn predict_probability(&self, problem: &mut FeatureVector<VecD<f32x8>>) -> Result<(), Error> { predict_probability_impl!(self, problem) } } -impl<'a, 'b> TryFrom<&'a str> for DenseSVM { +impl<'a> TryFrom<&'a str> for DenseSVM { type Error = Error; fn try_from(input: &'a str) -> Result<Self, Error> { @@ -179,9 +176,9 @@ impl<'a, 'b> TryFrom<&'a ModelFile<'b>> for DenseSVM { type Error = Error; fn try_from(raw_model: &'a ModelFile<'_>) -> Result<Self, Error> { - let (mut svm, nr_sv) = prepare_svm!(raw_model, dyn KernelDense, MatrixD<f32s, Rows>, Self); + let (mut svm, nr_sv) = prepare_svm!(raw_model, dyn KernelDense, MatD<f32x8, Rows>, Self); - let vectors = &raw_model.vectors; + let vectors = raw_model.vectors(); // Things down here are a bit ugly as the file format is a bit ugly ... // Now read all vectors and decode stored information @@ -227,7 +224,7 @@ } // Return what we have - Result::Ok(svm) + Ok(svm) } } @@ -245,5 +242,4 @@ mod tests { Ok(()) } - } diff --git a/src/svm/core/mod.rs b/src/svm/core/mod.rs index 596186f..7ab9ac0 100644 --- a/src/svm/core/mod.rs +++ b/src/svm/core/mod.rs @@ -3,14 +3,14 @@ macro_rules! prepare_svm { // To quickly check what broke again during parsing ... // println!("{:?}", raw_model); { - let header = &$raw_model.header; - let vectors = &$raw_model.vectors; + let header = &$raw_model.header(); + let vectors = &$raw_model.vectors(); // Get basic info let num_attributes = vectors[0].features.len(); let num_total_sv = header.total_sv as usize; - let svm_type = match $raw_model.header.svm_type { + let svm_type = match $raw_model.header().svm_type { "c_svc" => SVMType::CSvc, "nu_svc" => SVMType::NuSvc, "epsilon_svr" => SVMType::ESvr, @@ -18,7 +18,7 @@ _ => unimplemented!(), }; - let kernel: Box<$k> = match $raw_model.header.kernel_type { + let kernel: Box<$k> = match $raw_model.header().kernel_type { "rbf" => Box::new(Rbf::try_from($raw_model)?), "linear" => Box::new(Linear::from($raw_model)), "polynomial" => Box::new(Poly::try_from($raw_model)?), @@ -53,7 +53,7 @@ SVMType::ESvr | SVMType::NuSvr => vec![Class::<$m32>::with_parameters(2, num_total_sv, num_attributes, 0)], }; - let probabilities = match (&$raw_model.header.prob_a, &$raw_model.header.prob_b) { + let probabilities = match (&$raw_model.header().prob_a, &$raw_model.header().prob_b) { // Regular case for classification with probabilities (&Some(ref a), &Some(ref b)) => Some(Probabilities { a: Triangular::from(a), @@ -169,9 +169,7 @@ macro_rules!
compute_classification_values_impl { ($self:tt, $problem:tt) => {{ - // Reset all votes - use simd_aligned::SimdExt; - + use simd_aligned::traits::Simd; set_all(&mut $problem.vote, 0); // Since classification is symmetric, if we have N classes, we only need to go through @@ -257,7 +255,7 @@ macro_rules! predict_probability_impl { } let max_index = find_max_index($problem.probabilities.flat()); - $problem.result = Solution::Label($self.classes[max_index].label); + $problem.result = Label::Class($self.classes[max_index].label); Ok(()) } diff --git a/src/svm/core/sparse.rs b/src/svm/core/sparse.rs index 35c0c31..68feffc 100644 --- a/src/svm/core/sparse.rs +++ b/src/svm/core/sparse.rs @@ -1,5 +1,6 @@ use crate::sparse::{SparseMatrix, SparseVector}; +use simd_aligned::traits::Simd; use std::convert::TryFrom; use crate::{ @@ -7,25 +8,24 @@ use crate::{ parser::ModelFile, svm::{ class::Class, + features::{FeatureVector, Label}, kernel::{KernelSparse, Linear, Poly, Rbf, Sigmoid}, predict::Predict, - problem::{Problem, Solution}, Probabilities, SVMType, }, util::{find_max_index, set_all, sigmoid_predict}, vectors::Triangular, }; -/// A SVM optimized for large models with many empty attributes. +/// An SVM optimized for large models with many empty attributes. /// -/// # Creating a SVM +/// # Creating an SVM /// -/// This SVM can be created by passing a [`ModelFile`](crate::ModelFile) into `try_from`, or a `&str`: +/// This SVM can be created by passing a [`ModelFile`](crate::ModelFile) or [`&str`] into [`SparseSVM::try_from`]: /// /// /// ``` -/// use ffsvm::*; -/// use std::convert::TryFrom; +/// use ffsvm::SparseSVM; /// /// let svm = SparseSVM::try_from("..."); /// ``` @@ -56,13 +56,14 @@ impl SparseSVM { /// /// This method takes a `label` as defined in the libSVM training model /// and returns the internal `index` where this label resides. The index - /// equals [`Problem::probabilities`] index where that label's + /// equals [`FeatureVector::probabilities`] index where that label's /// probability can be found. /// /// # Returns /// - /// If the label was found its index returned in the [`Option`]. Otherwise `None` + /// If the label was found, its index is returned in the [`Option`], otherwise `None` /// is returned. + #[must_use] pub fn class_index_for_label(&self, label: i32) -> Option<usize> { for (i, class) in self.classes.iter().enumerate() { if class.label != label { @@ -80,13 +81,14 @@ impl SparseSVM { /// # Description /// /// The inverse of [`SparseSVM::class_index_for_label`], this function returns the class label - /// associated with a certain internal index. The index equals the [`Problem::probabilities`] + /// associated with a certain internal index. The index equals the [`FeatureVector::probabilities`] /// index where a label's probability can be found. /// /// # Returns /// - /// If the index was found it is returned in the [`Option`]. Otherwise `None` + /// If the index was found, it is returned in the [`Option`], otherwise `None` /// is returned.
+ #[must_use] pub fn class_label_for_index(&self, index: usize) -> Option<i32> { if index >= self.classes.len() { None @@ -96,7 +98,7 @@ } /// Computes the kernel values for this problem - pub(crate) fn compute_kernel_values(&self, problem: &mut Problem<SparseVector<f32>>) { + pub(crate) fn compute_kernel_values(&self, problem: &mut FeatureVector<SparseVector<f32>>) { // Get current problem and decision values array let features = &problem.features; let kernel_values = &mut problem.kernel_values; @@ -105,7 +107,7 @@ for (i, class) in self.classes.iter().enumerate() { let kvalues = kernel_values.row_as_flat_mut(i); - self.kernel.compute(&class.support_vectors, features.as_raw(), kvalues); + self.kernel.compute(&class.support_vectors, features, kvalues); } } @@ -114,14 +116,15 @@ // based on Method 2 from the paper "Probability Estimates for Multi-class // Classification by Pairwise Coupling", Journal of Machine Learning Research 5 (2004) 975-1005, // by Ting-Fan Wu, Chih-Jen Lin and Ruby C. Weng. - pub(crate) fn compute_multiclass_probabilities(&self, problem: &mut Problem<SparseVector<f32>>) -> Result<(), Error> { compute_multiclass_probabilities_impl!(self, problem) } + pub(crate) fn compute_multiclass_probabilities(&self, problem: &mut FeatureVector<SparseVector<f32>>) -> Result<(), Error> { + compute_multiclass_probabilities_impl!(self, problem) + } /// Based on kernel values, computes the decision values for this problem. - pub(crate) fn compute_classification_values(&self, problem: &mut Problem<SparseVector<f32>>) { compute_classification_values_impl!(self, problem) } + pub(crate) fn compute_classification_values(&self, problem: &mut FeatureVector<SparseVector<f32>>) { compute_classification_values_impl!(self, problem) } /// Based on kernel values, computes the decision values for this problem. - pub(crate) fn compute_regression_values(&self, problem: &mut Problem<SparseVector<f32>>) { - use simd_aligned::SimdExt; + pub(crate) fn compute_regression_values(&self, problem: &mut FeatureVector<SparseVector<f32>>) { let class = &self.classes[0]; let coef = class.coefficients.row(0); let kvalues = problem.kernel_values.row(0); @@ -130,21 +133,21 @@ sum -= self.rho[0]; - problem.result = Solution::Value(sum as f32); + problem.result = Label::Value(sum as f32); } /// Returns number of attributes, reflecting the libSVM model. + #[must_use] pub const fn attributes(&self) -> usize { self.num_attributes } /// Returns number of classes, reflecting the libSVM model. + #[must_use] pub fn classes(&self) -> usize { self.classes.len() } } -impl Predict<SparseVector<f32>, SparseVector<f64>> for SparseSVM { - fn predict_probability(&self, problem: &mut Problem<SparseVector<f32>>) -> Result<(), Error> { predict_probability_impl!(self, problem) } - +impl Predict<SparseVector<f32>> for SparseSVM { // Predict the value for one problem.
- fn predict_value(&self, problem: &mut Problem<SparseVector<f32>>) -> Result<(), Error> { + fn predict_value(&self, problem: &mut FeatureVector<SparseVector<f32>>) -> Result<(), Error> { match self.svm_type { SVMType::CSvc | SVMType::NuSvc => { // Compute kernel, decision values and eventually the label @@ -153,7 +156,7 @@ impl Predict<SparseVector<f32>, SparseVector<f64>> for SparseSVM { // Compute highest vote let highest_vote = find_max_index(&problem.vote); - problem.result = Solution::Label(self.classes[highest_vote].label); + problem.result = Label::Class(self.classes[highest_vote].label); Ok(()) } @@ -164,6 +167,8 @@ } } } + + fn predict_probability(&self, problem: &mut FeatureVector<SparseVector<f32>>) -> Result<(), Error> { predict_probability_impl!(self, problem) } } impl<'a, 'b> TryFrom<&'a str> for SparseSVM { @@ -175,13 +180,13 @@ } } -impl<'a, 'b> TryFrom<&'a ModelFile<'b>> for SparseSVM { +impl<'a> TryFrom<&'a ModelFile<'_>> for SparseSVM { type Error = Error; fn try_from(raw_model: &'a ModelFile<'_>) -> Result<Self, Error> { let (mut svm, nr_sv) = prepare_svm!(raw_model, dyn KernelSparse, SparseMatrix<f32>, Self); - let vectors = &raw_model.vectors; + let vectors = raw_model.vectors(); // Things down here are a bit ugly as the file format is a bit ugly ... // Now read all vectors and decode stored information @@ -211,6 +216,6 @@ } // Return what we have - Result::Ok(svm) + Ok(svm) } } diff --git a/src/svm/features.rs b/src/svm/features.rs new file mode 100644 index 0000000..e26ec00 --- /dev/null +++ b/src/svm/features.rs @@ -0,0 +1,158 @@ +use crate::{ + sparse::SparseVector, + svm::{DenseSVM, SparseSVM}, + vectors::Triangular, +}; + +use simd_aligned::{f32x8, f64x4, MatD, Rows, VecD}; + +/// Feature vectors produced for [`DenseSVM`]s. +/// +/// Also see [`FeatureVector`] for more methods for this type. +pub type DenseFeatures = FeatureVector<VecD<f32x8>>; + +/// Feature vectors produced for [`SparseSVM`]s. +/// +/// Also see [`FeatureVector`] for more methods for this type. +pub type SparseFeatures = FeatureVector<SparseVector<f32>>; + +/// The result of a classification +#[derive(Copy, Debug, Clone, PartialEq)] +pub enum Label { + /// If classified this will hold the label. + Class(i32), + + /// If regression was performed contains regression result. + Value(f32), + + /// No operation was performed yet. + None, +} + +/// A single feature vector ("problem") an SVM should classify. +/// +/// # Creating a Feature Vector +/// +/// Feature vectors are created via the [`FeatureVector::from`] method and match the SVM type they were +/// created for, so their layout matches the SVM: +/// +/// ```rust +/// use ffsvm::{DenseSVM, Error, FeatureVector}; +/// # use ffsvm::SAMPLE_MODEL; +/// +/// # fn main() -> Result<(), Error> { +/// let svm = DenseSVM::try_from(SAMPLE_MODEL)?; +/// let mut fv = FeatureVector::from(&svm); +/// # Ok(()) +/// # } +/// ``` +/// +/// # Setting Features +/// +/// A [`FeatureVector`] is an instance of the SVM's problem domain. Before it can be classified, all `features` need +/// to be set, for example by: +/// +/// ``` +/// use ffsvm::DenseFeatures; +/// +/// fn set_features(problem: &mut DenseFeatures) { +/// let features = problem.features(); +/// features[0] = -0.221184; +/// features[3] = 0.135713; +/// } +/// ``` +/// +/// It can then be classified via the [`Predict`](crate::Predict) trait. +#[derive(Debug, Clone)] +pub struct FeatureVector<T> { + /// A vector of all features. + pub(crate) features: T, + + /// KernelDense values.
A vector for each class. + pub(crate) kernel_values: MatD<f64x4, Rows>, + + /// All votes for a given class label. + pub(crate) vote: Vec<u32>, + + /// Decision values. + pub(crate) decision_values: Triangular<f64>, + + /// Pairwise probabilities + pub(crate) pairwise: MatD<f64x4, Rows>, + + /// Needed for multi-class probability estimates replicating libSVM. + pub(crate) q: MatD<f64x4, Rows>, + + /// Needed for multi-class probability estimates replicating libSVM. + pub(crate) qp: Vec<f64>, + + /// Probability estimates that will be updated after this problem was processed + /// by `predict_probability`. + pub(crate) probabilities: VecD<f64x4>, + + /// Computed label that will be updated after this problem was processed. + pub(crate) result: Label, +} + +impl<T> FeatureVector<T> { + /// After a [`FeatureVector`] has been classified, this will hold the SVM's solution label. + pub const fn label(&self) -> Label { self.result } + + /// Returns the probability estimates. Only really useful if the model was trained with probability estimates and you classified with them. + pub fn probabilities(&self) -> &[f64] { self.probabilities.flat() } +} + +impl FeatureVector<VecD<f32x8>> { + /// Returns the features. You must set them first and classify the problem before you can get a solution. + pub fn features(&mut self) -> &mut [f32] { self.features.flat_mut() } +} + +impl FeatureVector<SparseVector<f32>> { + /// Returns the features. You must set them first and classify the problem before you can get a solution. + pub fn features(&mut self) -> &mut SparseVector<f32> { &mut self.features } +} + +impl DenseFeatures { + /// Creates a new problem with the given parameters. + pub(crate) fn with_dimension(total_sv: usize, num_classes: usize, num_attributes: usize) -> Self { + Self { + features: VecD::with(0.0, num_attributes), + kernel_values: MatD::with_dimension(num_classes, total_sv), + pairwise: MatD::with_dimension(num_classes, num_classes), + q: MatD::with_dimension(num_classes, num_classes), + qp: vec![Default::default(); num_classes], + decision_values: Triangular::with_dimension(num_classes, Default::default()), + vote: vec![Default::default(); num_classes], + probabilities: VecD::with(0.0, num_classes), + result: Label::None, + } + } +} + +impl SparseFeatures { + /// Clears the [`FeatureVector`] when reusing it between calls. Only needed for [`SparseSVM`] problems.
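    ///
    /// For example, when reusing one `SparseFeatures` across predictions (a sketch; `svm` is assumed
    /// to be a loaded [`SparseSVM`], and the indices and values are placeholders):
    ///
    /// ```
    /// use ffsvm::{Error, Predict, SparseFeatures, SparseSVM};
    ///
    /// fn classify_twice(svm: &SparseSVM, fv: &mut SparseFeatures) -> Result<(), Error> {
    ///     fv.features()[0] = 0.1;
    ///     svm.predict_value(fv)?;
    ///
    ///     // Remove the stale features before the next prediction.
    ///     fv.clear();
    ///     fv.features()[3] = 0.2;
    ///     svm.predict_value(fv)?;
    ///     Ok(())
    /// }
    /// ```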
+ pub fn clear(&mut self) { self.features.clear(); } + + /// Creates a new problem with the given parameters. + pub(crate) fn with_dimension(total_sv: usize, num_classes: usize, _num_attributes: usize) -> Self { + Self { + features: SparseVector::new(), + kernel_values: MatD::with_dimension(num_classes, total_sv), + pairwise: MatD::with_dimension(num_classes, num_classes), + q: MatD::with_dimension(num_classes, num_classes), + qp: vec![Default::default(); num_classes], + decision_values: Triangular::with_dimension(num_classes, Default::default()), + vote: vec![Default::default(); num_classes], + probabilities: VecD::with(0.0, num_classes), + result: Label::None, + } + } +} + +impl From<&DenseSVM> for DenseFeatures { + fn from(svm: &DenseSVM) -> Self { Self::with_dimension(svm.num_total_sv, svm.classes.len(), svm.num_attributes) } +} + +impl From<&SparseSVM> for SparseFeatures { + fn from(svm: &SparseSVM) -> Self { Self::with_dimension(svm.num_total_sv, svm.classes.len(), svm.num_attributes) } +} diff --git a/src/svm/kernel/linear.rs b/src/svm/kernel/linear.rs index ff04019..7f151ce 100644 --- a/src/svm/kernel/linear.rs +++ b/src/svm/kernel/linear.rs @@ -2,23 +2,21 @@ use std::convert::From; use super::{KernelDense, KernelSparse}; use crate::{ - f32s, parser::ModelFile, sparse::{SparseMatrix, SparseVector}, }; -use simd_aligned::{MatrixD, Rows, VectorD}; +use simd_aligned::{f32x8, traits::Simd, MatD, Rows, VecD}; #[derive(Copy, Clone, Debug, Default)] #[doc(hidden)] pub struct Linear {} impl KernelDense for Linear { - fn compute(&self, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { - use simd_aligned::SimdExt; + fn compute(&self, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { for (i, sv) in vectors.row_iter().enumerate() { - let mut sum = f32s::splat(0.0); - let feature: &[f32s] = feature; + let mut sum = f32x8::splat(0.0); + let feature: &[f32x8] = feature; for (a, b) in sv.iter().zip(feature) { sum += *a * *b; diff --git a/src/svm/kernel/mod.rs b/src/svm/kernel/mod.rs index 259dd3b..983c630 100644 --- a/src/svm/kernel/mod.rs +++ b/src/svm/kernel/mod.rs @@ -4,10 +4,9 @@ mod rbf; mod sigmoid; use crate::{ - f32s, sparse::{SparseMatrix, SparseVector}, }; -use simd_aligned::{MatrixD, Rows, VectorD}; +use simd_aligned::{f32x8, MatD, Rows, VecD}; pub use self::{linear::*, poly::*, rbf::*, sigmoid::*}; @@ -17,7 +16,7 @@ pub trait KernelDense where Self: Send + Sync, { - fn compute(&self, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]); + fn compute(&self, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]); } /// Base trait for kernels diff --git a/src/svm/kernel/poly.rs b/src/svm/kernel/poly.rs index d4a4dc6..d389cad 100644 --- a/src/svm/kernel/poly.rs +++ b/src/svm/kernel/poly.rs @@ -3,12 +3,11 @@ use std::convert::{From, TryFrom}; use super::{KernelDense, KernelSparse}; use crate::{ errors::Error, - f32s, parser::ModelFile, sparse::{SparseMatrix, SparseVector}, }; -use simd_aligned::{MatrixD, Rows, VectorD}; +use simd_aligned::{f32x8, traits::Simd, MatD, Rows, VecD}; #[derive(Copy, Clone, Debug, Default)] #[doc(hidden)] @@ -19,11 +18,10 @@ pub struct Poly { } impl KernelDense for Poly { - fn compute(&self, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { - use simd_aligned::SimdExt; + fn compute(&self, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { for (i, sv) in vectors.row_iter().enumerate() { - let mut sum = f32s::splat(0.0); - let feature: &[f32s] = feature; + let mut sum = f32x8::splat(0.0); + let feature: &[f32x8] = feature; for (a, b) in sv.iter().zip(feature) { sum += *a * *b; @@ -64,9 +62,9 @@ impl<'a, 'b> TryFrom<&'a ModelFile<'b>> for Poly {
type Error = Error; fn try_from(raw_model: &'a ModelFile<'b>) -> Result<Self, Error> { - let gamma = raw_model.header.gamma.ok_or(Error::NoGamma)?; - let coef0 = raw_model.header.coef0.ok_or(Error::NoCoef0)?; - let degree = raw_model.header.degree.ok_or(Error::NoDegree)?; + let gamma = raw_model.header().gamma.ok_or(Error::NoGamma)?; + let coef0 = raw_model.header().coef0.ok_or(Error::NoCoef0)?; + let degree = raw_model.header().degree.ok_or(Error::NoDegree)?; Ok(Self { gamma, coef0, degree }) } diff --git a/src/svm/kernel/rbf.rs b/src/svm/kernel/rbf.rs index 023ca96..5e8ec8b 100644 --- a/src/svm/kernel/rbf.rs +++ b/src/svm/kernel/rbf.rs @@ -3,12 +3,11 @@ use std::convert::{From, TryFrom}; use super::{KernelDense, KernelSparse}; use crate::{ errors::Error, - f32s, parser::ModelFile, sparse::{SparseMatrix, SparseVector}, }; -use simd_aligned::{MatrixD, Rows, VectorD}; +use simd_aligned::{f32x8, traits::Simd, MatD, Rows, VecD}; #[derive(Copy, Clone, Debug, Default)] #[doc(hidden)] @@ -17,13 +16,12 @@ pub struct Rbf { } #[inline] -fn compute_core(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { - use simd_aligned::SimdExt; - // According to Instruments, for realistic SVMs and problems, the VAST majority of our +fn compute_core(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { + // According to Instruments, for realistic SVMs and feature vectors, the VAST majority of our // CPU time is spent in this loop. for (i, sv) in vectors.row_iter().enumerate() { - let mut sum = f32s::splat(0.0); - let feature: &[f32s] = feature; + let mut sum = f32x8::splat(0.0); + let feature: &[f32x8] = feature; for (a, b) in sv.iter().zip(feature) { sum += (*a - *b) * (*a - *b); @@ -39,21 +37,21 @@ fn compute_core(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[target_feature(enable = "avx")] #[inline] -unsafe fn compute_avx(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output); } +unsafe fn compute_avx(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output); } #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[target_feature(enable = "avx2")] #[inline] -unsafe fn compute_avx2(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output); } +unsafe fn compute_avx2(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output); } #[cfg(target_arch = "aarch64")] #[target_feature(enable = "neon")] #[inline] -unsafe fn compute_neon(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output); } +unsafe fn compute_neon(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output); } #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[inline] -fn compute(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { +fn compute(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { if is_x86_feature_detected!("avx2") { unsafe { compute_avx2(rbf, vectors, feature, output) } } else if is_x86_feature_detected!("avx") { @@ -65,7 +63,7 @@ #[cfg(target_arch = "aarch64")] #[inline] -fn compute(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { +fn compute(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { if
std::arch::is_aarch64_feature_detected!("neon") { unsafe { compute_neon(rbf, vectors, feature, output) } } else { @@ -75,10 +73,10 @@ } #[cfg(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")))] #[inline] -fn compute(rbf: Rbf, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output) } +fn compute(rbf: Rbf, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { compute_core(rbf, vectors, feature, output) } impl KernelDense for Rbf { - fn compute(&self, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { compute(*self, vectors, feature, output); } + fn compute(&self, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { compute(*self, vectors, feature, output); } } impl KernelSparse for Rbf { @@ -99,13 +97,13 @@ impl KernelSparse for Rbf { b = b_iter.next(); } (Some((i_a, x)), Some((i_b, _))) if i_a < i_b => { - sum += x*x; - a = a_iter.next(); - }, + sum += x * x; + a = a_iter.next(); + } (Some((i_a, _)), Some((i_b, y))) if i_a > i_b => { - sum += y*y; - b = b_iter.next(); - }, + sum += y * y; + b = b_iter.next(); + } _ => break f64::from((-self.gamma * sum).exp()), } } @@ -117,7 +115,7 @@ impl<'a, 'b> TryFrom<&'a ModelFile<'b>> for Rbf { type Error = Error; fn try_from(raw_model: &'a ModelFile<'b>) -> Result<Self, Error> { - let gamma = raw_model.header.gamma.ok_or(Error::NoGamma)?; + let gamma = raw_model.header().gamma.ok_or(Error::NoGamma)?; Ok(Self { gamma }) } diff --git a/src/svm/kernel/sigmoid.rs b/src/svm/kernel/sigmoid.rs index 71ae713..ffa5734 100644 --- a/src/svm/kernel/sigmoid.rs +++ b/src/svm/kernel/sigmoid.rs @@ -3,12 +3,11 @@ use std::convert::{From, TryFrom}; use super::{KernelDense, KernelSparse}; use crate::{ errors::Error, - f32s, parser::ModelFile, sparse::{SparseMatrix, SparseVector}, }; -use simd_aligned::{MatrixD, Rows, VectorD}; +use simd_aligned::{f32x8, traits::Simd, MatD, Rows, VecD}; #[derive(Copy, Clone, Debug, Default)] #[doc(hidden)] @@ -18,11 +17,10 @@ pub struct Sigmoid { } impl KernelDense for Sigmoid { - fn compute(&self, vectors: &MatrixD<f32s, Rows>, feature: &VectorD<f32s>, output: &mut [f64]) { - use simd_aligned::SimdExt; + fn compute(&self, vectors: &MatD<f32x8, Rows>, feature: &VecD<f32x8>, output: &mut [f64]) { for (i, sv) in vectors.row_iter().enumerate() { - let mut sum = f32s::splat(0.0); - let feature: &[f32s] = feature; + let mut sum = f32x8::splat(0.0); + let feature: &[f32x8] = feature; for (a, b) in sv.iter().zip(feature) { sum += *a * *b; @@ -63,8 +61,8 @@ impl<'a, 'b> TryFrom<&'a ModelFile<'b>> for Sigmoid { type Error = Error; fn try_from(raw_model: &'a ModelFile<'b>) -> Result<Self, Error> { - let gamma = raw_model.header.gamma.ok_or(Error::NoGamma)?; - let coef0 = raw_model.header.coef0.ok_or(Error::NoCoef0)?; + let gamma = raw_model.header().gamma.ok_or(Error::NoGamma)?; + let coef0 = raw_model.header().coef0.ok_or(Error::NoCoef0)?; Ok(Self { gamma, coef0 }) } diff --git a/src/svm/mod.rs b/src/svm/mod.rs index 36dd8e6..4fbb412 100644 --- a/src/svm/mod.rs +++ b/src/svm/mod.rs @@ -1,8 +1,8 @@ pub(crate) mod class; pub(crate) mod core; +pub(crate) mod features; pub(crate) mod kernel; pub(crate) mod predict; -pub(crate) mod problem; use crate::vectors::Triangular; diff --git a/src/svm/predict.rs b/src/svm/predict.rs index 03ef353..a76e32f 100644 --- a/src/svm/predict.rs +++ b/src/svm/predict.rs @@ -1,19 +1,19 @@ -use crate::{errors::Error, svm::problem::Problem}; +use crate::{errors::Error, svm::features::FeatureVector}; -/// Implemented by
[`DenseSVM`](crate::DenseSVM) and [`SparseSVM`](crate::SparseSVM) to predict a [`Problem`]. +/// Implemented by [`DenseSVM`](crate::DenseSVM) and [`SparseSVM`](crate::SparseSVM) to predict a [`FeatureVector`]. /// /// # Predicting a label /// -/// To predict a label, first make sure the [`Problem`](crate::Problem) has all features set. Then calling +/// To predict a label, first make sure the [`FeatureVector`](crate::FeatureVector) has all features set. Then calling /// ``` -/// use ffsvm::*; +/// use ffsvm::{DenseFeatures, DenseSVM, Predict}; /// -/// fn set_features(svm: &DenseSVM, problem: &mut DenseProblem) { +/// fn classify(svm: &DenseSVM, problem: &mut DenseFeatures) { /// // Predicts the label. /// svm.predict_value(problem); /// } /// ``` -/// will update the [`Problem::solution`] to correspond to the class label with the highest likelihood. +/// will update the [`FeatureVector::label`] to correspond to the class label with the highest likelihood. /// /// # Predicting a label and obtaining probability estimates. /// @@ -24,31 +24,31 @@ use crate::{errors::Error, svm::features::FeatureVector}; /// Probabilities are estimated like this: /// /// ``` -/// use ffsvm::*; +/// use ffsvm::{DenseFeatures, DenseSVM, Predict}; /// -/// fn set_features(svm: &DenseSVM, problem: &mut DenseProblem) { +/// fn classify_with_probabilities(svm: &DenseSVM, features: &mut DenseFeatures) { /// // Predicts the label and probability estimates. -/// svm.predict_probability(problem); +/// svm.predict_probability(features); /// } /// ``` /// -/// Predicting probabilities automatically predicts the best label. In addition [`Problem::probabilities`] +/// Predicting probabilities automatically predicts the best label. In addition [`FeatureVector::probabilities`] /// will be updated accordingly. The class labels for each probability entry can be obtained /// by the SVM's `class_label_for_index` and `class_index_for_label` methods.
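///
/// For example (a sketch, assuming a classification model trained with probability estimates,
/// e.g. libSVM's `svm-train -b 1`):
///
/// ```
/// use ffsvm::{DenseFeatures, DenseSVM, Error, Predict};
///
/// fn print_probabilities(svm: &DenseSVM, fv: &mut DenseFeatures) -> Result<(), Error> {
///     svm.predict_probability(fv)?;
///
///     // Each entry in `probabilities()` corresponds to one class index.
///     for (index, p) in fv.probabilities().iter().enumerate() {
///         if let Some(label) = svm.class_label_for_index(index) {
///             println!("class {label}: {p:.3}");
///         }
///     }
///
///     Ok(())
/// }
/// ```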
-pub trait Predict<V32, V64> +pub trait Predict<T> where Self: Sync, { - /// Predict a single value for a [`Problem`]. + /// Predict a single value for a [`FeatureVector`]. /// /// The problem needs to have all features set. Once this method returns, - /// the [`Problem::solution`] will be set. - fn predict_value(&self, problem: &mut Problem<V32>) -> Result<(), Error>; + /// the [`FeatureVector::label`] will be set. + fn predict_value(&self, problem: &mut FeatureVector<T>) -> Result<(), Error>; /// Predict a probability value for a problem. /// /// The problem needs to have all features set. Once this method returns, - /// both [`Problem::solution`] will be set, and all [`Problem::probabilities`] will + /// the [`FeatureVector::label`] will be set, and all [`FeatureVector::probabilities`] will /// be available accordingly. - fn predict_probability(&self, problem: &mut Problem<V32>) -> Result<(), Error>; + fn predict_probability(&self, problem: &mut FeatureVector<T>) -> Result<(), Error>; } diff --git a/src/svm/problem.rs b/src/svm/problem.rs deleted file mode 100644 index 4ae1886..0000000 --- a/src/svm/problem.rs +++ /dev/null @@ -1,192 +0,0 @@ -use std::ops::{Index, IndexMut}; - -use crate::{ - f32s, f64s, - sparse::SparseVector, - svm::{DenseSVM, SparseSVM}, - vectors::Triangular, -}; - -use simd_aligned::{MatrixD, Rows, VectorD}; - -/// Problems produced for [`DenseSVM`]s. -/// -/// Also see [`Problem`] for more methods for this type. -pub type DenseProblem = Problem<VectorD<f32s>>; - -/// Problems produced for [`SparseSVM`]s. -/// -/// Also see [`Problem`] for more methods for this type. -pub type SparseProblem = Problem<SparseVector<f32>>; - -/// The result of a classification -#[derive(Copy, Debug, Clone, PartialEq)] -pub enum Solution { - /// If classified this will hold the label. - Label(i32), - - /// If regression was performed contains regression result. - Value(f32), - - /// No operation was performed yet. - None, -} - -#[derive(Debug, Clone)] -pub struct Features<V32> { - data: V32, -} - -/// A single problem a SVM should classify. -/// -/// # Creating a `Problem` -/// -/// Problems are created via the `Problem::from` method and match the SVM type they were created for: -/// -/// ```rust -/// use ffsvm::*; -/// use std::convert::TryFrom; -/// -/// fn main() -> Result<(), Error> { -/// let svm = DenseSVM::try_from(SAMPLE_MODEL)?; -/// -/// let mut problem = Problem::from(&svm); -/// -/// Ok(()) -/// } -/// ``` -/// -/// # Setting Features -/// -/// A `Problem` is an instance of the SVM's problem domain. Before it can be classified, all `features` need -/// to be set, for example by: -/// -/// ``` -/// use ffsvm::*; -/// -/// fn set_features(problem: &mut DenseProblem) { -/// let features = problem.features(); -/// features[0] = -0.221184; -/// features[3] = 0.135713; -/// } -/// ``` -/// -/// It can then be classified via the [`Predict`](crate::Predict) trait. -#[derive(Debug, Clone)] -pub struct Problem<V32> { - /// A vector of all features. - pub(crate) features: Features<V32>, - - /// KernelDense values. A vector for each class. - pub(crate) kernel_values: MatrixD<f64s, Rows>, - - /// All votes for a given class label. - pub(crate) vote: Vec<u32>, - - /// Decision values. - pub(crate) decision_values: Triangular<f64>, - - /// Pairwise probabilities - pub(crate) pairwise: MatrixD<f64s, Rows>, - - /// Needed for multi-class probability estimates replicating libSVM. - pub(crate) q: MatrixD<f64s, Rows>, - - /// Needed for multi-class probability estimates replicating libSVM. - pub(crate) qp: Vec<f64>, - - /// Probability estimates that will be updated after this problem was processed - /// by `predict_probability`. - pub(crate) probabilities: VectorD<f64s>, - - /// Computed label that will be updated after this problem was processed. - pub(crate) result: Solution, -} - -impl<V32> Problem<V32> { - /// After a [`Problem`](crate::Problem) has been classified, this will hold the SVMs solution. - pub const fn solution(&self) -> Solution { self.result } - - /// Returns the probability estimates. Only really useful if the model was trained with probability estimates and you classified with them. - pub fn probabilities(&self) -> &[f64] { self.probabilities.flat() } - - /// Returns the features. You must set them first and classifiy the problem before you can get a solution. - pub fn features(&mut self) -> &mut Features<V32> { &mut self.features } -} - -impl DenseProblem { - /// Creates a new problem with the given parameters. - pub(crate) fn with_dimension(total_sv: usize, num_classes: usize, num_attributes: usize) -> Problem<VectorD<f32s>> { - Problem { - features: Features { - data: VectorD::with(0.0, num_attributes), - }, - kernel_values: MatrixD::with_dimension(num_classes, total_sv), - pairwise: MatrixD::with_dimension(num_classes, num_classes), - q: MatrixD::with_dimension(num_classes, num_classes), - qp: vec![Default::default(); num_classes], - decision_values: Triangular::with_dimension(num_classes, Default::default()), - vote: vec![Default::default(); num_classes], - probabilities: VectorD::with(0.0, num_classes), - result: Solution::None, - } - } -} - -impl SparseProblem { - /// Clears the [Problem] when reusing it between calls. Only needed for [SparseSVM] problems.
diff --git a/tests/svm_dense_class.rs b/tests/svm_dense_class.rs
index b6d33ef..71c17da 100644
--- a/tests/svm_dense_class.rs
+++ b/tests/svm_dense_class.rs
@@ -1,7 +1,3 @@
-#![feature(test)]
-
-extern crate test;
-
 macro_rules! test_model {
     ($name:ident, $file:expr, $prob:expr, $libsvm:expr, $libsvm_prob:expr) => {
         #[test]
@@ -9,26 +5,26 @@ macro_rules! test_model {
             let model = include_str!(concat!("data_dense/", $file));
             let svm = DenseSVM::try_from(model)?;
 
-            let mut problem_0 = Problem::from(&svm);
-            let features_0 = problem_0.features().as_slice_mut();
+            let mut problem_0 = FeatureVector::from(&svm);
+            let features_0 = problem_0.features();
             features_0.clone_from_slice(&[0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1]);
 
-            let mut problem_7 = Problem::from(&svm);
-            let features_7 = problem_7.features().as_slice_mut();
+            let mut problem_7 = FeatureVector::from(&svm);
+            let features_7 = problem_7.features();
             features_7.clone_from_slice(&[1.287_784_9, 0.986_031_7, 1.486_247_2, 1.128_083, 0.891_030_55, 1.164_363_4, 0.928_599_1, 1.140_762_9]);
 
             svm.predict_value(&mut problem_0)?;
             svm.predict_value(&mut problem_7)?;
 
-            assert_eq!(problem_0.solution(), Solution::Label($libsvm[0]), "predict_value(problem_0)");
-            assert_eq!(problem_7.solution(), Solution::Label($libsvm[1]), "predict_value(problem_7)");
+            assert_eq!(problem_0.label(), Label::Class($libsvm[0]), "predict_value(problem_0)");
+            assert_eq!(problem_7.label(), Label::Class($libsvm[1]), "predict_value(problem_7)");
 
             if $prob {
                 svm.predict_probability(&mut problem_0)?;
                 svm.predict_probability(&mut problem_7)?;
 
-                assert_eq!(problem_0.solution(), Solution::Label($libsvm_prob[0]), "predict_probability(problem_0)");
-                assert_eq!(problem_7.solution(), Solution::Label($libsvm_prob[1]), "predict_probability(problem_7)");
+                assert_eq!(problem_0.label(), Label::Class($libsvm_prob[0]), "predict_probability(problem_0)");
+                assert_eq!(problem_7.label(), Label::Class($libsvm_prob[1]), "predict_probability(problem_7)");
             }
 
             Ok(())
@@ -38,7 +34,7 @@ macro_rules! test_model {
 
 #[cfg(test)]
 mod svm_dense_class {
-    use ffsvm::{DenseSVM, Error, Predict, Problem, Solution};
+    use ffsvm::{DenseSVM, Error, FeatureVector, Label, Predict};
     use std::convert::TryFrom;
 
     // CSVM
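For readers unfamiliar with the suite's layout: each model file gets one `test_model!` invocation naming the expected labels with and without probability estimates. A hypothetical instance — the test name, file name, and label values are illustrative, not taken from the suite:

```rust
// Arguments: test name, model file under tests/data_dense/, whether to also
// exercise probability estimates, expected labels from `predict_value`,
// and expected labels from `predict_probability`.
test_model!(m_csvc_rbf, "m_csvc_rbf.libsvm", true, [0, 7], [0, 7]);
```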
diff --git a/tests/svm_dense_regression.rs b/tests/svm_dense_regression.rs
index 4a5072a..997cd14 100644
--- a/tests/svm_dense_regression.rs
+++ b/tests/svm_dense_regression.rs
@@ -1,10 +1,8 @@
-#![feature(test)]
+use ffsvm::Label;
 
-use ffsvm::Solution;
-
-fn similar(a: Solution, b: Solution) -> bool {
+fn similar(a: Label, b: Label) -> bool {
     match (a, b) {
-        (Solution::Value(a), Solution::Value(b)) => (a - b).abs() < 0.001 * ((a + b) / 2.0),
+        (Label::Value(a), Label::Value(b)) => (a - b).abs() < 0.001 * ((a + b) / 2.0),
         _ => false,
     }
 }
@@ -16,26 +14,26 @@ macro_rules! test_model {
             let model = include_str!(concat!("data_dense/", $file));
             let svm = DenseSVM::try_from(model)?;
 
-            let mut problem_0 = Problem::from(&svm);
-            let features_0 = problem_0.features().as_slice_mut();
+            let mut problem_0 = FeatureVector::from(&svm);
+            let features_0 = problem_0.features();
             features_0.clone_from_slice(&[0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1, 0.000_1]);
 
-            let mut problem_7 = Problem::from(&svm);
-            let features_7 = problem_7.features().as_slice_mut();
+            let mut problem_7 = FeatureVector::from(&svm);
+            let features_7 = problem_7.features();
             features_7.clone_from_slice(&[1.287_784_9, 0.986_031_7, 1.486_247_2, 1.128_083, 0.891_030_55, 1.164_363_4, 0.928_599_1, 1.140_762_9]);
 
             svm.predict_value(&mut problem_0)?;
             svm.predict_value(&mut problem_7)?;
 
-            assert!(similar(problem_0.solution(), Solution::Value($libsvm[0])));
-            assert!(similar(problem_7.solution(), Solution::Value($libsvm[1])));
+            assert!(similar(problem_0.label(), Label::Value($libsvm[0])));
+            assert!(similar(problem_7.label(), Label::Value($libsvm[1])));
 
             if $prob {
                 svm.predict_probability(&mut problem_0)?;
                 svm.predict_probability(&mut problem_7)?;
 
-                assert!(similar(problem_0.solution(), Solution::Value($libsvm_prob[0])));
-                assert!(similar(problem_7.solution(), Solution::Value($libsvm_prob[1])));
+                assert!(similar(problem_0.label(), Label::Value($libsvm_prob[0])));
+                assert!(similar(problem_7.label(), Label::Value($libsvm_prob[1])));
             }
 
             Ok(())
@@ -46,7 +44,7 @@ macro_rules! test_model {
 
 #[cfg(test)]
 mod svm_dense_regression {
     use super::similar;
-    use ffsvm::{DenseSVM, Error, Predict, Problem, Solution};
+    use ffsvm::{DenseSVM, Error, FeatureVector, Label, Predict};
     use std::convert::TryFrom;
 
     // E-SVR
@@ -70,5 +68,4 @@ mod svm_dense_regression {
     test_model!(m_nu_svr_poly, "m_nu_svr_poly.libsvm", false, [2.18783, 6.55455], [0.0, 0.0]);
     test_model!(m_nu_svr_rbf, "m_nu_svr_rbf.libsvm", false, [0.653_419, 6.49803], [0.0, 0.0]);
     test_model!(m_nu_svr_sigmoid, "m_nu_svr_sigmoid.libsvm", false, [0.396_866, 5.52985], [0.0, 0.0]);
-
 }
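The `similar` helper above compares regression outputs with a relative tolerance: two values match when they differ by less than 0.1% of their mean. A self-contained sketch of the same check with the arithmetic spelled out (note the threshold goes negative for negative means, so the helper only suits the positive outputs these models produce):

```rust
/// Relative comparison as used by the regression tests above.
fn similar_f32(a: f32, b: f32) -> bool {
    (a - b).abs() < 0.001 * ((a + b) / 2.0)
}

fn main() {
    // Mean ~6.554, so the allowed difference is ~0.00655;
    // 6.554 vs 6.55455 differ by only 0.00055 and pass.
    assert!(similar_f32(6.554, 6.55455));

    // A ~1% deviation (difference 0.066) fails the same check.
    assert!(!similar_f32(6.554, 6.62));
}
```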
diff --git a/tests/svm_parsing.rs b/tests/svm_parsing.rs
index 1ca9b94..f0c5f2d 100644
--- a/tests/svm_parsing.rs
+++ b/tests/svm_parsing.rs
@@ -1,7 +1,3 @@
-#![feature(test)]
-
-extern crate test;
-
 #[cfg(test)]
 mod svm_performance {
     use ffsvm::{DenseSVM, Error};
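Across the test files the `#![feature(test)]` / `extern crate test` preamble disappears: the unstable `test` crate is only needed for `Bencher`, which the `benches/` targets declare themselves. What remains is a plain test module; this one is illustrative, not from the suite:

```rust
// No crate-level attributes needed; a plain test module suffices.
#[cfg(test)]
mod parsing_smoke {
    use ffsvm::DenseSVM;
    use std::convert::TryFrom;

    #[test]
    fn rejects_garbage_input() {
        // Illustrative assertion: a non-model string must fail to parse.
        assert!(DenseSVM::try_from("not a libSVM model").is_err());
    }
}
```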
diff --git a/tests/svm_sparse_class.rs b/tests/svm_sparse_class.rs
index e453f78..5ad3e22 100644
--- a/tests/svm_sparse_class.rs
+++ b/tests/svm_sparse_class.rs
@@ -1,7 +1,3 @@
-#![feature(test)]
-
-extern crate test;
-
 macro_rules! test_model {
     ($name:ident, $file:expr, $prob:expr, $libsvm:expr, $libsvm_prob:expr) => {
         #[test]
@@ -9,7 +5,7 @@ macro_rules! test_model {
             let model = include_str!(concat!("data_sparse/", $file));
             let svm = SparseSVM::try_from(model)?;
 
-            let mut problem_0 = Problem::from(&svm);
+            let mut problem_0 = FeatureVector::from(&svm);
             let features_0 = problem_0.features();
             features_0[3] = 0.000_1;
             features_0[4] = 0.000_1;
@@ -24,7 +20,7 @@ macro_rules! test_model {
             features_0[123] = 0.000_1;
             features_0[127] = 0.000_1;
 
-            let mut problem_7 = Problem::from(&svm);
+            let mut problem_7 = FeatureVector::from(&svm);
             let features_7 = problem_7.features();
             features_7[3] = 0.930_907_6;
             features_7[4] = 1.264_398_9;
@@ -41,15 +37,15 @@ macro_rules! test_model {
             svm.predict_value(&mut problem_0)?;
             svm.predict_value(&mut problem_7)?;
 
-            assert_eq!(problem_0.solution(), Solution::Label($libsvm[0]), "predict_value(problem_0)");
-            assert_eq!(problem_7.solution(), Solution::Label($libsvm[1]), "predict_value(problem_7)");
+            assert_eq!(problem_0.label(), Label::Class($libsvm[0]), "predict_value(problem_0)");
+            assert_eq!(problem_7.label(), Label::Class($libsvm[1]), "predict_value(problem_7)");
 
             if $prob {
                 svm.predict_probability(&mut problem_0)?;
                 svm.predict_probability(&mut problem_7)?;
 
-                assert_eq!(problem_0.solution(), Solution::Label($libsvm_prob[0]), "predict_probability(problem_0)");
-                assert_eq!(problem_7.solution(), Solution::Label($libsvm_prob[1]), "predict_probability(problem_7)");
+                assert_eq!(problem_0.label(), Label::Class($libsvm_prob[0]), "predict_probability(problem_0)");
+                assert_eq!(problem_7.label(), Label::Class($libsvm_prob[1]), "predict_probability(problem_7)");
             }
 
             Ok(())
@@ -59,7 +55,7 @@ macro_rules! test_model {
 
 #[cfg(test)]
 mod svm_sparse_class {
-    use ffsvm::{Error, Predict, Problem, Solution, SparseSVM};
+    use ffsvm::{Error, FeatureVector, Label, Predict, SparseSVM};
     use std::convert::TryFrom;
 
     // CSVM
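One behavioral note the sparse tests sidestep by building a fresh `FeatureVector` per case: sparse vectors keep previously set entries. The removed `SparseProblem::clear` existed for exactly this. Assuming it carried over to the new type under the same name, reuse would look like the sketch below; `classify_all`, the observation format, and the survival of `clear` are all assumptions:

```rust
use ffsvm::{Error, FeatureVector, Label, Predict, SparseSVM};

// Sketch: reuse one sparse feature vector across observations, clearing
// stale entries in between. `clear` is assumed to have survived the rename.
fn classify_all(svm: &SparseSVM, observations: &[Vec<(usize, f32)>]) -> Result<Vec<Label>, Error> {
    let mut fv = FeatureVector::from(svm);
    let mut labels = Vec::with_capacity(observations.len());

    for observation in observations {
        fv.clear(); // without this, entries from the previous observation leak
        for &(index, value) in observation {
            fv.features()[index] = value;
        }
        svm.predict_value(&mut fv)?;
        labels.push(fv.label());
    }

    Ok(labels)
}
```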