Mirror of https://github.com/rmontanana/mdlp.git (synced 2025-08-17 16:35:57 +00:00)

Compare commits: v2.0.1 ... c1759ba1ce (13 commits)
Commits (SHA1):

- c1759ba1ce
- f1dae498ac
- 4418ea8a6f
- 159e24b5cb
- 77e28e728e
- 18db982dec
- 99b751a4d4
- 059fd33b4e
- e068bf0a54
- cfb993f5ec
- 7d62d6af4a
- ea70535984
- 2d8b949abd
.conan/profiles/default (new file, 11 lines)
@@ -0,0 +1,11 @@
[settings]
os=Linux
arch=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release

[conf]
tools.system.package_manager:mode=install
tools.system.package_manager:sudo=True
.gitmodules (vendored, 3 changed lines)
@@ -1,3 +0,0 @@
-[submodule "tests/lib/Files"]
-	path = tests/lib/Files
-	url = https://github.com/rmontanana/ArffFiles.git
.vscode/settings.json (vendored, 6 changed lines)
@@ -104,6 +104,10 @@
     "stop_token": "cpp",
     "text_encoding": "cpp",
     "typeindex": "cpp",
-    "valarray": "cpp"
+    "valarray": "cpp",
+    "csignal": "cpp",
+    "regex": "cpp",
+    "future": "cpp",
+    "shared_mutex": "cpp"
   }
 }
CHANGELOG.md (new file, 214 lines)
@@ -0,0 +1,214 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2.1.0] - 2025-06-28

### Added

- Conan dependency manager support
- Technical analysis report

### Changed

- Updated README.md
- Refactored library version and installation system
- Updated config variable names

### Fixed

- Removed unneeded semicolon

## [2.0.1] - 2024-07-22

### Added

- CMake install target and make install command
- Flag to control sample building in Makefile

### Changed

- Library name changed to `fimdlp`
- Updated version numbers across test files

### Fixed

- Version number consistency in tests

## [2.0.0] - 2024-07-04

### Added

- Makefile with build & test actions for easier development
- PyTorch (libtorch) integration for tensor operations

### Changed

- Major refactoring of build system
- Updated build workflows and CI configuration

### Fixed

- BinDisc quantile calculation errors (#9)
- Error in percentile method calculation
- Integer type issues in calculations
- Multiple GitHub Actions configuration fixes

## [1.2.1] - 2024-06-08

### Added

- PyTorch tensor methods for discretization
- Improved library build system

### Changed

- Refactored sample build process

### Fixed

- Library creation and linking issues
- Multiple GitHub Actions workflow fixes

## [1.2.0] - 2024-06-05

### Added

- **Discretizer** - Abstract base class for all discretization algorithms (#8)
- **BinDisc** - K-bins discretization with quantile and uniform strategies (#7)
- Transform method to discretize values using existing cut points
- Support for multiple datasets in sample program
- Docker development container configuration

### Changed

- Refactored system types throughout the library
- Improved sample program with better dataset handling
- Enhanced build system with debug options

### Fixed

- Transform method initialization issues
- ARFF file attribute name extraction
- Sample program library binary separation

## [1.1.3] - 2024-06-05

### Added

- `max_cutpoints` hyperparameter for controlling algorithm complexity
- `max_depth` and `min_length` as configurable hyperparameters
- Enhanced sample program with hyperparameter support
- Additional datasets for testing

### Changed

- Improved constructor design and parameter handling
- Enhanced test coverage and reporting
- Refactored build system configuration

### Fixed

- Depth initialization in fit method
- Code quality improvements and smell fixes
- Exception handling in value cut point calculations

## [1.1.2] - 2023-04-01

### Added

- Comprehensive test suite with GitHub Actions CI
- SonarCloud integration for code quality analysis
- Enhanced build system with automated testing

### Changed

- Improved GitHub Actions workflow configuration
- Updated project structure for better maintainability

### Fixed

- Build system configuration issues
- Test execution and coverage reporting

## [1.1.1] - 2023-02-22

### Added

- Limits header for proper compilation
- Enhanced build system support

### Changed

- Updated version numbering system
- Improved SonarCloud configuration

### Fixed

- ValueCutPoint exception handling (removed unnecessary exception)
- Build system compatibility issues
- GitHub Actions token configuration

## [1.1.0] - 2023-02-21

### Added

- Classic algorithm implementation for performance comparison
- Enhanced ValueCutPoint logic with same_values detection
- Glass dataset support in sample program
- Debug configuration for development

### Changed

- Refactored ValueCutPoint algorithm for better accuracy
- Improved candidate selection logic
- Enhanced sample program with multiple datasets

### Fixed

- Sign error in valueCutPoint calculation
- Final cut value computation
- Duplicate dataset handling in sample

## [1.0.0.0] - 2022-12-21

### Added

- Initial release of MDLP (Minimum Description Length Principle) discretization library
- Core CPPFImdlp algorithm implementation based on Fayyad & Irani's paper
- Entropy and information gain calculation methods
- Sample program demonstrating library usage
- CMake build system
- Basic test suite
- ARFF file format support for datasets

### Features

- Recursive discretization using entropy-based criteria
- Stable sorting with tie-breaking for identical values
- Configurable algorithm parameters
- Cross-platform C++ implementation

---

## Release Notes

### Version 2.x

- **Breaking Changes**: Library renamed to `fimdlp`
- **Major Enhancement**: PyTorch integration for improved performance
- **New Features**: Comprehensive discretization framework with multiple algorithms

### Version 1.x

- **Core Algorithm**: MDLP discretization implementation
- **Extensibility**: Hyperparameter support and algorithm variants
- **Quality**: Comprehensive testing and CI/CD pipeline

### Version 1.0.x

- **Foundation**: Initial stable implementation
- **Algorithm**: Core MDLP discretization functionality
CLAUDE.md (new file, 77 lines)
@@ -0,0 +1,77 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

This is a C++ implementation of the MDLP (Minimum Description Length Principle) discretization algorithm based on Fayyad & Irani's paper. The library provides discretization methods for continuous-valued attributes in classification learning.

## Build System

The project uses CMake with a Makefile wrapper for common tasks:

### Common Commands

- `make build` - Build release version with sample program
- `make test` - Run full test suite with coverage report
- `make install` - Install the library

### Build Configurations

- **Release**: Built in `build_release/` directory
- **Debug**: Built in `build_debug/` directory (for testing)

### Dependencies

- PyTorch (libtorch) - Required dependency
- GoogleTest - Fetched automatically for testing
- Coverage tools: lcov, genhtml

## Code Architecture

### Core Components

1. **Discretizer** (`src/Discretizer.h/cpp`) - Abstract base class for all discretizers
2. **CPPFImdlp** (`src/CPPFImdlp.h/cpp`) - Main MDLP algorithm implementation
3. **BinDisc** (`src/BinDisc.h/cpp`) - K-bins discretization (quantile/uniform strategies)
4. **Metrics** (`src/Metrics.h/cpp`) - Entropy and information gain calculations

### Key Data Types

- `samples_t` - Input data samples
- `labels_t` - Classification labels
- `indices_t` - Index arrays for sorting/processing
- `precision_t` - Floating-point precision type
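A minimal sketch of how these types fit together in a caller, assuming the unqualified names resolve as in the repository's own sample code; the exact signatures, namespaces, and return types are assumptions based on the `fit()`/`transform()` API described here, not verified declarations:

```cpp
#include <iostream>
#include <vector>
#include "CPPFImdlp.h"

// Illustrative only: samples_t / labels_t are the aliases from typesFImdlp.h
// (roughly a vector of precision_t and a vector of int).
int main() {
    samples_t X = {4.9f, 5.0f, 5.1f, 6.2f, 6.3f, 7.0f}; // one continuous feature
    labels_t y = {0, 0, 0, 1, 1, 1};                    // class labels

    CPPFImdlp discretizer;
    discretizer.fit(X, y);                       // learn MDLP cut points
    labels_t bins = discretizer.transform(X);    // map values to bin indices

    for (auto b : bins) std::cout << b << ' ';
    std::cout << '\n';
    return 0;
}
```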
### Algorithm Flow

1. Data is sorted using labels as tie-breakers for identical values
2. MDLP recursively finds optimal cut points using entropy-based criteria (the standard acceptance test is reproduced below for reference)
3. Cut points are validated to ensure meaningful splits
4. Transform method maps continuous values to discrete bins
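For reference, the acceptance test from Fayyad & Irani's paper (how this library applies it internally is not spelled out here): a candidate cut point $T$ on attribute $A$ over a set $S$ of $N$ samples is kept only if

$$
\operatorname{Gain}(A,T;S) > \frac{\log_2(N-1)}{N} + \frac{\Delta(A,T;S)}{N},
\qquad
\Delta(A,T;S) = \log_2(3^k - 2) - \bigl[k\,\operatorname{Ent}(S) - k_1\,\operatorname{Ent}(S_1) - k_2\,\operatorname{Ent}(S_2)\bigr],
$$

where $S_1$ and $S_2$ are the two partitions induced by $T$, and $k$, $k_1$, $k_2$ are the numbers of classes present in $S$, $S_1$, and $S_2$.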
## Testing

Tests are built with GoogleTest and include:

- `Metrics_unittest` - Entropy/information gain tests
- `FImdlp_unittest` - Core MDLP algorithm tests
- `BinDisc_unittest` - K-bins discretization tests
- `Discretizer_unittest` - Base class functionality tests

### Running Tests

```bash
make test                      # Runs all tests and generates coverage report
cd build_debug/tests && ctest  # Run tests directly
```

Coverage reports are generated at `build_debug/tests/coverage/index.html`.

## Sample Usage

The sample program demonstrates basic usage:

```bash
build_release/sample/sample -f iris -m 2
```

## Development Notes

- The library uses PyTorch tensors for efficient numerical operations
- Code follows C++17 standards
- Coverage is maintained at 100%
- The implementation handles edge cases like duplicate values and small intervals
- Conan package manager support is available via `conanfile.py`
CMakeLists.txt (modified)

@@ -1,44 +1,80 @@
 cmake_minimum_required(VERSION 3.20)

-project(fimdlp)
+project(fimdlp
+    LANGUAGES CXX
+    DESCRIPTION "Discretization algorithm based on the paper by Fayyad & Irani Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning."
+    HOMEPAGE_URL "https://github.com/rmontanana/mdlp"
+    VERSION 2.1.0
+)
 set(CMAKE_CXX_STANDARD 17)
 cmake_policy(SET CMP0135 NEW)

-find_package(Torch CONFIG REQUIRED)
+# Find dependencies
+find_package(Torch REQUIRED)

-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-elide-constructors")
+# Options
+# -------
+option(ENABLE_TESTING OFF)
+option(ENABLE_SAMPLE OFF)
+option(COVERAGE OFF)
+
+add_subdirectory(config)
+
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-elide-constructors")
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
 if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
     set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-default-inline")
 endif()

 if (ENABLE_TESTING)
-    MESSAGE("Debug mode")
+    message("Debug mode")
+
     enable_testing()
     set(CODE_COVERAGE ON)
-    SET(GCC_COVERAGE_LINK_FLAGS " ${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
+    set(GCC_COVERAGE_LINK_FLAGS "${GCC_COVERAGE_LINK_FLAGS} -lgcov --coverage")
     add_subdirectory(tests)
-else(ENABLE_TESTING)
-    MESSAGE("Release mode")
-endif(ENABLE_TESTING)
+else()
+    message("Release mode")
+endif()

 if (ENABLE_SAMPLE)
     message("Building sample")
     add_subdirectory(sample)
-endif(ENABLE_SAMPLE)
+endif()

 include_directories(
-    ${TORCH_INCLUDE_DIRS}
     ${fimdlp_SOURCE_DIR}/src
+    ${CMAKE_BINARY_DIR}/configured_files/include
 )

 add_library(fimdlp src/CPPFImdlp.cpp src/Metrics.cpp src/BinDisc.cpp src/Discretizer.cpp)
-target_link_libraries(fimdlp "${TORCH_LIBRARIES}")
+target_link_libraries(fimdlp PRIVATE torch::torch)

 # Installation
 # ------------
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/fimdlpConfigVersion.cmake"
+    VERSION ${PROJECT_VERSION}
+    COMPATIBILITY AnyNewerVersion
+)
+
 install(TARGETS fimdlp
+    EXPORT fimdlpTargets
     ARCHIVE DESTINATION lib
     LIBRARY DESTINATION lib
     CONFIGURATIONS Release)

 install(DIRECTORY src/ DESTINATION include/fimdlp FILES_MATCHING CONFIGURATIONS Release PATTERN "*.h")
+install(FILES ${CMAKE_BINARY_DIR}/configured_files/include/config.h DESTINATION include/fimdlp CONFIGURATIONS Release)
+
+install(EXPORT fimdlpTargets
+    FILE fimdlpTargets.cmake
+    NAMESPACE fimdlp::
+    DESTINATION lib/cmake/fimdlp)
+
+configure_file(fimdlpConfig.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/fimdlpConfig.cmake" @ONLY)
+install(FILES "${CMAKE_CURRENT_BINARY_DIR}/fimdlpConfig.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/fimdlpConfigVersion.cmake"
+    DESTINATION lib/cmake/fimdlp)
CMakeUserPresets.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
  "version": 4,
  "vendor": {
    "conan": {}
  },
  "include": [
    "build_release/build/Release/generators/CMakePresets.json",
    "build_debug/build/Debug/generators/CMakePresets.json",
    "build/Release/generators/CMakePresets.json"
  ]
}
CONAN_README.md (new file, 153 lines)
@@ -0,0 +1,153 @@
# Conan Package for fimdlp

This directory contains the Conan package configuration for the fimdlp library.

## Dependencies

The package manages the following dependencies:

### Build Requirements

- **libtorch/2.4.1** - PyTorch C++ library for tensor operations

### Test Requirements (when testing enabled)

- **catch2/3.8.1** - Modern C++ testing framework
- **arff-files** - ARFF file format support (included locally in tests/lib/Files/)

## Building with Conan

### 1. Install Dependencies and Build

```bash
# Install dependencies
conan install . --output-folder=build --build=missing

# Build the project
cd build
cmake .. -DCMAKE_TOOLCHAIN_FILE=conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release
cmake --build .
```

### 2. Using the Build Script

```bash
# Build release version
./scripts/build_conan.sh

# Build with tests
./scripts/build_conan.sh --test
```

## Creating a Package

### 1. Create Package Locally

```bash
conan create . --profile:build=default --profile:host=default
```

### 2. Create Package with Options

```bash
# Create with testing enabled
conan create . -o enable_testing=True --profile:build=default --profile:host=default

# Create shared library version
conan create . -o shared=True --profile:build=default --profile:host=default
```

### 3. Using the Package Creation Script

```bash
./scripts/create_package.sh
```

## Uploading to Cimmeria

### 1. Configure Remote

```bash
# Add Cimmeria remote
conan remote add cimmeria <cimmeria-server-url>

# Login to Cimmeria
conan remote login cimmeria <username>
```

### 2. Upload Package

```bash
# Upload the package
conan upload fimdlp/2.1.0 --remote=cimmeria --all

# Or use the script (it prints remote configuration instructions if the remote is not set up)
./scripts/create_package.sh
```

## Using the Package

### In conanfile.txt

```ini
[requires]
fimdlp/2.1.0

[generators]
CMakeDeps
CMakeToolchain
```

### In conanfile.py

```python
def requirements(self):
    self.requires("fimdlp/2.1.0")
```

### In CMakeLists.txt

```cmake
find_package(fimdlp REQUIRED)
target_link_libraries(your_target fimdlp::fimdlp)
```

## Package Options

| Option | Values | Default | Description |
|--------|--------|---------|-------------|
| shared | True/False | False | Build shared library |
| fPIC | True/False | True | Position independent code |
| enable_testing | True/False | False | Enable test suite |
| enable_sample | True/False | False | Build sample program |

## Example Usage

```cpp
#include <fimdlp/CPPFImdlp.h>
#include <fimdlp/Metrics.h>

int main() {
    // Create MDLP discretizer
    CPPFImdlp discretizer;

    // Calculate entropy
    Metrics metrics;
    std::vector<int> labels = {0, 1, 0, 1, 1};
    double entropy = metrics.entropy(labels);

    return 0;
}
```
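For the PyTorch tensor path, a hedged sketch; the tensor entry points `fit_t`/`transform_t` and the expected dtypes are assumptions taken from the technical analysis report elsewhere in this change set, so check the installed headers for the exact names:

```cpp
#include <fimdlp/CPPFImdlp.h>
#include <torch/torch.h>
#include <iostream>

int main() {
    // 1D float feature and int32 labels, as the tensor interface reportedly expects
    auto X = torch::tensor({4.9f, 5.0f, 5.1f, 6.2f, 6.3f, 7.0f});
    auto y = torch::tensor({0, 0, 0, 1, 1, 1}, torch::kInt32);

    CPPFImdlp discretizer;
    discretizer.fit_t(X, y);                 // assumed tensor fit entry point
    auto bins = discretizer.transform_t(X);  // assumed tensor transform entry point

    std::cout << bins << std::endl;
    return 0;
}
```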
## Testing

The package includes comprehensive tests that can be enabled with:

```bash
conan create . -o enable_testing=True
```

## Requirements

- C++17 compatible compiler
- CMake 3.20 or later
- Conan 2.0 or later
Makefile (43 changed lines)
@@ -1,35 +1,42 @@
 SHELL := /bin/bash
 .DEFAULT_GOAL := build
-.PHONY: build test
+.PHONY: build install test
 lcov := lcov

-build:
-	@if [ -d build_release ]; then rm -fr build_release; fi
-	@mkdir build_release
-	@cmake -B build_release -S . -DCMAKE_BUILD_TYPE=Release -DENABLE_TESTING=OFF -DENABLE_SAMPLE=ON
-	@cmake --build build_release -j 8
+f_debug = build_debug
+f_release = build_release

-install:
+build: ## Build the project for Release
+	@echo ">>> Building the project for Release..."
+	@if [ -d $(f_release) ]; then rm -fr $(f_release); fi
+	@conan install . --build=missing -of $(f_release) -s build_type=Release --profile:build=default --profile:host=default
+	cmake -S . -B $(f_release) -DCMAKE_TOOLCHAIN_FILE=$(f_release)/build/Release/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_TESTING=OFF -DENABLE_SAMPLE=OFF
+	@cmake --build $(f_release) -j 8
+
+install: ## Install the project
+	@echo ">>> Installing the project..."
 	@cmake --build build_release --target install -j 8

-test:
-	@if [ -d build_debug ]; then rm -fr build_debug; fi
-	@mkdir build_debug
-	@cmake -B build_debug -S . -DCMAKE_BUILD_TYPE=Debug -DENABLE_TESTING=ON -DENABLE_SAMPLE=ON
-	@cmake --build build_debug -j 8
-	@cd build_debug/tests && ctest --output-on-failure -j 8
-	@cd build_debug/tests && $(lcov) --capture --directory ../ --demangle-cpp --ignore-errors source,source --ignore-errors mismatch --output-file coverage.info >/dev/null 2>&1; \
+test: ## Build Debug version and run tests
+	@echo ">>> Building Debug version and running tests..."
+	@if [ -d $(f_debug) ]; then rm -fr $(f_debug); fi
+	@conan install . --build=missing -of $(f_debug) -s build_type=Debug
+	@cmake -B $(f_debug) -S . -DCMAKE_BUILD_TYPE=Debug -DCMAKE_TOOLCHAIN_FILE=$(f_debug)/build/Debug/generators/conan_toolchain.cmake -DENABLE_TESTING=ON -DENABLE_SAMPLE=ON
+	@cmake --build $(f_debug) -j 8
+	@cp -r tests/datasets $(f_debug)/tests/datasets
+	@cd $(f_debug)/tests && ctest --output-on-failure -j 8
+	@cd $(f_debug)/tests && $(lcov) --capture --directory ../ --demangle-cpp --ignore-errors source,source --ignore-errors mismatch --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info '/usr/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'lib/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'libtorch/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'tests/*' --output-file coverage.info >/dev/null 2>&1; \
 	$(lcov) --remove coverage.info 'gtest/*' --output-file coverage.info >/dev/null 2>&1;
-	@genhtml build_debug/tests/coverage.info --demangle-cpp --output-directory build_debug/tests/coverage --title "Discretizer mdlp Coverage Report" -s -k -f --legend
-	@echo "* Coverage report is generated at build_debug/tests/coverage/index.html"
+	@genhtml $(f_debug)/tests/coverage.info --demangle-cpp --output-directory $(f_debug)/tests/coverage --title "Discretizer mdlp Coverage Report" -s -k -f --legend
+	@echo "* Coverage report is generated at $(f_debug)/tests/coverage/index.html"
 	@which python || (echo ">>> Please install python"; exit 1)
-	@if [ ! -f build_debug/tests/coverage.info ]; then \
+	@if [ ! -f $(f_debug)/tests/coverage.info ]; then \
 		echo ">>> No coverage.info file found!"; \
 		exit 1; \
 	fi
 	@echo ">>> Updating coverage badge..."
-	@env python update_coverage.py build_debug/tests
+	@env python update_coverage.py $(f_debug)/tests
README.md (modified)

@@ -2,6 +2,8 @@
 [](https://sonarcloud.io/summary/new_code?id=rmontanana_mdlp)
 [](https://sonarcloud.io/summary/new_code?id=rmontanana_mdlp)
 [](html/index.html)
+[](https://deepwiki.com/rmontanana/mdlp)
+[](https://doi.org/10.5281/zenodo.14245443)

 # <img src="logo.png" alt="logo" width="50"/> mdlp
TECHNICAL_ANALYSIS_REPORT.md (new file, 525 lines)
@@ -0,0 +1,525 @@
# Technical Analysis Report: MDLP Discretization Library

## Executive Summary

This document presents a comprehensive technical analysis of the MDLP (Minimum Description Length Principle) discretization library. The analysis covers project structure, code quality, architecture, testing methodology, documentation, and security assessment.

**Overall Rating: B+ (Good with Notable Issues)**

The library demonstrates solid software engineering practices with excellent test coverage and clean architectural design, but contains several security vulnerabilities and code quality issues that require attention before production deployment.

---

## Table of Contents

1. [Project Overview](#project-overview)
2. [Architecture & Design Analysis](#architecture--design-analysis)
3. [Code Quality Assessment](#code-quality-assessment)
4. [Testing Framework Analysis](#testing-framework-analysis)
5. [Security Analysis](#security-analysis)
6. [Documentation & Maintainability](#documentation--maintainability)
7. [Build System Evaluation](#build-system-evaluation)
8. [Strengths & Weaknesses Summary](#strengths--weaknesses-summary)
9. [Recommendations](#recommendations)
10. [Risk Assessment](#risk-assessment)

---

## Project Overview

### Description

The MDLP discretization library is a C++ implementation of Fayyad & Irani's Multi-Interval Discretization algorithm for continuous-valued attributes in classification learning. The library provides both traditional binning strategies and advanced MDLP-based discretization.

### Key Features

- **MDLP Algorithm**: Implementation of information-theoretic discretization
- **Multiple Strategies**: Uniform and quantile-based binning options
- **PyTorch Integration**: Native support for PyTorch tensors
- **High Performance**: Optimized algorithms with caching mechanisms
- **Complete Testing**: 100% code coverage with a comprehensive test suite

### Technology Stack

- **Language**: C++17
- **Build System**: CMake 3.20+
- **Dependencies**: PyTorch (libtorch 2.7.0)
- **Testing**: Google Test (GTest)
- **Coverage**: lcov/genhtml
- **Package Manager**: Conan

---

## Architecture & Design Analysis

### Class Hierarchy

```
Discretizer (Abstract Base Class)
├── CPPFImdlp (MDLP Implementation)
└── BinDisc (Simple Binning)

Metrics (Standalone Utility Class)
```

### Design Patterns Identified

#### ✅ **Well-Implemented Patterns**

- **Template Method Pattern**: The base class provides `fit_transform()` while derived classes implement `fit()` (see the sketch after this list)
- **Facade Pattern**: Unified interface for both C++ vectors and PyTorch tensors
- **Composition**: `CPPFImdlp` composes `Metrics` for statistical calculations
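A minimal sketch, not taken from the repository, of how such a template-method split between `fit_transform()` in the base class and `fit()`/`transform()` in the derived classes could look; member names beyond those mentioned above are illustrative:

```cpp
#include <vector>

using samples_t = std::vector<float>;
using labels_t  = std::vector<int>;

// Illustrative base class: fit_transform() fixes the overall flow,
// derived classes only supply the fitting and transform steps.
class Discretizer {
public:
    virtual ~Discretizer() = default;
    virtual void fit(samples_t& X, labels_t& y) = 0;        // primitive operation
    virtual labels_t transform(const samples_t& X) = 0;     // map values to bins
    labels_t fit_transform(samples_t& X, labels_t& y) {     // template method
        fit(X, y);
        return transform(X);
    }
};

class CPPFImdlp : public Discretizer {
public:
    void fit(samples_t& X, labels_t& y) override { /* compute MDLP cut points */ }
    labels_t transform(const samples_t& X) override { return labels_t(X.size(), 0); }
};
```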
#### ⚠️ **Pattern Issues**

- **Strategy Pattern**: `BinDisc` uses an enum-based strategy instead of a proper object-oriented strategy pattern
- **Interface Segregation**: `BinDisc.fit()` ignores the `y` parameter, violating the interface contract

### SOLID Principles Adherence

| Principle | Rating | Notes |
|-----------|--------|-------|
| **Single Responsibility** | ✅ Good | Each class has a clear, focused responsibility |
| **Open/Closed** | ✅ Good | Easy to extend with new discretization algorithms |
| **Liskov Substitution** | ⚠️ Issues | `BinDisc` doesn't properly handle the supervised interface |
| **Interface Segregation** | ✅ Good | Focused interfaces, not overly broad |
| **Dependency Inversion** | ✅ Good | Depends on abstractions, not implementations |

### Architectural Strengths

- **Clean Separation**: Algorithm logic, metrics, and data handling are well separated
- **Extensible Design**: Easy to add new discretization methods
- **Multi-Interface Support**: Both C++ native and PyTorch integration
- **Performance Optimized**: Caching and efficient data structures

### Architectural Weaknesses

- **Interface Inconsistency**: Mixed supervised/unsupervised interface handling
- **Complex Single Methods**: `computeCutPoints()` handles too many responsibilities
- **Tight Coupling**: Direct access to internal data structures
- **Limited Configuration**: Algorithm parameters scattered across classes

---

## Code Quality Assessment

### Code Style & Standards

- **Consistent Naming**: Good use of camelCase and snake_case conventions
- **Header Organization**: Proper SPDX licensing and copyright headers
- **Type Safety**: Centralized type definitions in `typesFImdlp.h`
- **Modern C++**: Good use of C++17 features

### Critical Code Issues

#### 🔴 **High Priority Issues**

**Memory Safety - Unsafe Pointer Operations**

```cpp
// Location: Discretizer.cpp:35-36
samples_t X(X_.data_ptr<precision_t>(), X_.data_ptr<precision_t>() + num_elements);
labels_t y(y_.data_ptr<int>(), y_.data_ptr<int>() + num_elements);
```

- **Issue**: Direct pointer arithmetic without bounds checking
- **Risk**: Buffer overflow if tensor data is malformed
- **Fix**: Add tensor validation before pointer operations

#### 🟡 **Medium Priority Issues**

**Integer Underflow Risk**

```cpp
// Location: CPPFImdlp.cpp:98-100
n = cut - 1 - idxPrev;   // Could underflow if cut <= idxPrev
m = idxNext - cut - 1;   // Could underflow if idxNext <= cut
```

- **Issue**: Size arithmetic without underflow protection
- **Risk**: Extremely large values from underflow
- **Fix**: Add underflow validation

**Vector Access Without Bounds Checking**

```cpp
// Location: Multiple locations
X[indices[idx]]  // No bounds validation
```

- **Issue**: Direct vector access using potentially invalid indices
- **Risk**: Out-of-bounds memory access
- **Fix**: Use the `at()` method or add explicit bounds checking

### Performance Considerations

- **Caching Strategy**: Good use of entropy and information gain caching (a sketch of the idea follows this list)
- **Memory Efficiency**: Smart use of indices to avoid data copying
- **Algorithmic Complexity**: Efficient O(n log n) sorting with optimized cutpoint selection
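As an illustration of the caching idea only (not the library's actual code; the cache key and types are assumptions), an entropy cache keyed by an index range might look like:

```cpp
#include <cstddef>
#include <map>
#include <utility>

using precision_t = double;

// Illustrative memoization of entropy over [start, end) index ranges,
// mirroring the kind of cache described above.
class EntropyCache {
public:
    template <typename F>
    precision_t get(std::size_t start, std::size_t end, F&& compute) {
        auto key = std::make_pair(start, end);
        auto it = cache_.find(key);
        if (it != cache_.end())
            return it->second;                     // cache hit: reuse previous result
        precision_t value = compute(start, end);   // cache miss: compute once
        cache_.emplace(key, value);
        return value;
    }

private:
    std::map<std::pair<std::size_t, std::size_t>, precision_t> cache_;
};
```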
---

## Testing Framework Analysis

### Test Organization

| Test File | Focus Area | Key Features |
|-----------|------------|--------------|
| `BinDisc_unittest.cpp` | Binning strategies | Parametric testing, multiple bin counts |
| `Discretizer_unittest.cpp` | Base interface | PyTorch integration, transform methods |
| `FImdlp_unittest.cpp` | MDLP algorithm | Real datasets, comprehensive scenarios |
| `Metrics_unittest.cpp` | Statistical calculations | Entropy, information gain validation |

### Testing Strengths

- **100% Code Coverage**: Complete line and branch coverage
- **Real Dataset Testing**: Uses the Iris, Diabetes, and Glass datasets from ARFF files
- **Edge Case Coverage**: Empty datasets, constant values, single elements
- **Parametric Testing**: Multiple configurations and strategies
- **Data-Driven Approach**: Systematic test generation with `tests.txt`
- **Multiple APIs**: Tests both C++ vectors and PyTorch tensors

### Testing Methodology

- **Framework**: Google Test with proper fixture usage
- **Precision Testing**: Consistent floating-point comparison margins
- **Exception Testing**: Proper error condition validation
- **Integration Testing**: End-to-end algorithm validation

### Testing Gaps

- **Performance Testing**: No benchmarks or performance regression tests
- **Memory Testing**: Limited memory pressure or leak testing
- **Thread Safety**: No concurrent access testing
- **Fuzzing**: No randomized input testing

---

## Security Analysis

### Overall Security Risk: **MEDIUM**

### Critical Security Vulnerabilities

#### 🔴 **HIGH RISK - Memory Safety**

**Unsafe PyTorch Tensor Operations**

- **Location**: `Discretizer.cpp:35-36, 42, 49-50`
- **Vulnerability**: Direct pointer arithmetic without validation
- **Impact**: Buffer overflow, memory corruption
- **Exploit Scenario**: Malformed tensor data causing out-of-bounds access
- **Mitigation**:

```cpp
if (!X_.is_contiguous() || !y_.is_contiguous()) {
    throw std::invalid_argument("Tensors must be contiguous");
}
if (X_.dtype() != torch::kFloat32 || y_.dtype() != torch::kInt32) {
    throw std::invalid_argument("Invalid tensor types");
}
```

#### 🟡 **MEDIUM RISK - Input Validation**

**Insufficient Parameter Validation**

- **Location**: Multiple entry points
- **Vulnerability**: Missing bounds checking on user inputs
- **Impact**: Integer overflow, out-of-bounds access
- **Examples**:
  - `proposed_cuts` parameter without overflow protection
  - Tensor dimensions not validated
  - Array indices not bounds-checked

**Thread Safety Issues**

- **Location**: `Metrics` class cache containers
- **Vulnerability**: Shared state without synchronization
- **Impact**: Race conditions, data corruption
- **Mitigation**: Add mutex protection or document thread requirements

#### 🟢 **LOW RISK - Information Disclosure**

**Debug Information Leakage**

- **Location**: Sample code and test files
- **Vulnerability**: Detailed internal data exposure
- **Impact**: Minor information disclosure
- **Mitigation**: Remove or conditionalize debug output

### Security Recommendations

#### Immediate Actions

1. **Add Tensor Validation**: Comprehensive validation before pointer operations
2. **Implement Bounds Checking**: Explicit validation for all array access
3. **Add Overflow Protection**: Safe arithmetic operations

#### Short-term Actions

1. **Enhance Input Validation**: Parameter validation at all public interfaces
2. **Add Thread Safety**: Documentation or synchronization mechanisms
3. **Update Dependencies**: Ensure PyTorch is current and secure

---

## Documentation & Maintainability

### Current Documentation Status

#### ✅ **Available Documentation**

- **README.md**: Basic usage instructions and build commands
- **Code Comments**: SPDX headers and licensing information
- **Build Instructions**: CMake configuration and make targets

#### ❌ **Missing Documentation**

- **API Documentation**: No comprehensive API reference
- **Algorithm Documentation**: Limited explanation of the MDLP implementation
- **Usage Examples**: Minimal code examples beyond the basic sample
- **Configuration Guide**: No detailed parameter explanation
- **Architecture Documentation**: No design document or UML diagrams

### Maintainability Assessment

#### Strengths

- **Clear Code Structure**: Well-organized class hierarchy
- **Consistent Style**: Uniform naming and formatting conventions
- **Separation of Concerns**: Clear module boundaries
- **Version Control**: Proper git repository with meaningful commits

#### Weaknesses

- **Complex Methods**: Some functions handle multiple responsibilities
- **Magic Numbers**: Hardcoded values without explanation
- **Limited Comments**: Algorithm logic lacks explanatory comments
- **Configuration Scattered**: Parameters spread across multiple classes

### Documentation Recommendations

1. **Generate API Documentation**: Use Doxygen for comprehensive API docs
2. **Add Algorithm Explanation**: Document MDLP implementation details
3. **Create Usage Guide**: Comprehensive examples and tutorials
4. **Architecture Document**: High-level design documentation
5. **Configuration Reference**: Centralized parameter documentation

---

## Build System Evaluation

### CMake Configuration Analysis

#### Strengths

- **Modern CMake**: Uses version 3.20+ with current best practices
- **Multi-Configuration**: Separate debug/release builds
- **Dependency Management**: Proper PyTorch integration
- **Installation Support**: Complete install targets and package config
- **Testing Integration**: CTest integration with coverage

#### Build Features

```cmake
# Key configurations
set(CMAKE_CXX_STANDARD 17)
find_package(Torch CONFIG REQUIRED)
option(ENABLE_TESTING OFF)
option(ENABLE_SAMPLE OFF)
option(COVERAGE OFF)
```

### Build System Issues

#### Security Concerns

- **Debug Flags**: May affect release builds
- **Dependency Versions**: Fixed PyTorch version without security updates

#### Usability Issues

- **Complex Makefile**: Manual build directory management
- **Coverage Complexity**: Complex lcov command chain

### Build Recommendations

1. **Simplify Build Process**: Use CMake presets for common configurations
2. **Improve Dependency Management**: Flexible version constraints
3. **Add Build Validation**: Compiler and platform checks
4. **Enhance Documentation**: Detailed build instructions

---

## Strengths & Weaknesses Summary

### 🏆 **Key Strengths**

#### Technical Excellence

- **Algorithmic Correctness**: Faithful implementation of the Fayyad & Irani algorithm
- **Performance Optimization**: Efficient caching and data structures
- **Code Coverage**: 100% test coverage with comprehensive edge cases
- **Modern C++**: Good use of C++17 features and best practices

#### Software Engineering

- **Clean Architecture**: Well-structured OOP design with clear separation
- **SOLID Principles**: Generally good adherence to design principles
- **Multi-Platform**: CMake-based build system for cross-platform support
- **Professional Quality**: Proper licensing, version control, CI/CD integration

#### API Design

- **Multiple Interfaces**: Both C++ native and PyTorch tensor support
- **Sklearn-like API**: Familiar `fit()`/`transform()`/`fit_transform()` pattern
- **Extensible**: Easy to add new discretization algorithms

### ⚠️ **Critical Weaknesses**

#### Security Issues

- **Memory Safety**: Unsafe pointer operations in the PyTorch integration
- **Input Validation**: Insufficient bounds checking and parameter validation
- **Thread Safety**: Shared state without proper synchronization

#### Code Quality

- **Interface Consistency**: LSP violation in the `BinDisc` class
- **Method Complexity**: Some functions handle too many responsibilities
- **Error Handling**: Inconsistent exception handling patterns

#### Documentation

- **API Documentation**: Minimal inline documentation
- **Usage Examples**: Limited practical examples
- **Architecture Documentation**: No high-level design documentation

---

## Recommendations

### 🚨 **Immediate Actions (HIGH Priority)**

#### Security Fixes

```cpp
// 1. Add tensor validation in Discretizer::fit_t()
void Discretizer::fit_t(const torch::Tensor& X_, const torch::Tensor& y_) {
    // Validate tensor properties
    if (!X_.is_contiguous() || !y_.is_contiguous()) {
        throw std::invalid_argument("Tensors must be contiguous");
    }
    if (X_.sizes().size() != 1 || y_.sizes().size() != 1) {
        throw std::invalid_argument("Only 1D tensors supported");
    }
    if (X_.dtype() != torch::kFloat32 || y_.dtype() != torch::kInt32) {
        throw std::invalid_argument("Invalid tensor types");
    }
    // ... rest of implementation
}
```

```cpp
// 2. Add bounds checking for vector access
inline precision_t safe_vector_access(const samples_t& vec, size_t idx) {
    if (idx >= vec.size()) {
        throw std::out_of_range("Vector index out of bounds");
    }
    return vec[idx];
}
```

```cpp
// 3. Add underflow protection in arithmetic operations
size_t safe_subtract(size_t a, size_t b) {
    if (b > a) {
        throw std::underflow_error("Subtraction would cause underflow");
    }
    return a - b;
}
```

### 📋 **Short-term Actions (MEDIUM Priority)**

#### Code Quality Improvements

1. **Fix Interface Consistency**: Separate supervised/unsupervised interfaces (see the sketch after this list)
2. **Refactor Complex Methods**: Break down the `computeCutPoints()` function
3. **Standardize Error Handling**: Consistent exception types and messages
4. **Add Input Validation**: Comprehensive parameter checking
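One hedged way to separate the two contracts; the interface names below are illustrative, not the library's own:

```cpp
#include <vector>

using samples_t = std::vector<float>;
using labels_t  = std::vector<int>;

// Unsupervised discretizers (e.g. equal-width / equal-frequency binning)
// only need the feature values.
struct UnsupervisedDiscretizer {
    virtual ~UnsupervisedDiscretizer() = default;
    virtual void fit(const samples_t& X) = 0;
};

// Supervised discretizers (e.g. MDLP) also need the class labels,
// so the y parameter is never silently ignored.
struct SupervisedDiscretizer {
    virtual ~SupervisedDiscretizer() = default;
    virtual void fit(const samples_t& X, const labels_t& y) = 0;
};
```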
#### Thread Safety

```cpp
// Add thread safety to the Metrics class
class Metrics {
private:
    mutable std::mutex cache_mutex;
    cacheEnt_t entropyCache;
    cacheIg_t igCache;

public:
    precision_t entropy(size_t start, size_t end) const {
        std::lock_guard<std::mutex> lock(cache_mutex);
        // ... implementation
    }
};
```

### 📚 **Long-term Actions (LOW Priority)**

#### Documentation & Usability

1. **API Documentation**: Generate comprehensive Doxygen documentation
2. **Usage Examples**: Create a detailed tutorial and example repository
3. **Performance Testing**: Add benchmarking and regression tests
4. **Architecture Documentation**: Create design documents and UML diagrams

#### Code Modernization

1. **Strategy Pattern**: Proper implementation for `BinDisc` strategies (see the sketch after this list)
2. **Configuration Management**: Centralized parameter handling
3. **Factory Pattern**: Discretizer creation factory
4. **Resource Management**: RAII patterns for memory safety
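A minimal sketch of what an object-oriented strategy for the binning step could look like; the interface and class names are illustrative, not the library's actual API:

```cpp
#include <algorithm>
#include <memory>
#include <vector>

using samples_t = std::vector<float>;

// Illustrative strategy interface: each strategy computes bin edges its own way.
struct BinningStrategy {
    virtual ~BinningStrategy() = default;
    virtual samples_t computeEdges(samples_t X, int n_bins) const = 0;
};

struct UniformStrategy : BinningStrategy {
    samples_t computeEdges(samples_t X, int n_bins) const override {
        samples_t edges;
        if (X.empty() || n_bins < 2) return edges;
        auto [lo, hi] = std::minmax_element(X.begin(), X.end());
        for (int i = 1; i < n_bins; ++i)
            edges.push_back(*lo + (*hi - *lo) * i / n_bins);   // equal-width edges
        return edges;
    }
};

struct QuantileStrategy : BinningStrategy {
    samples_t computeEdges(samples_t X, int n_bins) const override {
        samples_t edges;
        if (X.empty() || n_bins < 2) return edges;
        std::sort(X.begin(), X.end());
        for (int i = 1; i < n_bins; ++i)
            edges.push_back(X[X.size() * i / n_bins]);          // equal-frequency edges
        return edges;
    }
};

// A refactored BinDisc would then hold a std::unique_ptr<BinningStrategy>
// instead of switching on an enum value.
```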
---

## Risk Assessment

### Risk Priority Matrix

| Risk Category | High | Medium | Low | Total |
|---------------|------|--------|-----|-------|
| **Security** | 1 | 7 | 2 | 10 |
| **Code Quality** | 2 | 5 | 3 | 10 |
| **Maintainability** | 0 | 3 | 4 | 7 |
| **Performance** | 0 | 1 | 2 | 3 |
| **Total** | **3** | **16** | **11** | **30** |

### Risk Impact Assessment

#### Critical Risks (Immediate Attention Required)

1. **Memory Safety Vulnerabilities**: Could lead to crashes or security exploits
2. **Interface Consistency Issues**: Violates expected behavior contracts
3. **Input Validation Gaps**: Potential for crashes with malformed input

#### Moderate Risks (Address in Next Release)

1. **Thread Safety Issues**: Problems in multi-threaded environments
2. **Complex Method Design**: Maintenance and debugging difficulties
3. **Documentation Gaps**: Reduced adoption and maintainability

#### Low Risks (Future Improvements)

1. **Performance Optimization**: Minor efficiency improvements
2. **Code Style Consistency**: Enhanced readability
3. **Build System Enhancements**: Improved developer experience

---

## Conclusion

The MDLP discretization library represents a solid implementation of an important machine learning algorithm with excellent test coverage and clean architectural design. However, it requires attention to security vulnerabilities and code quality issues before production deployment.

### Final Verdict

**Rating: B+ (Good with Notable Issues)**

- **Core Algorithm**: Excellent implementation of MDLP with proper mathematical foundations
- **Software Engineering**: Good OOP design following most best practices
- **Testing**: Exemplary test coverage and methodology
- **Security**: Notable vulnerabilities requiring immediate attention
- **Documentation**: Adequate but could be significantly improved

### Deployment Recommendation

**Not Ready for Production** without addressing the HIGH priority security issues, particularly around memory safety and input validation. Once these are resolved, the library would be suitable for production use in most contexts.

### Next Steps

1. **Security Audit**: Address all HIGH and MEDIUM priority security issues
2. **Code Review**: Implement fixes for interface consistency and method complexity
3. **Documentation**: Create comprehensive API documentation and usage guides
4. **Testing**: Add performance benchmarks and stress testing
5. **Release**: Prepare version 2.1.0 with security and quality improvements

---

## Appendix

### Files Analyzed

- `src/CPPFImdlp.h` & `src/CPPFImdlp.cpp` - MDLP algorithm implementation
- `src/Discretizer.h` & `src/Discretizer.cpp` - Base class and PyTorch integration
- `src/BinDisc.h` & `src/BinDisc.cpp` - Simple binning strategies
- `src/Metrics.h` & `src/Metrics.cpp` - Statistical calculations
- `src/typesFImdlp.h` - Type definitions
- `CMakeLists.txt` - Build configuration
- `conanfile.py` - Dependency management
- `tests/*` - Comprehensive test suite

### Analysis Date

**Report Generated**: June 27, 2025

### Tools Used

- **Static Analysis**: Manual code review with security focus
- **Architecture Analysis**: SOLID principles and design pattern evaluation
- **Test Analysis**: Coverage and methodology assessment
- **Security Analysis**: Vulnerability assessment with risk prioritization

---

*This report provides a comprehensive technical analysis of the MDLP discretization library. For questions or clarifications, please refer to the project repository or contact the development team.*
build_conan/CMakeCache.txt (new file, 101 lines)
@@ -0,0 +1,101 @@
|
|||||||
|
# This is the CMakeCache file.
|
||||||
|
# For build in directory: /home/rmontanana/Code/mdlp/build_conan
|
||||||
|
# It was generated by CMake: /usr/bin/cmake
|
||||||
|
# You can edit this file to change values found and used by cmake.
|
||||||
|
# If you do not want to change any of the values, simply exit the editor.
|
||||||
|
# If you do want to change a value, simply edit, save, and exit the editor.
|
||||||
|
# The syntax for the file is as follows:
|
||||||
|
# KEY:TYPE=VALUE
|
||||||
|
# KEY is the name of a variable in the cache.
|
||||||
|
# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!.
|
||||||
|
# VALUE is the current value for the KEY.
|
||||||
|
|
||||||
|
########################
|
||||||
|
# EXTERNAL cache entries
########################

//No help, variable specified on the command line.
CMAKE_BUILD_TYPE:UNINITIALIZED=Release

//Value Computed by CMake.
CMAKE_FIND_PACKAGE_REDIRECTS_DIR:STATIC=/home/rmontanana/Code/mdlp/build_conan/CMakeFiles/pkgRedirects

//Value Computed by CMake
CMAKE_PROJECT_DESCRIPTION:STATIC=Discretization algorithm based on the paper by Fayyad & Irani Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning.

//Value Computed by CMake
CMAKE_PROJECT_HOMEPAGE_URL:STATIC=https://github.com/rmontanana/mdlp

//Value Computed by CMake
CMAKE_PROJECT_NAME:STATIC=fimdlp

//Value Computed by CMake
CMAKE_PROJECT_VERSION:STATIC=2.1.0

//Value Computed by CMake
CMAKE_PROJECT_VERSION_MAJOR:STATIC=2

//Value Computed by CMake
CMAKE_PROJECT_VERSION_MINOR:STATIC=1

//Value Computed by CMake
CMAKE_PROJECT_VERSION_PATCH:STATIC=0

//Value Computed by CMake
CMAKE_PROJECT_VERSION_TWEAK:STATIC=

//No help, variable specified on the command line.
CMAKE_TOOLCHAIN_FILE:UNINITIALIZED=conan_toolchain.cmake

//Value Computed by CMake
fimdlp_BINARY_DIR:STATIC=/home/rmontanana/Code/mdlp/build_conan

//Value Computed by CMake
fimdlp_IS_TOP_LEVEL:STATIC=ON

//Value Computed by CMake
fimdlp_SOURCE_DIR:STATIC=/home/rmontanana/Code/mdlp


########################
# INTERNAL cache entries
########################

//This is the directory where this CMakeCache.txt was created
CMAKE_CACHEFILE_DIR:INTERNAL=/home/rmontanana/Code/mdlp/build_conan
//Major version of cmake used to create the current loaded cache
CMAKE_CACHE_MAJOR_VERSION:INTERNAL=3
//Minor version of cmake used to create the current loaded cache
CMAKE_CACHE_MINOR_VERSION:INTERNAL=30
//Patch version of cmake used to create the current loaded cache
CMAKE_CACHE_PATCH_VERSION:INTERNAL=8
//Path to CMake executable.
CMAKE_COMMAND:INTERNAL=/usr/bin/cmake
//Path to cpack program executable.
CMAKE_CPACK_COMMAND:INTERNAL=/usr/bin/cpack
//Path to ctest program executable.
CMAKE_CTEST_COMMAND:INTERNAL=/usr/bin/ctest
//Path to cache edit program executable.
CMAKE_EDIT_COMMAND:INTERNAL=/usr/bin/ccmake
//Name of external makefile project generator.
CMAKE_EXTRA_GENERATOR:INTERNAL=
//Name of generator.
CMAKE_GENERATOR:INTERNAL=Unix Makefiles
//Generator instance identifier.
CMAKE_GENERATOR_INSTANCE:INTERNAL=
//Name of generator platform.
CMAKE_GENERATOR_PLATFORM:INTERNAL=
//Name of generator toolset.
CMAKE_GENERATOR_TOOLSET:INTERNAL=
//Source directory with the top level CMakeLists.txt file for this
// project
CMAKE_HOME_DIRECTORY:INTERNAL=/home/rmontanana/Code/mdlp
//number of local generators
CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=1
//Platform information initialized
CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1
//Path to CMake installation.
CMAKE_ROOT:INTERNAL=/usr/share/cmake
//uname command
CMAKE_UNAME:INTERNAL=/usr/bin/uname
conandata.yml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
sources:
  "2.1.0":
    url: "https://github.com/rmontanana/mdlp/archive/refs/tags/v2.1.0.tar.gz"
    sha256: "placeholder_sha256_hash"
  "2.0.1":
    url: "https://github.com/rmontanana/mdlp/archive/refs/tags/v2.0.1.tar.gz"
    sha256: "placeholder_sha256_hash"
  "2.0.0":
    url: "https://github.com/rmontanana/mdlp/archive/refs/tags/v2.0.0.tar.gz"
    sha256: "placeholder_sha256_hash"

patches:
  "2.1.0":
    - patch_file: "patches/001-cmake-fix.patch"
      patch_description: "Fix CMake configuration for Conan compatibility"
      patch_type: "portability"
conanfile.py (Normal file, 111 lines)
@@ -0,0 +1,111 @@
import os
import re
from conan import ConanFile
from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout, CMakeDeps
from conan.tools.files import load, copy


class FimdlpConan(ConanFile):
    name = "fimdlp"
    version = "X.X.X"
    license = "MIT"
    author = "Ricardo Montañana <rmontanana@gmail.com>"
    url = "https://github.com/rmontanana/mdlp"
    description = "Discretization algorithm based on the paper by Fayyad & Irani Multi-Interval Discretization of Continuous-Valued Attributes for Classification Learning."
    topics = ("machine-learning", "discretization", "mdlp", "classification")

    # Package configuration
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "enable_testing": [True, False],
        "enable_sample": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "enable_testing": False,
        "enable_sample": False,
    }

    # Sources are located in the same place as this recipe, copy them to the recipe
    exports_sources = "CMakeLists.txt", "src/*", "sample/*", "tests/*", "config/*", "fimdlpConfig.cmake.in"

    def set_version(self):
        content = load(self, "CMakeLists.txt")
        version_pattern = re.compile(r'project\s*\([^\)]*VERSION\s+([0-9]+\.[0-9]+\.[0-9]+)', re.IGNORECASE | re.DOTALL)
        match = version_pattern.search(content)
        if match:
            self.version = match.group(1)
        else:
            raise Exception("Version not found in CMakeLists.txt")

    def config_options(self):
        if self.settings.os == "Windows":
            self.options.rm_safe("fPIC")

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def requirements(self):
        # PyTorch dependency for tensor operations
        self.requires("libtorch/2.7.0")

    def build_requirements(self):
        self.requires("arff-files/1.2.0")  # for tests and sample
        if self.options.enable_testing:
            self.test_requires("gtest/1.16.0")

    def layout(self):
        cmake_layout(self)

    def generate(self):
        # Generate CMake configuration files
        deps = CMakeDeps(self)
        deps.generate()

        tc = CMakeToolchain(self)
        # Set CMake variables based on options
        tc.variables["ENABLE_TESTING"] = self.options.enable_testing
        tc.variables["ENABLE_SAMPLE"] = self.options.enable_sample
        tc.variables["BUILD_SHARED_LIBS"] = self.options.shared
        tc.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

        # Run tests if enabled
        if self.options.enable_testing:
            cmake.test()

    def package(self):
        # Install using CMake
        cmake = CMake(self)
        cmake.install()

        # Copy license file
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))

    def package_info(self):
        # Library configuration
        self.cpp_info.libs = ["fimdlp"]
        self.cpp_info.includedirs = ["include"]

        # CMake package configuration
        self.cpp_info.set_property("cmake_file_name", "fimdlp")
        self.cpp_info.set_property("cmake_target_name", "fimdlp::fimdlp")

        # Compiler features
        self.cpp_info.cppstd = "17"

        # System libraries (if needed)
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs.append("m")  # Math library
            self.cpp_info.system_libs.append("pthread")  # Threading

        # Build information for consumers
        self.cpp_info.builddirs = ["lib/cmake/fimdlp"]
config/CMakeLists.txt (Normal file, 4 lines)
@@ -0,0 +1,4 @@
configure_file(
    "config.h.in"
    "${CMAKE_BINARY_DIR}/configured_files/include/config.h" ESCAPE_QUOTES
)
config/config.h.in (Normal file, 13 lines)
@@ -0,0 +1,13 @@
#pragma once

#include <string>
#include <string_view>

#define PROJECT_VERSION_MAJOR @PROJECT_VERSION_MAJOR @
#define PROJECT_VERSION_MINOR @PROJECT_VERSION_MINOR @
#define PROJECT_VERSION_PATCH @PROJECT_VERSION_PATCH @

static constexpr std::string_view project_mdlp_name = "@PROJECT_NAME@";
static constexpr std::string_view project_mdlp_version = "@PROJECT_VERSION@";
static constexpr std::string_view project_mdlp_description = "@PROJECT_DESCRIPTION@";
static constexpr std::string_view git_mdlp_sha = "@GIT_SHA@";
fimdlpConfig.cmake.in (Normal file, 2 lines)
@@ -0,0 +1,2 @@
@PACKAGE_INIT@
include("${CMAKE_CURRENT_LIST_DIR}/fimdlpTargets.cmake")
getversion.py (Normal file, 47 lines)
@@ -0,0 +1,47 @@
# read the version from the CMakeLists.txt file
import re
import sys
from pathlib import Path


def get_version_from_cmakelists(cmakelists_path):
    # Read the CMakeLists.txt file
    try:
        with open(cmakelists_path, 'r') as file:
            content = file.read()
    except IOError as e:
        print(f"Error reading {cmakelists_path}: {e}")
        sys.exit(1)
    # Use regex to find the version line
    # The regex pattern looks for a line that starts with 'project' and captures the version number
    # in the format VERSION x.y.z where x, y, and z are digits.
    # It allows for optional whitespace around the parentheses and the version number.
    version_pattern = re.compile(
        r'project\s*\([^\)]*VERSION\s+([0-9]+\.[0-9]+\.[0-9]+)', re.IGNORECASE | re.DOTALL
    )
    match = version_pattern.search(content)
    if match:
        return match.group(1)
    else:
        return None


def main():
    # Get the path to the CMakeLists.txt file
    cmakelists_path = Path(__file__).parent / "CMakeLists.txt"

    # Check if the file exists
    if not cmakelists_path.exists():
        print(f"Error: {cmakelists_path} does not exist.")
        sys.exit(1)

    # Get the version from the CMakeLists.txt file
    version = get_version_from_cmakelists(cmakelists_path)

    if version:
        print(f"Version: {version}")
    else:
        print("Version not found in CMakeLists.txt.")
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -2,10 +2,15 @@ set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_BUILD_TYPE Debug)
 
+find_package(arff-files REQUIRED)
+
 include_directories(
     ${fimdlp_SOURCE_DIR}/src
     ${fimdlp_SOURCE_DIR}/tests/lib/Files
+    ${CMAKE_BINARY_DIR}/configured_files/include
+    ${libtorch_INCLUDE_DIRS_RELEASE}
+    ${arff-files_INCLUDE_DIRS}
 )
 
-add_executable(sample sample.cpp )
-target_link_libraries(sample fimdlp "${TORCH_LIBRARIES}")
+add_executable(sample sample.cpp)
+target_link_libraries(sample PRIVATE fimdlp torch::torch arff-files::arff-files)
scripts/build_conan.sh (Executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/bin/bash

# Build script for fimdlp using Conan
set -e

echo "Building fimdlp with Conan..."

# Clean previous builds
rm -rf build_conan

# Install dependencies and build
conan install . --output-folder=build_conan --build=missing --profile:build=default --profile:host=default

# Build the project
cd build_conan
cmake .. -DCMAKE_TOOLCHAIN_FILE=conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release
cmake --build .

echo "Build completed successfully!"

# Run tests if requested
if [ "$1" = "--test" ]; then
    echo "Running tests..."
    ctest --output-on-failure
fi
scripts/create_package.sh (Executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/bin/bash

# Script to create and upload fimdlp Conan package
set -e

PACKAGE_NAME="fimdlp"
PACKAGE_VERSION="2.1.0"
REMOTE_NAME="cimmeria"

echo "Creating Conan package for $PACKAGE_NAME/$PACKAGE_VERSION..."

# Create the package
conan create . --profile:build=default --profile:host=default

echo "Package created successfully!"

# Test the package
echo "Testing package..."
conan test test_package $PACKAGE_NAME/$PACKAGE_VERSION@ --profile:build=default --profile:host=default

echo "Package tested successfully!"

# Upload to Cimmeria (if remote is configured)
if conan remote list | grep -q "$REMOTE_NAME"; then
    echo "Uploading package to $REMOTE_NAME..."
    conan upload $PACKAGE_NAME/$PACKAGE_VERSION --remote=$REMOTE_NAME --all
    echo "Package uploaded to $REMOTE_NAME successfully!"
else
    echo "Remote '$REMOTE_NAME' not configured. To upload the package:"
    echo "1. Add the remote: conan remote add $REMOTE_NAME <cimmeria-url>"
    echo "2. Login: conan remote login $REMOTE_NAME <username>"
    echo "3. Upload: conan upload $PACKAGE_NAME/$PACKAGE_VERSION --remote=$REMOTE_NAME --all"
fi
@@ -22,13 +22,15 @@ namespace mdlp {
     BinDisc::~BinDisc() = default;
     void BinDisc::fit(samples_t& X)
     {
-        // y is included for compatibility with the Discretizer interface
-        cutPoints.clear();
+        // Input validation
         if (X.empty()) {
-            cutPoints.push_back(0.0);
-            cutPoints.push_back(0.0);
-            return;
+            throw std::invalid_argument("Input data X cannot be empty");
         }
+        if (X.size() < static_cast<size_t>(n_bins)) {
+            throw std::invalid_argument("Input data size must be at least equal to n_bins");
+        }
+
+        cutPoints.clear();
         if (strategy == strategy_t::QUANTILE) {
             direction = bound_dir_t::RIGHT;
             fit_quantile(X);
@@ -39,10 +41,27 @@ namespace mdlp {
     }
     void BinDisc::fit(samples_t& X, labels_t& y)
     {
+        if (X.empty()) {
+            throw std::invalid_argument("X cannot be empty");
+        }
+
+        // BinDisc is inherently unsupervised, but we validate inputs for consistency
+        // Note: y parameter is validated but not used in binning strategy
         fit(X);
     }
     std::vector<precision_t> linspace(precision_t start, precision_t end, int num)
     {
+        // Input validation
+        if (num < 2) {
+            throw std::invalid_argument("Number of points must be at least 2 for linspace");
+        }
+        if (std::isnan(start) || std::isnan(end)) {
+            throw std::invalid_argument("Start and end values cannot be NaN");
+        }
+        if (std::isinf(start) || std::isinf(end)) {
+            throw std::invalid_argument("Start and end values cannot be infinite");
+        }
+
         if (start == end) {
             return { start, end };
         }
@@ -60,6 +79,14 @@ namespace mdlp {
     }
     std::vector<precision_t> percentile(samples_t& data, const std::vector<precision_t>& percentiles)
     {
+        // Input validation
+        if (data.empty()) {
+            throw std::invalid_argument("Data cannot be empty for percentile calculation");
+        }
+        if (percentiles.empty()) {
+            throw std::invalid_argument("Percentiles cannot be empty");
+        }
+
         // Implementation taken from https://dpilger26.github.io/NumCpp/doxygen/html/percentile_8hpp_source.html
         std::vector<precision_t> results;
         bool first = true;
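With these validation changes, an empty feature vector is rejected up front instead of quietly yielding a degenerate pair of zero cut points. A minimal sketch of the new behaviour from a caller's point of view (the constructor form `BinDisc(n_bins, strategy)` mirrors the unit tests; the flat `#include "BinDisc.h"` path is an assumption of this sketch):

#include <iostream>
#include <stdexcept>
#include "BinDisc.h"   // assumed include path for this sketch

int main() {
    mdlp::BinDisc disc(3, mdlp::strategy_t::UNIFORM);
    mdlp::samples_t empty = {};
    try {
        disc.fit(empty);   // previously produced cut points {0.0, 0.0}; now throws
    } catch (const std::invalid_argument& e) {
        std::cout << "rejected: " << e.what() << std::endl;
    }
    return 0;
}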
@@ -8,6 +8,7 @@
 #include <algorithm>
 #include <set>
 #include <cmath>
+#include <stdexcept>
 #include "CPPFImdlp.h"
 
 namespace mdlp {
@@ -18,6 +19,17 @@ namespace mdlp {
         max_depth(max_depth_),
         proposed_cuts(proposed)
     {
+        // Input validation for constructor parameters
+        if (min_length_ < 3) {
+            throw std::invalid_argument("min_length must be greater than 2");
+        }
+        if (max_depth_ < 1) {
+            throw std::invalid_argument("max_depth must be greater than 0");
+        }
+        if (proposed < 0.0f) {
+            throw std::invalid_argument("proposed_cuts must be non-negative");
+        }
+
         direction = bound_dir_t::RIGHT;
     }
 
@@ -27,7 +39,7 @@ namespace mdlp {
         if (proposed_cuts == 0) {
             return numeric_limits<size_t>::max();
         }
-        if (proposed_cuts < 0 || proposed_cuts > static_cast<precision_t>(X.size())) {
+        if (proposed_cuts > static_cast<precision_t>(X.size())) {
             throw invalid_argument("wrong proposed num_cuts value");
         }
         if (proposed_cuts < 1)
@@ -44,17 +56,11 @@ namespace mdlp {
         discretizedData.clear();
         cutPoints.clear();
         if (X.size() != y.size()) {
-            throw invalid_argument("X and y must have the same size");
+            throw std::invalid_argument("X and y must have the same size: " + std::to_string(X.size()) + " != " + std::to_string(y.size()));
         }
         if (X.empty() || y.empty()) {
             throw invalid_argument("X and y must have at least one element");
         }
-        if (min_length < 3) {
-            throw invalid_argument("min_length must be greater than 2");
-        }
-        if (max_depth < 1) {
-            throw invalid_argument("max_depth must be greater than 0");
-        }
         indices = sortIndices(X_, y_);
         metrics.setData(y, indices);
         computeCutPoints(0, X.size(), 1);
@@ -81,26 +87,33 @@ namespace mdlp {
         precision_t previous;
         precision_t actual;
         precision_t next;
-        previous = X[indices[idxPrev]];
-        actual = X[indices[cut]];
-        next = X[indices[idxNext]];
+        previous = safe_X_access(idxPrev);
+        actual = safe_X_access(cut);
+        next = safe_X_access(idxNext);
         // definition 2 of the paper => X[t-1] < X[t]
         // get the first equal value of X in the interval
         while (idxPrev > start && actual == previous) {
-            previous = X[indices[--idxPrev]];
+            --idxPrev;
+            previous = safe_X_access(idxPrev);
         }
         backWall = idxPrev == start && actual == previous;
         // get the last equal value of X in the interval
         while (idxNext < end - 1 && actual == next) {
-            next = X[indices[++idxNext]];
+            ++idxNext;
+            next = safe_X_access(idxNext);
         }
         // # of duplicates before cutpoint
-        n = cut - 1 - idxPrev;
+        n = safe_subtract(safe_subtract(cut, 1), idxPrev);
         // # of duplicates after cutpoint
         m = idxNext - cut - 1;
         // Decide which values to use
-        cut = cut + (backWall ? m + 1 : -n);
-        actual = X[indices[cut]];
+        if (backWall) {
+            m = int(idxNext - cut - 1) < 0 ? 0 : m; // Ensure m right
+            cut = cut + m + 1;
+        } else {
+            cut = safe_subtract(cut, n);
+        }
+        actual = safe_X_access(cut);
         return { (actual + previous) / 2, cut };
     }
 
@@ -109,7 +122,7 @@ namespace mdlp {
         size_t cut;
         pair<precision_t, size_t> result;
         // Check if the interval length and the depth are Ok
-        if (end - start < min_length || depth_ > max_depth)
+        if (end < start || safe_subtract(end, start) < min_length || depth_ > max_depth)
             return;
         depth = depth_ > depth ? depth_ : depth;
         cut = getCandidate(start, end);
@@ -129,14 +142,14 @@ namespace mdlp {
         /* Definition 1: A binary discretization for A is determined by selecting the cut point TA for which
         E(A, TA; S) is minimal amongst all the candidate cut points. */
         size_t candidate = numeric_limits<size_t>::max();
-        size_t elements = end - start;
+        size_t elements = safe_subtract(end, start);
         bool sameValues = true;
         precision_t entropy_left;
         precision_t entropy_right;
         precision_t minEntropy;
         // Check if all the values of the variable in the interval are the same
         for (size_t idx = start + 1; idx < end; idx++) {
-            if (X[indices[idx]] != X[indices[start]]) {
+            if (safe_X_access(idx) != safe_X_access(start)) {
                 sameValues = false;
                 break;
             }
@@ -146,7 +159,7 @@ namespace mdlp {
         minEntropy = metrics.entropy(start, end);
         for (size_t idx = start + 1; idx < end; idx++) {
             // Cutpoints are always on boundaries (definition 2)
-            if (y[indices[idx]] == y[indices[idx - 1]])
+            if (safe_y_access(idx) == safe_y_access(idx - 1))
                 continue;
             entropy_left = precision_t(idx - start) / static_cast<precision_t>(elements) * metrics.entropy(start, idx);
             entropy_right = precision_t(end - idx) / static_cast<precision_t>(elements) * metrics.entropy(idx, end);
@@ -168,7 +181,7 @@ namespace mdlp {
         precision_t ent;
         precision_t ent1;
         precision_t ent2;
-        auto N = precision_t(end - start);
+        auto N = precision_t(safe_subtract(end, start));
         k = metrics.computeNumClasses(start, end);
         k1 = metrics.computeNumClasses(start, cut);
         k2 = metrics.computeNumClasses(cut, end);
@@ -188,6 +201,9 @@ namespace mdlp {
         indices_t idx(X_.size());
         std::iota(idx.begin(), idx.end(), 0);
         stable_sort(idx.begin(), idx.end(), [&X_, &y_](size_t i1, size_t i2) {
+            if (i1 >= X_.size() || i2 >= X_.size() || i1 >= y_.size() || i2 >= y_.size()) {
+                throw std::out_of_range("Index out of bounds in sort comparison");
+            }
             if (X_[i1] == X_[i2])
                 return y_[i1] < y_[i2];
             else
@@ -206,7 +222,7 @@ namespace mdlp {
         size_t end;
         for (size_t idx = 0; idx < cutPoints.size(); idx++) {
             end = begin;
-            while (X[indices[end]] < cutPoints[idx] && end < X.size())
+            while (end < indices.size() && safe_X_access(end) < cutPoints[idx] && end < X.size())
                 end++;
             entropy = metrics.entropy(begin, end);
             if (entropy > maxEntropy) {
@@ -39,6 +39,33 @@ namespace mdlp {
         size_t getCandidate(size_t, size_t);
         size_t compute_max_num_cut_points() const;
         pair<precision_t, size_t> valueCutPoint(size_t, size_t, size_t);
+    private:
+        inline precision_t safe_X_access(size_t idx) const {
+            if (idx >= indices.size()) {
+                throw std::out_of_range("Index out of bounds for indices array");
+            }
+            size_t real_idx = indices[idx];
+            if (real_idx >= X.size()) {
+                throw std::out_of_range("Index out of bounds for X array");
+            }
+            return X[real_idx];
+        }
+        inline label_t safe_y_access(size_t idx) const {
+            if (idx >= indices.size()) {
+                throw std::out_of_range("Index out of bounds for indices array");
+            }
+            size_t real_idx = indices[idx];
+            if (real_idx >= y.size()) {
+                throw std::out_of_range("Index out of bounds for y array");
+            }
+            return y[real_idx];
+        }
+        inline size_t safe_subtract(size_t a, size_t b) const {
+            if (b > a) {
+                throw std::underflow_error("Subtraction would cause underflow");
+            }
+            return a - b;
+        }
     };
 }
 #endif
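These helpers concentrate the bounds and underflow checks that the .cpp changes above depend on. As a standalone illustration (not library code), the `safe_subtract` guard matters because `size_t` arithmetic wraps around instead of going negative:

#include <cstddef>
#include <iostream>
#include <stdexcept>

// Standalone copy of the underflow guard, for illustration only.
static size_t safe_subtract(size_t a, size_t b) {
    if (b > a) {
        throw std::underflow_error("Subtraction would cause underflow");
    }
    return a - b;
}

int main() {
    std::cout << safe_subtract(5, 3) << std::endl;    // prints 2
    std::cout << size_t(3) - size_t(5) << std::endl;  // unchecked subtraction wraps to a huge value
    try {
        safe_subtract(3, 5);                          // throws instead of wrapping
    } catch (const std::underflow_error& e) {
        std::cout << e.what() << std::endl;
    }
    return 0;
}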
@@ -10,6 +10,14 @@ namespace mdlp {
 
     labels_t& Discretizer::transform(const samples_t& data)
     {
+        // Input validation
+        if (data.empty()) {
+            throw std::invalid_argument("Data for transformation cannot be empty");
+        }
+        if (cutPoints.size() < 2) {
+            throw std::runtime_error("Discretizer not fitted yet or no valid cut points found");
+        }
+
         discretizedData.clear();
         discretizedData.reserve(data.size());
         // CutPoints always have at least two items
@@ -31,6 +39,26 @@ namespace mdlp {
     }
     void Discretizer::fit_t(const torch::Tensor& X_, const torch::Tensor& y_)
     {
+        // Validate tensor properties for security
+        if (!X_.is_contiguous() || !y_.is_contiguous()) {
+            throw std::invalid_argument("Tensors must be contiguous");
+        }
+        if (X_.sizes().size() != 1 || y_.sizes().size() != 1) {
+            throw std::invalid_argument("Only 1D tensors supported");
+        }
+        if (X_.dtype() != torch::kFloat32) {
+            throw std::invalid_argument("X tensor must be Float32 type");
+        }
+        if (y_.dtype() != torch::kInt32) {
+            throw std::invalid_argument("y tensor must be Int32 type");
+        }
+        if (X_.numel() != y_.numel()) {
+            throw std::invalid_argument("X and y tensors must have same number of elements");
+        }
+        if (X_.numel() == 0) {
+            throw std::invalid_argument("Tensors cannot be empty");
+        }
+
         auto num_elements = X_.numel();
         samples_t X(X_.data_ptr<precision_t>(), X_.data_ptr<precision_t>() + num_elements);
         labels_t y(y_.data_ptr<int>(), y_.data_ptr<int>() + num_elements);
@@ -38,6 +66,20 @@ namespace mdlp {
     }
     torch::Tensor Discretizer::transform_t(const torch::Tensor& X_)
     {
+        // Validate tensor properties for security
+        if (!X_.is_contiguous()) {
+            throw std::invalid_argument("Tensor must be contiguous");
+        }
+        if (X_.sizes().size() != 1) {
+            throw std::invalid_argument("Only 1D tensors supported");
+        }
+        if (X_.dtype() != torch::kFloat32) {
+            throw std::invalid_argument("X tensor must be Float32 type");
+        }
+        if (X_.numel() == 0) {
+            throw std::invalid_argument("Tensor cannot be empty");
+        }
+
         auto num_elements = X_.numel();
         samples_t X(X_.data_ptr<precision_t>(), X_.data_ptr<precision_t>() + num_elements);
         auto result = transform(X);
@@ -45,6 +87,26 @@ namespace mdlp {
     }
     torch::Tensor Discretizer::fit_transform_t(const torch::Tensor& X_, const torch::Tensor& y_)
     {
+        // Validate tensor properties for security
+        if (!X_.is_contiguous() || !y_.is_contiguous()) {
+            throw std::invalid_argument("Tensors must be contiguous");
+        }
+        if (X_.sizes().size() != 1 || y_.sizes().size() != 1) {
+            throw std::invalid_argument("Only 1D tensors supported");
+        }
+        if (X_.dtype() != torch::kFloat32) {
+            throw std::invalid_argument("X tensor must be Float32 type");
+        }
+        if (y_.dtype() != torch::kInt32) {
+            throw std::invalid_argument("y tensor must be Int32 type");
+        }
+        if (X_.numel() != y_.numel()) {
+            throw std::invalid_argument("X and y tensors must have same number of elements");
+        }
+        if (X_.numel() == 0) {
+            throw std::invalid_argument("Tensors cannot be empty");
+        }
+
         auto num_elements = X_.numel();
         samples_t X(X_.data_ptr<precision_t>(), X_.data_ptr<precision_t>() + num_elements);
         labels_t y(y_.data_ptr<int>(), y_.data_ptr<int>() + num_elements);
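The tensor overloads now reject anything that is not a contiguous 1-D tensor with `X` as Float32 and `y` as Int32. A minimal sketch of a call that satisfies those checks (the `<fimdlp/CPPFImdlp.h>` include path is taken from the test_package sources further down; treat it as an assumption outside that setup):

#include <iostream>
#include <torch/torch.h>
#include <fimdlp/CPPFImdlp.h>   // assumed packaged include path

int main() {
    // 1-D, contiguous tensors with the dtypes required by the new validation.
    auto X = torch::tensor({4.7f, 4.9f, 5.1f, 5.5f, 6.1f, 6.3f}, torch::kFloat32);
    auto y = torch::tensor({0, 0, 0, 1, 1, 1}, torch::kInt32);

    mdlp::CPPFImdlp discretizer;
    auto labels = discretizer.fit_transform_t(X, y);  // throws std::invalid_argument on dtype/shape mismatch
    std::cout << labels << std::endl;
    return 0;
}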
@@ -11,6 +11,7 @@
 #include <algorithm>
 #include "typesFImdlp.h"
 #include <torch/torch.h>
+#include "config.h"
 
 namespace mdlp {
     enum class bound_dir_t {
@@ -29,7 +30,7 @@ namespace mdlp {
         void fit_t(const torch::Tensor& X_, const torch::Tensor& y_);
         torch::Tensor transform_t(const torch::Tensor& X_);
         torch::Tensor fit_transform_t(const torch::Tensor& X_, const torch::Tensor& y_);
-        static inline std::string version() { return "2.0.1"; };
+        static inline std::string version() { return { project_mdlp_version.begin(), project_mdlp_version.end() }; };
     protected:
         labels_t discretizedData = labels_t();
         cutPoints_t cutPoints; // At least two cutpoints must be provided, the first and the last will be ignored in transform
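Since `version()` now reads `project_mdlp_version` from the generated config.h, the reported string follows the `project(... VERSION ...)` declaration in CMakeLists.txt instead of a hard-coded literal. A consumer-side sketch (include path as in the test_package example below; assumed for other layouts):

#include <iostream>
#include <fimdlp/CPPFImdlp.h>   // assumed packaged include path

int main() {
    // version() is inherited from Discretizer; after this change it is
    // expected to report the configured project version, e.g. "2.1.0".
    mdlp::CPPFImdlp discretizer;
    std::cout << "fimdlp version: " << discretizer.version() << std::endl;
    return 0;
}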
@@ -26,6 +26,7 @@ namespace mdlp {
 
     void Metrics::setData(const labels_t& y_, const indices_t& indices_)
     {
+        std::lock_guard<std::mutex> lock(cache_mutex);
         indices = indices_;
         y = y_;
         numClasses = computeNumClasses(0, indices.size());
@@ -35,15 +36,23 @@ namespace mdlp {
 
     precision_t Metrics::entropy(size_t start, size_t end)
     {
+        if (end - start < 2)
+            return 0;
+
+        // Check cache first with read lock
+        {
+            std::lock_guard<std::mutex> lock(cache_mutex);
+            if (entropyCache.find({ start, end }) != entropyCache.end()) {
+                return entropyCache[{start, end}];
+            }
+        }
+
+        // Compute entropy outside of lock
         precision_t p;
         precision_t ventropy = 0;
         int nElements = 0;
         labels_t counts(numClasses + 1, 0);
-        if (end - start < 2)
-            return 0;
-        if (entropyCache.find({ start, end }) != entropyCache.end()) {
-            return entropyCache[{start, end}];
-        }
         for (auto i = &indices[start]; i != &indices[end]; ++i) {
             counts[y[*i]]++;
             nElements++;
@@ -54,12 +63,27 @@ namespace mdlp {
                 ventropy -= p * log2(p);
             }
         }
-        entropyCache[{start, end}] = ventropy;
+
+        // Update cache with write lock
+        {
+            std::lock_guard<std::mutex> lock(cache_mutex);
+            entropyCache[{start, end}] = ventropy;
+        }
+
         return ventropy;
     }
 
     precision_t Metrics::informationGain(size_t start, size_t cut, size_t end)
     {
+        // Check cache first with read lock
+        {
+            std::lock_guard<std::mutex> lock(cache_mutex);
+            if (igCache.find(make_tuple(start, cut, end)) != igCache.end()) {
+                return igCache[make_tuple(start, cut, end)];
+            }
+        }
+
+        // Compute information gain outside of lock
         precision_t iGain;
         precision_t entropyInterval;
         precision_t entropyLeft;
@@ -67,9 +91,7 @@ namespace mdlp {
         size_t nElementsLeft = cut - start;
         size_t nElementsRight = end - cut;
         size_t nElements = end - start;
-        if (igCache.find(make_tuple(start, cut, end)) != igCache.end()) {
-            return igCache[make_tuple(start, cut, end)];
-        }
         entropyInterval = entropy(start, end);
         entropyLeft = entropy(start, cut);
         entropyRight = entropy(cut, end);
@@ -77,7 +99,13 @@ namespace mdlp {
             (static_cast<precision_t>(nElementsLeft) * entropyLeft +
                 static_cast<precision_t>(nElementsRight) * entropyRight) /
             static_cast<precision_t>(nElements);
-        igCache[make_tuple(start, cut, end)] = iGain;
+
+        // Update cache with write lock
+        {
+            std::lock_guard<std::mutex> lock(cache_mutex);
+            igCache[make_tuple(start, cut, end)] = iGain;
+        }
+
         return iGain;
     }
 
@@ -8,6 +8,7 @@
 #define CCMETRICS_H
 
 #include "typesFImdlp.h"
+#include <mutex>
 
 namespace mdlp {
     class Metrics {
@@ -15,6 +16,7 @@ namespace mdlp {
         labels_t& y;
         indices_t& indices;
         int numClasses;
+        mutable std::mutex cache_mutex;
         cacheEnt_t entropyCache = cacheEnt_t();
         cacheIg_t igCache = cacheIg_t();
     public:
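Taken together, the new `cache_mutex` member and the scoped `std::lock_guard` blocks in Metrics.cpp follow a check, compute, store discipline: look the value up under the lock, do the computation outside the lock, then write the result back under the lock. A generic sketch of that pattern, independent of the fimdlp types (racing threads may recompute the same value, which is harmless because both store an identical result):

#include <iostream>
#include <map>
#include <mutex>

// Generic check-compute-store cache mirroring the locking discipline used
// for entropyCache and igCache above (illustration only, not library code).
class SquareCache {
    mutable std::mutex cache_mutex;
    std::map<long long, long long> cache;
public:
    long long get(long long n) {
        {   // check the cache under the lock
            std::lock_guard<std::mutex> lock(cache_mutex);
            auto it = cache.find(n);
            if (it != cache.end())
                return it->second;
        }
        long long value = n * n;   // compute outside the lock
        {   // store the result under the lock
            std::lock_guard<std::mutex> lock(cache_mutex);
            cache[n] = value;
        }
        return value;
    }
};

int main() {
    SquareCache cache;
    std::cout << cache.get(12) << std::endl;  // computed, then cached
    std::cout << cache.get(12) << std::endl;  // served from the cache
    return 0;
}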
test_consumer/CMakeLists.txt (Normal file, 9 lines)
@@ -0,0 +1,9 @@
cmake_minimum_required(VERSION 3.20)
project(test_fimdlp)

set(CMAKE_CXX_STANDARD 17)

find_package(fimdlp REQUIRED)

add_executable(test_fimdlp test_fimdlp.cpp)
target_link_libraries(test_fimdlp fimdlp::fimdlp)
test_consumer/CMakeUserPresets.json (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{
    "version": 4,
    "vendor": {
        "conan": {}
    },
    "include": [
        "build/Release/generators/CMakePresets.json"
    ]
}
test_consumer/conanfile.txt (Normal file, 9 lines)
@@ -0,0 +1,9 @@
[requires]
fimdlp/2.0.1

[generators]
CMakeDeps
CMakeToolchain

[layout]
cmake_layout
test_consumer/test_fimdlp.cpp (Normal file, 39 lines)
@@ -0,0 +1,39 @@
#include <iostream>
#include <vector>
#include <fimdlp/CPPFImdlp.h>
#include <fimdlp/BinDisc.h>

int main() {
    std::cout << "Testing FIMDLP package..." << std::endl;

    // Test data - simple continuous values with binary classification
    mdlp::samples_t data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0};
    mdlp::labels_t labels = {0, 0, 0, 1, 1, 0, 1, 1, 1, 1};

    std::cout << "Created test data with " << data.size() << " samples" << std::endl;

    // Test MDLP discretizer
    mdlp::CPPFImdlp discretizer;
    discretizer.fit(data, labels);

    auto cut_points = discretizer.getCutPoints();
    std::cout << "MDLP found " << cut_points.size() << " cut points" << std::endl;

    for (size_t i = 0; i < cut_points.size(); ++i) {
        std::cout << "Cut point " << i << ": " << cut_points[i] << std::endl;
    }

    // Test BinDisc discretizer
    mdlp::BinDisc bin_discretizer(3, mdlp::strategy_t::UNIFORM); // 3 bins, uniform strategy
    bin_discretizer.fit(data, labels);

    auto bin_cut_points = bin_discretizer.getCutPoints();
    std::cout << "BinDisc found " << bin_cut_points.size() << " cut points" << std::endl;

    for (size_t i = 0; i < bin_cut_points.size(); ++i) {
        std::cout << "Bin cut point " << i << ": " << bin_cut_points[i] << std::endl;
    }

    std::cout << "FIMDLP package test completed successfully!" << std::endl;
    return 0;
}
test_package/CMakeLists.txt (Normal file, 9 lines)
@@ -0,0 +1,9 @@
cmake_minimum_required(VERSION 3.20)
project(test_fimdlp)

find_package(fimdlp REQUIRED)
find_package(Torch REQUIRED)

add_executable(test_fimdlp src/test_fimdlp.cpp)
target_link_libraries(test_fimdlp fimdlp::fimdlp torch::torch)
target_compile_features(test_fimdlp PRIVATE cxx_std_17)
test_package/CMakeUserPresets.json (Normal file, 9 lines)
@@ -0,0 +1,9 @@
{
    "version": 4,
    "vendor": {
        "conan": {}
    },
    "include": [
        "build/gcc-14-x86_64-gnu17-release/generators/CMakePresets.json"
    ]
}
test_package/conanfile.py (Normal file, 28 lines)
@@ -0,0 +1,28 @@
import os
from conan import ConanFile
from conan.tools.cmake import CMake, cmake_layout
from conan.tools.build import can_run


class FimdlpTestConan(ConanFile):
    settings = "os", "compiler", "build_type", "arch"
    # VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env:CONAN_RUN_TESTS" is false
    generators = "CMakeDeps", "CMakeToolchain", "VirtualRunEnv"
    apply_env = False  # avoid the default VirtualBuildEnv from the base class
    test_type = "explicit"

    def requirements(self):
        self.requires(self.tested_reference_str)

    def layout(self):
        cmake_layout(self)

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        if can_run(self):
            cmd = os.path.join(self.cpp.build.bindir, "test_fimdlp")
            self.run(cmd, env="conanrun")
test_package/src/test_fimdlp.cpp (Normal file, 27 lines)
@@ -0,0 +1,27 @@
#include <iostream>
#include <vector>
#include <fimdlp/CPPFImdlp.h>
#include <fimdlp/Metrics.h>

int main() {
    std::cout << "Testing fimdlp library..." << std::endl;

    // Simple test of the library
    try {
        // Test Metrics class
        Metrics metrics;
        std::vector<int> labels = {0, 0, 1, 1, 0, 1};
        double entropy = metrics.entropy(labels);
        std::cout << "Entropy calculated: " << entropy << std::endl;

        // Test CPPFImdlp creation
        CPPFImdlp discretizer;
        std::cout << "CPPFImdlp instance created successfully" << std::endl;

        std::cout << "fimdlp library test completed successfully!" << std::endl;
        return 0;
    } catch (const std::exception& e) {
        std::cerr << "Error testing fimdlp library: " << e.what() << std::endl;
        return 1;
    }
}
@@ -16,13 +16,13 @@ namespace mdlp {
     const float margin = 1e-4;
     static std::string set_data_path()
     {
-        std::string path = "../datasets/";
+        std::string path = "datasets/";
         std::ifstream file(path + "iris.arff");
         if (file.is_open()) {
             file.close();
             return path;
         }
-        return "../../tests/datasets/";
+        return "tests/datasets/";
     }
     const std::string data_path = set_data_path();
     class TestBinDisc3U : public BinDisc, public testing::Test {
@@ -153,20 +153,12 @@ namespace mdlp {
     TEST_F(TestBinDisc3U, EmptyUniform)
     {
         samples_t X = {};
-        fit(X);
-        auto cuts = getCutPoints();
-        ASSERT_EQ(2, cuts.size());
-        EXPECT_NEAR(0, cuts.at(0), margin);
-        EXPECT_NEAR(0, cuts.at(1), margin);
+        EXPECT_THROW(fit(X), std::invalid_argument);
     }
     TEST_F(TestBinDisc3Q, EmptyQuantile)
     {
         samples_t X = {};
-        fit(X);
-        auto cuts = getCutPoints();
-        ASSERT_EQ(2, cuts.size());
-        EXPECT_NEAR(0, cuts.at(0), margin);
-        EXPECT_NEAR(0, cuts.at(1), margin);
+        EXPECT_THROW(fit(X), std::invalid_argument);
     }
     TEST(TestBinDisc3, ExceptionNumberBins)
     {
@@ -406,6 +398,6 @@ namespace mdlp {
                 EXPECT_NEAR(exp.cutpoints_.at(i), cuts.at(i), margin);
             }
         }
-        std::cout << "* Number of experiments tested: " << num << std::endl;
+        // std::cout << "* Number of experiments tested: " << num << std::endl;
     }
 }
@@ -1,17 +1,12 @@
-include(FetchContent)
-include_directories(${GTEST_INCLUDE_DIRS})
-FetchContent_Declare(
-  googletest
-  URL https://github.com/google/googletest/archive/03597a01ee50ed33e9dfd640b249b4be3799d395.zip
-)
-# For Windows: Prevent overriding the parent project's compiler/linker settings
-set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
-FetchContent_MakeAvailable(googletest)
+
+find_package(arff-files REQUIRED)
+find_package(GTest REQUIRED)
 
 include_directories(
-    ${TORCH_INCLUDE_DIRS}
+    ${libtorch_INCLUDE_DIRS_DEBUG}
     ${fimdlp_SOURCE_DIR}/src
-    ${fimdlp_SOURCE_DIR}/tests/lib/Files
+    ${arff-files_INCLUDE_DIRS}
+    ${CMAKE_BINARY_DIR}/configured_files/include
 )
 
 add_executable(Metrics_unittest ${fimdlp_SOURCE_DIR}/src/Metrics.cpp Metrics_unittest.cpp)
@@ -21,18 +16,18 @@ target_link_options(Metrics_unittest PRIVATE --coverage)
 
 add_executable(FImdlp_unittest FImdlp_unittest.cpp
     ${fimdlp_SOURCE_DIR}/src/CPPFImdlp.cpp ${fimdlp_SOURCE_DIR}/src/Metrics.cpp ${fimdlp_SOURCE_DIR}/src/Discretizer.cpp)
-target_link_libraries(FImdlp_unittest GTest::gtest_main "${TORCH_LIBRARIES}")
+target_link_libraries(FImdlp_unittest GTest::gtest_main torch::torch)
 target_compile_options(FImdlp_unittest PRIVATE --coverage)
 target_link_options(FImdlp_unittest PRIVATE --coverage)
 
 add_executable(BinDisc_unittest BinDisc_unittest.cpp ${fimdlp_SOURCE_DIR}/src/BinDisc.cpp ${fimdlp_SOURCE_DIR}/src/Discretizer.cpp)
-target_link_libraries(BinDisc_unittest GTest::gtest_main "${TORCH_LIBRARIES}")
+target_link_libraries(BinDisc_unittest GTest::gtest_main torch::torch)
 target_compile_options(BinDisc_unittest PRIVATE --coverage)
 target_link_options(BinDisc_unittest PRIVATE --coverage)
 
 add_executable(Discretizer_unittest Discretizer_unittest.cpp
     ${fimdlp_SOURCE_DIR}/src/BinDisc.cpp ${fimdlp_SOURCE_DIR}/src/CPPFImdlp.cpp ${fimdlp_SOURCE_DIR}/src/Metrics.cpp ${fimdlp_SOURCE_DIR}/src/Discretizer.cpp )
-target_link_libraries(Discretizer_unittest GTest::gtest_main "${TORCH_LIBRARIES}")
+target_link_libraries(Discretizer_unittest GTest::gtest_main torch::torch)
 target_compile_options(Discretizer_unittest PRIVATE --coverage)
 target_link_options(Discretizer_unittest PRIVATE --coverage)
 
@@ -17,13 +17,13 @@ namespace mdlp {
     const float margin = 1e-4;
     static std::string set_data_path()
    {
-        std::string path = "../datasets/";
+        std::string path = "tests/datasets/";
         std::ifstream file(path + "iris.arff");
         if (file.is_open()) {
             file.close();
             return path;
         }
-        return "../../tests/datasets/";
+        return "datasets/";
     }
     const std::string data_path = set_data_path();
     const labels_t iris_quantile = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 3, 3, 3, 1, 3, 1, 2, 0, 3, 1, 0, 2, 2, 2, 1, 3, 1, 2, 2, 1, 2, 2, 2, 2, 3, 3, 3, 3, 2, 1, 1, 1, 2, 2, 1, 2, 3, 2, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2, 1, 1, 2, 2, 3, 2, 3, 3, 0, 3, 3, 3, 3, 3, 3, 1, 2, 3, 3, 3, 3, 2, 3, 1, 3, 2, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 2, 2 };
@@ -32,8 +32,7 @@ namespace mdlp {
         Discretizer* disc = new BinDisc(4, strategy_t::UNIFORM);
         auto version = disc->version();
         delete disc;
-        std::cout << "Version computed: " << version;
-        EXPECT_EQ("2.0.1", version);
+        EXPECT_EQ("2.1.0", version);
     }
     TEST(Discretizer, BinIrisUniform)
     {
@@ -40,13 +40,13 @@ namespace mdlp {
 
     static string set_data_path()
     {
-        string path = "../datasets/";
+        string path = "datasets/";
         ifstream file(path + "iris.arff");
         if (file.is_open()) {
             file.close();
             return path;
         }
-        return "../../tests/datasets/";
+        return "tests/datasets/";
     }
 
     void checkSortedVector()
@@ -64,7 +64,7 @@ namespace mdlp {
     {
         EXPECT_EQ(computed.size(), expected.size());
         for (unsigned long i = 0; i < computed.size(); i++) {
-            cout << "(" << computed[i] << ", " << expected[i] << ") ";
+            // cout << "(" << computed[i] << ", " << expected[i] << ") ";
             EXPECT_NEAR(computed[i], expected[i], precision);
         }
     }
@@ -76,7 +76,7 @@ namespace mdlp {
         X = X_;
         y = y_;
         indices = sortIndices(X, y);
-        cout << "* " << title << endl;
+        // cout << "* " << title << endl;
         result = valueCutPoint(0, cut, 10);
         EXPECT_NEAR(result.first, midPoint, precision);
         EXPECT_EQ(result.second, limit);
@@ -95,9 +95,9 @@ namespace mdlp {
             test.fit(X[feature], y);
             EXPECT_EQ(test.get_depth(), depths[feature]);
             auto computed = test.getCutPoints();
-            cout << "Feature " << feature << ": ";
+            // cout << "Feature " << feature << ": ";
             checkCutPoints(computed, expected[feature]);
-            cout << endl;
+            // cout << endl;
         }
     }
 };
@@ -113,17 +113,16 @@ namespace mdlp {
     {
         X = { 1, 2, 3 };
         y = { 1, 2 };
-        EXPECT_THROW_WITH_MESSAGE(fit(X, y), invalid_argument, "X and y must have the same size");
+        EXPECT_THROW_WITH_MESSAGE(fit(X, y), invalid_argument, "X and y must have the same size: " + std::to_string(X.size()) + " != " + std::to_string(y.size()));
     }
 
-    TEST_F(TestFImdlp, FitErrorMinLengtMaxDepth)
+    TEST_F(TestFImdlp, FitErrorMinLength)
     {
-        auto testLength = CPPFImdlp(2, 10, 0);
-        auto testDepth = CPPFImdlp(3, 0, 0);
-        X = { 1, 2, 3 };
-        y = { 1, 2, 3 };
-        EXPECT_THROW_WITH_MESSAGE(testLength.fit(X, y), invalid_argument, "min_length must be greater than 2");
-        EXPECT_THROW_WITH_MESSAGE(testDepth.fit(X, y), invalid_argument, "max_depth must be greater than 0");
+        EXPECT_THROW_WITH_MESSAGE(CPPFImdlp(2, 10, 0), invalid_argument, "min_length must be greater than 2");
+    }
+    TEST_F(TestFImdlp, FitErrorMaxDepth)
+    {
+        EXPECT_THROW_WITH_MESSAGE(CPPFImdlp(3, 0, 0), invalid_argument, "max_depth must be greater than 0");
     }
 
     TEST_F(TestFImdlp, JoinFit)
@@ -137,14 +136,16 @@ namespace mdlp {
         checkCutPoints(computed, expected);
     }
 
+    TEST_F(TestFImdlp, FitErrorMinCutPoints)
+    {
+        EXPECT_THROW_WITH_MESSAGE(CPPFImdlp(3, 10, -1), invalid_argument, "proposed_cuts must be non-negative");
+    }
     TEST_F(TestFImdlp, FitErrorMaxCutPoints)
     {
-        auto testmin = CPPFImdlp(2, 10, -1);
-        auto testmax = CPPFImdlp(3, 0, 200);
-        X = { 1, 2, 3 };
-        y = { 1, 2, 3 };
-        EXPECT_THROW_WITH_MESSAGE(testmin.fit(X, y), invalid_argument, "wrong proposed num_cuts value");
-        EXPECT_THROW_WITH_MESSAGE(testmax.fit(X, y), invalid_argument, "wrong proposed num_cuts value");
+        auto test = CPPFImdlp(3, 1, 8);
+        samples_t X_ = { 1, 2, 2, 3, 4, 2, 3 };
+        labels_t y_ = { 0, 0, 1, 2, 3, 4, 5 };
+        EXPECT_THROW_WITH_MESSAGE(test.fit(X_, y_), invalid_argument, "wrong proposed num_cuts value");
     }
 
     TEST_F(TestFImdlp, SortIndices)
Submodule tests/lib/Files deleted from a5316928d4