Almost working
@@ -1,7 +1,7 @@
cmake_minimum_required(VERSION 3.5)
project(PyWrap)

set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(Python3 3.11...3.11.9 COMPONENTS Interpreter Development REQUIRED)

@@ -3,7 +3,7 @@ include_directories(${Python3_INCLUDE_DIRS})
include_directories(${TORCH_INCLUDE_DIRS})

add_executable(main main.cc STree.cc SVC.cc PyClassifier.cc PyWrap.cc)
add_executable(example example.cpp)
add_executable(example example.cpp PyWrap.cc)

target_link_libraries(main ${Python3_LIBRARIES} "${TORCH_LIBRARIES}" ${LIBTORCH_PYTHON} Boost::boost Boost::python Boost::numpy ArffFiles)
target_link_libraries(example ${Python3_LIBRARIES} "${TORCH_LIBRARIES}" Boost::boost Boost::python Boost::numpy ArffFiles)
src/PyClassifier.cc
@@ -1,6 +1,5 @@
#include "PyClassifier.h"
#include <boost/python/numpy.hpp>
#include <torch/csrc/utils/tensor_numpy.h>

#include <iostream>

namespace pywrap {
@@ -17,6 +16,27 @@ namespace pywrap {
pyWrap->clean(module, className);
std::cout << "Classifier cleaned" << std::endl;
}
void print_array(np::ndarray& array)
{
std::cout << "Array: " << std::endl;
std::cout << p::extract<char const*>(p::str(array)) << std::endl;
}
np::ndarray tensor2numpy(torch::Tensor& X)
{
int m = X.size(0);
int n = X.size(1);
auto Xn = np::from_data(X.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(X.dtype()) * 2 * n, sizeof(X.dtype()) * 2), p::object());
Xn = Xn.transpose();
return Xn;
}
std::pair<np::ndarray, np::ndarray> tensors2numpy(torch::Tensor& X, torch::Tensor& y)
{
int n = X.size(1);
auto yn = np::from_data(y.data_ptr(), np::dtype::get_builtin<int32_t>(), p::make_tuple(n), p::make_tuple(sizeof(y.dtype()) * 2), p::object());
//std::cout << "Printing from within tensors2numpy" << std::endl;
// print_array(yn);
return { tensor2numpy(X), yn };
}
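A note on the stride arithmetic above: sizeof(X.dtype()) measures the caffe2::TypeMeta descriptor returned by dtype(), not an element; on current libtorch builds that happens to be 2 bytes, so the "* 2" only lands on the 4-byte float/int32 element size by coincidence. Below is a sketch of the same conversion that takes the element size from the tensor itself; the helper name is made up, and X is assumed to be a contiguous 2-D kFloat32 tensor that outlives the returned array (np::from_data does not copy).

#include <boost/python/numpy.hpp>
#include <torch/torch.h>
namespace p = boost::python;
namespace np = boost::python::numpy;

np::ndarray tensor2numpy_sketch(torch::Tensor& X)
{
    const int64_t m = X.size(0);
    const int64_t n = X.size(1);
    const int64_t esz = X.element_size();            // bytes per element: 4 for kFloat32
    auto Xn = np::from_data(X.data_ptr(),
                            np::dtype::get_builtin<float>(),
                            p::make_tuple(m, n),
                            p::make_tuple(esz * n, esz),   // row-major byte strides
                            p::object());                  // no owner object: X must stay alive
    return Xn.transpose();                           // same samples-by-features orientation as the helper above
}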
std::string PyClassifier::version()
{
return pyWrap->version(module, className);
@@ -29,56 +49,35 @@ namespace pywrap {
{
return pyWrap->callMethodString(module, className, method);
}
void print_array(np::ndarray& array)
{
std::cout << "Array: " << std::endl;
std::cout << p::extract<char const*>(p::str(array)) << std::endl;
}

PyClassifier& PyClassifier::fit(torch::Tensor& X, torch::Tensor& y, const std::vector<std::string>& features, const std::string& className, std::map<std::string, std::vector<int>>& states)
{
std::cout << "PyClassifier:fit:Converting X to PyObject" << std::endl;
std::cout << "X.defined() = " << X.defined() << std::endl;
int m = X.size(0);
int n = X.size(1);
auto data_numpy = np::from_data(X.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(X.dtype()) * 2 * n, sizeof(X.dtype()) * 2), p::object());
data_numpy = data_numpy.transpose();
print_array(data_numpy);
CPyObject Xp = data_numpy.ptr();
auto [Xn, yn] = tensors2numpy(X, y);
CPyObject Xp = Xn.ptr();
std::cout << "PyClassifier:fit:Converting y to PyObject" << std::endl;
auto y_numpy = np::from_data(y.data_ptr(), np::dtype::get_builtin<int32_t>(), p::make_tuple(n), p::make_tuple(sizeof(y.dtype()) * 2), p::object());
print_array(y_numpy);
CPyObject yp = y_numpy.ptr();
print_array(yn);
CPyObject yp = yn.ptr();
std::cout << "PyClassifier:fit:Calling fit" << std::endl;
pyWrap->fit(module, this->className, Xp, yp);
return *this;
}
torch::Tensor PyClassifier::predict(torch::Tensor& X)
{
int m = X.size(0);
int n = X.size(1);
auto data_numpy = np::from_data(X.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(X.dtype()) * 2 * n, sizeof(X.dtype()) * 2), p::object());
data_numpy = data_numpy.transpose();
print_array(data_numpy);
CPyObject Xp = data_numpy.ptr();
auto Xn = tensor2numpy(X);
print_array(Xn);
CPyObject Xp = Xn.ptr();
auto PyResult = pyWrap->predict(module, className, Xp);
auto result = torch::tensor({ 1,2,3 });
return result;
}
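predict() above still returns a hard-coded { 1,2,3 } placeholder instead of the values produced by the Python side. One possible way to turn the object coming back from pyWrap->predict into a torch::Tensor, assuming the classifier returns a 1-D int32 numpy array (sketch only; the helper name is made up, not part of the commit):

#include <boost/python.hpp>
#include <boost/python/numpy.hpp>
#include <torch/torch.h>
namespace p = boost::python;
namespace np = boost::python::numpy;

torch::Tensor numpy2tensor_sketch(PyObject* py_result)
{
    p::handle<> h(p::borrowed(py_result));                   // borrow, do not steal the reference
    np::ndarray arr = np::from_object(p::object(h),
                                      np::dtype::get_builtin<int32_t>());
    const int64_t n = arr.shape(0);
    // from_blob does not copy, so clone() before the ndarray goes out of scope.
    return torch::from_blob(arr.get_data(), { n }, torch::kInt32).clone();
}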
double PyClassifier::score(torch::Tensor& X, torch::Tensor& y)
{
std::cout << "Converting X to PyObject" << std::endl;
std::cout << "X.defined() = " << X.defined() << std::endl;
//std::cout << "X.pyobj() = " << X.pyobj() << std::endl;
//PyObject* Xp = torch::utils::tensor_to_numpy(X);
auto XX = X.transpose(0, 1);
int m = XX.size(0);
int n = XX.size(1);
auto data_numpy = np::from_data(XX.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(XX.dtype()) * 2 * n, sizeof(XX.dtype()) * 2), p::object());
print_array(data_numpy);
CPyObject Xp = data_numpy.ptr();
std::cout << "Converting y to PyObject" << std::endl;
auto y_numpy = np::from_data(y.data_ptr(), np::dtype::get_builtin<int32_t>(), p::make_tuple(m), p::make_tuple(sizeof(y.dtype()) * 2), p::object());
CPyObject yp = y_numpy.ptr();
std::cout << "PyClassifier::Score:Converting X to PyObject" << std::endl;
auto [Xn, yn] = tensors2numpy(X, y);
CPyObject Xp = Xn.ptr();
CPyObject yp = yn.ptr();
print_array(yn);
auto result = pyWrap->score(module, className, Xp, yp);
return result;
}
src/PyClassifier.h
@@ -3,10 +3,13 @@
#include <string>
#include <map>
#include <vector>
#include <utility>
#include <torch/torch.h>
#include <boost/python/numpy.hpp>
#include "PyWrap.h"

namespace pywrap {

class PyClassifier {
public:
PyClassifier(const std::string& module, const std::string& className);
src/PyWrap.cc
@@ -11,6 +11,7 @@ namespace pywrap {
PyWrap* PyWrap::wrapper = nullptr;
std::mutex PyWrap::mutex;
CPyInstance* PyWrap::pyInstance = nullptr;
auto moduleClassMap = std::map<std::pair<std::string, std::string>, std::tuple<PyObject*, PyObject*, PyObject*>>();

PyWrap* PyWrap::GetInstance()
{
@@ -25,12 +26,21 @@ namespace pywrap {
}
void PyWrap::RemoveInstance()
{
std::lock_guard<std::mutex> lock(mutex);
if (wrapper != nullptr) {
std::cout << "Liberando instancia" << std::endl;
delete pyInstance;
std::cout << "Liberando instancia Python Stack" << std::endl;
if (pyInstance != nullptr) {
std::cout << "-Liberando Python => PyHelper" << std::endl;
delete pyInstance;
} else {
std::cout << "*No había instancia de python para liberar. => PyHelper" << std::endl;
}
pyInstance = nullptr;
delete wrapper;
if (wrapper != nullptr) {
std::cout << "-Liberando PyWrap." << std::endl;
delete wrapper;
} else {
std::cout << "*No había instancia de PyWrap para liberar." << std::endl;
}
wrapper = nullptr;
std::cout << "Instancia liberada" << std::endl;
}
@@ -55,17 +65,25 @@ namespace pywrap {
if (PyErr_Occurred()) {
errorAbort("Couldn't create instance of class " + className);
}
moduleClassMap[{moduleName, className}] = { module, classObject, instance };
std::lock_guard<std::mutex> lock(mutex);
module.AddRef();
classObject.AddRef();
instance.AddRef();
moduleClassMap.insert({ { moduleName, className }, { module.getObject(), classObject.getObject(), instance.getObject() } });
std::cout << "Clase importada" << std::endl;
}
void PyWrap::clean(const std::string& moduleName, const std::string& className)
{
std::lock_guard<std::mutex> lock(mutex);
std::cout << "Limpiando" << std::endl;
auto result = moduleClassMap.find({ moduleName, className });
if (result == moduleClassMap.end()) {
return;
}
std::cout << "--> Limpiando" << std::endl;
Py_DECREF(std::get<0>(result->second));
Py_DECREF(std::get<1>(result->second));
Py_DECREF(std::get<2>(result->second));
moduleClassMap.erase(result);
if (PyErr_Occurred()) {
PyErr_Print();
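The two hunks above settle on one ownership rule: importClass takes a strong reference (AddRef) on each object it stores in moduleClassMap, and clean releases exactly those references (Py_DECREF) before erasing the entry. A minimal standalone illustration of that pattern; the class below is hypothetical and not part of the project:

#include <Python.h>
#include <map>
#include <string>

class PyObjectCache {
    std::map<std::string, PyObject*> cache;
public:
    void put(const std::string& key, PyObject* obj)          // assumes the key is not already present
    {
        Py_INCREF(obj);                                       // the cache now owns one reference
        cache[key] = obj;
    }
    void drop(const std::string& key)
    {
        auto it = cache.find(key);
        if (it == cache.end())
            return;
        Py_DECREF(it->second);                                // release the reference taken in put()
        cache.erase(it);
    }
};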
@@ -122,11 +140,11 @@ namespace pywrap {
void PyWrap::fit(const std::string& moduleName, const std::string& className, CPyObject& X, CPyObject& y)
{
std::cout << "Llamando método fit" << std::endl;
CPyObject instance = getClass(moduleName, className);
PyObject* instance = getClass(moduleName, className);
CPyObject result;
std::string method = "fit";
try {
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), X, y, NULL)))
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), X.getObject(), y.getObject(), NULL)))
errorAbort("Couldn't call method fit");
}
catch (const std::exception& e) {
@@ -142,7 +160,7 @@ namespace pywrap {
CPyObject result;
std::string method = "predict";
try {
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), X, NULL)))
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), X.getObject(), NULL)))
errorAbort("Couldn't call method predict");
}
catch (const std::exception& e) {
@@ -155,11 +173,11 @@ namespace pywrap {
double PyWrap::score(const std::string& moduleName, const std::string& className, CPyObject& X, CPyObject& y)
{
std::cout << "Llamando método score" << std::endl;
CPyObject instance = getClass(moduleName, className);
PyObject* instance = getClass(moduleName, className);
CPyObject result;
std::string method = "score";
try {
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), X, y, NULL)))
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), X.getObject(), y.getObject(), NULL)))
errorAbort("Couldn't call method score");
}
catch (const std::exception& e) {
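All three call sites above converge on the same CPython pattern: fetch the cached instance, build a method-name object, and invoke PyObject_CallMethodObjArgs on raw PyObject* handles. A standalone sketch of that call with explicit reference handling; unlike the code above it also releases the temporary created by PyUnicode_FromString (the function name is made up):

#include <Python.h>

PyObject* call_method2(PyObject* instance, const char* method, PyObject* X, PyObject* y)
{
    PyObject* name = PyUnicode_FromString(method);            // new reference
    if (name == NULL)
        return NULL;
    PyObject* result = PyObject_CallMethodObjArgs(instance, name, X, y, NULL);
    Py_DECREF(name);                                          // balance PyUnicode_FromString
    if (result == NULL)
        PyErr_Print();                                        // surface the Python traceback
    return result;                                            // new reference, or NULL on failure
}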
src/PyWrap.h
@@ -15,27 +15,29 @@ namespace pywrap {
*/
class PyWrap {
public:
PyWrap() = default;
PyWrap(PyWrap& other) = delete;
static PyWrap* GetInstance();
static void RemoveInstance();
void operator=(const PyWrap&) = delete;
~PyWrap() = default;
void fit(const std::string& moduleName, const std::string& className, CPyObject& X, CPyObject& y);
CPyObject predict(const std::string& moduleName, const std::string& className, CPyObject& X);
std::string callMethodString(const std::string& moduleName, const std::string& className, const std::string& method);
std::string version(const std::string& moduleName, const std::string& className);
std::string graph(const std::string& moduleName, const std::string& className);
void fit(const std::string& moduleName, const std::string& className, CPyObject& X, CPyObject& y);
CPyObject predict(const std::string& moduleName, const std::string& className, CPyObject& X);
double score(const std::string& moduleName, const std::string& className, CPyObject& X, CPyObject& y);
void clean(const std::string& moduleName, const std::string& className);
void importClass(const std::string& moduleName, const std::string& className);
private:
PyWrap() = default;
PyObject* getClass(const std::string& moduleName, const std::string& className);
private:
// Only call RemoveInstance from clean method
static void RemoveInstance();
void errorAbort(const std::string& message);
// No need to use static map here, since this class is a singleton
std::map<std::pair<std::string, std::string>, std::tuple<PyObject*, PyObject*, PyObject*>> moduleClassMap;
static CPyInstance* pyInstance;
static PyWrap* wrapper;
static std::mutex mutex;
std::map<std::pair<std::string, std::string>, std::tuple<CPyObject, CPyObject, CPyObject>> moduleClassMap;
};
} /* namespace pywrap */
#endif /* PYWRAP_H */
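With the constructor and RemoveInstance now private, GetInstance/importClass/clean form the public lifecycle of the singleton. The body of GetInstance is not visible in the src/PyWrap.cc hunk above; a conventional mutex-guarded lazy initializer consistent with these declarations would look roughly like this (whether pyInstance is created here is an assumption):

PyWrap* PyWrap::GetInstance()
{
    std::lock_guard<std::mutex> lock(mutex);
    if (wrapper == nullptr) {
        wrapper = new PyWrap();
        pyInstance = new CPyInstance();   // start the embedded interpreter once (assumed)
    }
    return wrapper;
}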
src/example.cpp
@@ -1,151 +1,220 @@
#include <torch/torch.h>
#include <string>
#include <iostream>
#include "ArffFiles.h"
#include <torch/torch.h>
#include "PyHelper.hpp"
#include <boost/python/numpy.hpp>
#include "PyWrap.h"

namespace pywrap {
using namespace std;

void errorAbort(const std::string& message)
{
std::cerr << message << std::endl;
PyErr_Print();
exit(1);
}
void print_array(np::ndarray& array)
{
std::cout << "Array: " << std::endl;
std::cout << p::extract<char const*>(p::str(array)) << std::endl;
}
np::ndarray to_numpy_matrix(torch::Tensor& input_data, np::dtype numpy_dtype)
{
p::tuple shape = p::make_tuple(input_data.size(0), input_data.size(1));
auto tensor_dtype = input_data.dtype();
p::tuple stride = p::make_tuple(sizeof(tensor_dtype) * input_data.size(1), sizeof(tensor_dtype));
auto dito = input_data.transpose(1, 0);
np::ndarray result = np::from_data(dito.data_ptr(), numpy_dtype, shape, stride, p::object());
return result;
}
np::ndarray to_numpy_vector(torch::Tensor& input_data, np::dtype numpy_dtype)
{
p::tuple shape = p::make_tuple(input_data.size(0));
auto tensor_dtype = input_data.dtype();
p::tuple stride = p::make_tuple(sizeof(tensor_dtype), sizeof(tensor_dtype));
np::ndarray result = np::from_data(input_data.data_ptr(), numpy_dtype, shape, stride, p::object());
return result;
}
void flat()
{
double data[][4] = { {0.1, 0.2, 0.3, 0.4} , { 0.5, 0.6, 0.7, 0.8 }, { 0.9, 0.11, 0.12, 0.13 }, { 0.14, 0.15, 0.16, 0.17 }, { 0.18, 0.19, 0.21, 0.22 }, { 0.23, 0.24, 0.25, 0.26 }, { 0.27, 0.28, 0.29, 0.31 } };
int labels[] = { 0, 1, 0, 1 , 0, 0, 1 };
// cout << "Array data: (" << m << ", " << n << ") " << endl;
// for (int i = 0; i < m; ++i) {
// cout << "[ ";
// for (int j = 0; j < n; ++j) {
// cout << setw(4) << std::setprecision(2) << fixed << data[i][j] << " ";
// }
// cout << "]" << endl;
// }
// cout << "Array labels: " << endl;
// for (int i = 0; i < m; ++i) {
// cout << labels[i] << " ";
// }
// cout << endl;
// auto data_numpy = np::from_data(data, np::dtype::get_builtin<double>(), p::make_tuple(m, n), p::make_tuple(sizeof(double) * n, sizeof(double)), p::object());
// auto y_numpy = np::from_data(labels, np::dtype::get_builtin<int>(), p::make_tuple(m), p::make_tuple(sizeof(int)), p::object());
}
class Paths {
public:
static string datasets()
{
return "/home/rmontanana/Code/discretizbench/datasets/";
}
};
void errorAbort(const std::string& message)
{
std::cerr << message << std::endl;
PyErr_Print();
exit(1);
}
void print_array(pywrap::np::ndarray& array)
{
std::cout << "Array: " << std::endl;
std::cout << pywrap::p::extract<char const*>(pywrap::p::str(array)) << std::endl;
}
// np::ndarray to_numpy_matrix(torch::Tensor& input_data, np::dtype numpy_dtype)
// {
// p::tuple shape = p::make_tuple(input_data.size(0), input_data.size(1));
// auto tensor_dtype = input_data.dtype();
// p::tuple stride = p::make_tuple(sizeof(tensor_dtype) * input_data.size(1), sizeof(tensor_dtype));
// auto dito = input_data.transpose(1, 0);
// np::ndarray result = np::from_data(dito.data_ptr(), numpy_dtype, shape, stride, p::object());
// return result;
// }
// np::ndarray to_numpy_vector(torch::Tensor& input_data, np::dtype numpy_dtype)
// {
// p::tuple shape = p::make_tuple(input_data.size(0));
// auto tensor_dtype = input_data.dtype();
// p::tuple stride = p::make_tuple(sizeof(tensor_dtype), sizeof(tensor_dtype));
// np::ndarray result = np::from_data(input_data.data_ptr(), numpy_dtype, shape, stride, p::object());
// return result;
// }
tuple<torch::Tensor, torch::Tensor, vector<string>, string, map<string, vector<int>>> loadDataset(const string& name, bool class_last)
class Paths {
public:
static string datasets()
{
auto handler = ArffFiles();
handler.load(Paths::datasets() + static_cast<string>(name) + ".arff", class_last);
// Get Dataset X, y
vector<vector<float>> X = handler.getX();
vector<int> y = handler.getY();
// Get className & Features
auto className = handler.getClassName();
vector<string> features;
auto attributes = handler.getAttributes();
transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& pair) { return pair.first; });
torch::Tensor Xd;
auto states = map<string, vector<int>>();
Xd = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kFloat32);
for (int i = 0; i < features.size(); ++i) {
Xd.index_put_({ i, "..." }, torch::tensor(X[i], torch::kFloat32));
}
return { Xd, torch::tensor(y, torch::kInt32), features, className, states };
return "../discretizbench/datasets/";
}
};

tuple<torch::Tensor, torch::Tensor, vector<string>, string, map<string, vector<int>>> loadDataset(const string& name, bool class_last)
{
auto handler = ArffFiles();
handler.load(Paths::datasets() + static_cast<string>(name) + ".arff", class_last);
// Get Dataset X, y
vector<vector<float>> X = handler.getX();
vector<int> y = handler.getY();
// Get className & Features
auto className = handler.getClassName();
vector<string> features;
auto attributes = handler.getAttributes();
transform(attributes.begin(), attributes.end(), back_inserter(features), [](const auto& pair) { return pair.first; });
torch::Tensor Xd;
auto states = map<string, vector<int>>();
Xd = torch::zeros({ static_cast<int>(X.size()), static_cast<int>(X[0].size()) }, torch::kFloat32);
for (int i = 0; i < features.size(); ++i) {
Xd.index_put_({ i, "..." }, torch::tensor(X[i], torch::kFloat32));
}
return { Xd, torch::tensor(y, torch::kInt32), features, className, states };
}

} /* namespace pywrap */
using namespace pywrap;
np::ndarray tensor2numpy(torch::Tensor& X)
{
int m = X.size(0);
int n = X.size(1);
auto Xn = np::from_data(X.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(X.dtype()) * 2 * n, sizeof(X.dtype()) * 2), p::object());
Xn = Xn.transpose();
return Xn;
}
pair<np::ndarray, np::ndarray> tensors2numpy(torch::Tensor& X, torch::Tensor& y)
{
int n = X.size(1);
auto yn = np::from_data(y.data_ptr(), np::dtype::get_builtin<int32_t>(), p::make_tuple(n), p::make_tuple(sizeof(y.dtype()) * 2), p::object());
return { tensor2numpy(X), yn };
}
pair<np::ndarray, np::ndarray> getData(const string& dataset)
{
auto [X, y, featuresx, classNamex, statesx] = loadDataset(dataset, true);
auto [Xn, yn] = tensors2numpy(X, y);
auto Xn_shapes = Xn.get_shape();
auto yn_shapes = yn.get_shape();
cout << "Xn_shapes: " << Xn_shapes[0] << ", " << Xn_shapes[1] << endl;
cout << "yn_shapes: " << yn_shapes[0] << endl;
cout << "X shapes: " << X.sizes() << endl;
cout << "y shapes: " << y.sizes() << endl;
assert(Xn_shapes[0] == X.sizes()[0]);
assert(Xn_shapes[1] == X.sizes()[1]);
assert(yn_shapes[0] == y.sizes()[0]);

return { Xn, yn };
}
int main(int argc, char** argv)
{
auto [data_tensor, y_label, featuresx, classNamex, statesx] = loadDataset("iris", true);
//data_tensor = data_tensor.transpose(0, 1);
CPyInstance pInstance;
int m = data_tensor.size(0);
int n = data_tensor.size(1);
// int m = 7;
// int n = 4;
// torch::Tensor data_tensor = torch::rand({ m, n }, torch::kFloat64);
//torch::Tensor data_tensor = torch::tensor({ {0.1, 0.2, 0.3, 0.4} , { 0.5, 0.6, 0.7, 0.8 }, { 0.9, 0.11, 0.12, 0.13 }, { 0.14, 0.15, 0.16, 0.17 }, { 0.18, 0.19, 0.21, 0.22 }, { 0.23, 0.24, 0.25, 0.26 }, { 0.27, 0.28, 0.29, 0.31 } }, torch::kFloat32);
// torch::Tensor y_label = torch::randint(0, 2, { m }, torch::kInt16);
//torch::Tensor y_label = torch::tensor({ 17, 18, 19, 20 , 21, 22, 23 }, torch::kInt32);
cout << "Tensor data: (" << data_tensor.size(0) << ", " << data_tensor.size(1) << ") " << endl << data_tensor << endl;
cout << "Tensor data sizes: " << data_tensor.sizes() << endl;
// cout << "Tensor labels: " << y_label << endl;
cout << "Tensor labels sizes: " << y_label.sizes() << endl;
auto data_numpy = np::from_data(data_tensor.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(data_tensor.dtype()) * 2 * n, sizeof(data_tensor.dtype()) * 2), p::object());
data_numpy = data_numpy.transpose();
auto y_numpy = np::from_data(y_label.data_ptr(), np::dtype::get_builtin<int32_t>(), p::make_tuple(n), p::make_tuple(sizeof(y_label.dtype()) * 2), p::object());
//auto y_numpy = np::from_data(y_label.data_ptr(), np::dtype::get_builtin<int64_t>(), p::make_tuple(m), p::make_tuple(sizeof(y_label.dtype()) * 4), p::object());
cout << "Numpy array data: " << endl;
print_array(data_numpy);
cout << "Numpy array labels: " << endl;
print_array(y_numpy);
cout << "primero" << endl;
CPyObject p = data_numpy.ptr();
CPyObject yp = y_numpy.ptr();
cout << "segundo" << endl;
string moduleName = "stree";
string className = "Stree";
string method = "version";
CPyObject module = PyImport_ImportModule(moduleName.c_str());
if (PyErr_Occurred()) {
errorAbort("Could't import module " + moduleName);
cout << "* Begin." << endl;
{
PyWrap* wrapper = PyWrap::GetInstance();
string dataset = "iris";
// Convert Tensor to numpy array
// auto [Xn, yn] = tensors2numpy(X, y);
// cout << "Numpy array data: " << endl;
// print_array(Xn);
// cout << "Numpy array labels: " << endl;
// print_array(yn);
// Import module
string moduleName = "stree";
string className = "Stree";
// Import
{
cout << "--Import Phase--" << endl;
wrapper->importClass(moduleName, className);
cout << "--Import Phase end--" << endl;
}
// Version
{
cout << "--Version Phase--" << endl;
auto version = wrapper->version(moduleName, className);
cout << "Version: " << version << endl;
cout << "--Version Phase end--" << endl;
}
// Fit
{
cout << "--Fit Phase--" << endl;
auto [Xn, yn] = getData(dataset);
CPyObject Xp = Xn.ptr();
CPyObject yp = yn.ptr();
// Call fit
cout << "Calling fit" << endl;
wrapper->fit(moduleName, className, Xp, yp);
cout << "--Fit Phase end--" << endl;
}
// Call score
{
cout << "--Score Phase--" << endl;
auto [Xn, yn] = getData(dataset);
CPyObject Xp = Xn.ptr();
CPyObject yp = yn.ptr();
cout << "Calling score" << endl;
auto score = wrapper->score(moduleName, className, Xp, yp);
cout << "Score: " << score << endl;
cout << "--Score Phase end--" << endl;
}
// Clean module
{
cout << "--Clean Phase--" << endl;
wrapper->clean(moduleName, className);
cout << "--Clean Phase end--" << endl;
}
}
CPyObject classObject = PyObject_GetAttrString(module, className.c_str());
if (PyErr_Occurred()) {
errorAbort("Couldn't find class " + className);
}
CPyObject instance = PyObject_CallObject(classObject, NULL);
if (PyErr_Occurred()) {
errorAbort("Couldn't create instance of class " + className);
}
CPyObject result;
if (!(result = PyObject_CallMethod(instance, method.c_str(), NULL)))
errorAbort("Couldn't call method " + method);
std::string value = PyUnicode_AsUTF8(result);
cout << "Version: " << value << endl;
cout << "Calling fit" << endl;
p.AddRef();
yp.AddRef();
method = "fit";
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), p.getObject(), yp.getObject(), NULL)))
errorAbort("Couldn't call method fit");
cout << "Calling score" << endl;
method = "score";
if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), p.getObject(), yp.getObject(), NULL)))
errorAbort("Couldn't call method score");
float score = PyFloat_AsDouble(result);
cout << "Score: " << score << endl;
return 0;
}
cout << "* End." << endl;
}
// int main(int argc, char** argv)
// {
// auto [data_tensor, y_label, featuresx, classNamex, statesx] = loadDataset("iris", true);
// // CPyInstance pInstance;
// // auto wrapper = PyWrap();
// PyWrap* wrapper = PyWrap::GetInstance();
// // PyWrap* wrapper = PyWrap::GetInstance();
// int m = data_tensor.size(0);
// int n = data_tensor.size(1);
// auto data_numpy = np::from_data(data_tensor.data_ptr(), np::dtype::get_builtin<float>(), p::make_tuple(m, n), p::make_tuple(sizeof(data_tensor.dtype()) * 2 * n, sizeof(data_tensor.dtype()) * 2), p::object());
// data_numpy = data_numpy.transpose();
// auto y_numpy = np::from_data(y_label.data_ptr(), np::dtype::get_builtin<int32_t>(), p::make_tuple(n), p::make_tuple(sizeof(y_label.dtype()) * 2), p::object());
// cout << "Numpy array data: " << endl;
// print_array(data_numpy);
// cout << "Numpy array labels: " << endl;
// print_array(y_numpy);
// cout << "primero" << endl;
// CPyObject p = data_numpy.ptr();
// CPyObject yp = y_numpy.ptr();
// string moduleName = "sklearn.svm";
// string className = "SVC";
// string method = "_repr_html_";
// // CPyObject module = PyImport_ImportModule(moduleName.c_str());
// // if (PyErr_Occurred()) {
// // errorAbort("Could't import module " + moduleName);
// // }
// // CPyObject classObject = PyObject_GetAttrString(module, className.c_str());
// // if (PyErr_Occurred()) {
// // errorAbort("Couldn't find class " + className);
// // }
// // CPyObject instance = PyObject_CallObject(classObject, NULL);
// // if (PyErr_Occurred()) {
// // errorAbort("Couldn't create instance of class " + className);
// // }
// // wrapper.moduleClassMap.insert({ { moduleName, className }, { module, classObject, instance } });
// wrapper->importClass(moduleName, className);
// PyObject* instance = wrapper->getClass(moduleName, className);
// CPyObject result;
// if (!(result = PyObject_CallMethod(instance, method.c_str(), NULL)))
// errorAbort("Couldn't call method " + method);
// std::string value = PyUnicode_AsUTF8(result);
// cout << "Version: " << value << endl;
// cout << "Calling fit" << endl;
// p.AddRef();
// yp.AddRef();
// method = "fit";
// wrapper->fit(moduleName, className, p, yp);
// // PyObject* instance2 = wrapper->getClass(moduleName, className);
// // if (!(result = PyObject_CallMethodObjArgs(instance2, PyUnicode_FromString(method.c_str()), p.getObject(), yp.getObject(), NULL)))
// // errorAbort("Couldn't call method fit");
// // method = "fit";
// // if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), p.getObject(), yp.getObject(), NULL)))
// // errorAbort("Couldn't call method fit");
// cout << "Calling score" << endl;
// // method = "score";
// // if (!(result = PyObject_CallMethodObjArgs(instance, PyUnicode_FromString(method.c_str()), p.getObject(), yp.getObject(), NULL)))
// // errorAbort("Couldn't call method score");
// // float score = PyFloat_AsDouble(result);
// auto score = wrapper->score(moduleName, className, p, yp);
// cout << "Score: " << score << endl;
// wrapper->clean(moduleName, className);
// return 0;
// }
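PyHelper.hpp itself is not part of this diff. From the way CPyInstance and CPyObject are used above (RAII interpreter lifetime, getObject()/AddRef() on wrapped pointers, implicit conversion to PyObject*), the wrappers presumably look roughly like the following; every body here is an assumption, and copy/move handling is omitted:

#include <Python.h>

class CPyInstance {                               // RAII guard for the embedded interpreter
public:
    CPyInstance() { Py_Initialize(); }
    ~CPyInstance() { Py_Finalize(); }
};

class CPyObject {                                 // thin holder for a PyObject*
    PyObject* p = nullptr;
public:
    CPyObject() = default;
    CPyObject(PyObject* obj) : p(obj) {}
    ~CPyObject() { Release(); }
    PyObject* getObject() { return p; }
    void AddRef() { if (p) Py_INCREF(p); }        // callers AddRef() when p holds a borrowed pointer
    void Release() { if (p) { Py_DECREF(p); p = nullptr; } }
    CPyObject& operator=(PyObject* obj) { Release(); p = obj; return *this; }
    operator PyObject*() { return p; }
};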
src/main.cc
@@ -15,7 +15,7 @@ class Paths {
public:
static string datasets()
{
return "/home/rmontanana/Code/discretizbench/datasets/";
return "../discretizbench/datasets/";
}
};

@@ -40,21 +40,50 @@ tuple<Tensor, Tensor, vector<string>, string, map<string, vector<int>>> loadData
return { Xd, torch::tensor(y, torch::kInt32), features, className, states };
}

// int main(int argc, char* argv[])
// {
// auto [X, y, features, className, states] = loadDataset("iris", true);
// auto stree = pywrap::STree();
// stree.version();
// auto svc = pywrap::SVC();
// svc.version();
// cout << "Graph: " << stree.graph() << endl;
// stree.version();
// cout << string(80, '-') << endl;
// cout << "X: " << X.sizes() << endl;
// cout << "y: " << y.sizes() << endl;
// auto result = stree.fit(X, y, features, className, states);
// result = svc.fit(X, y, features, className, states);
// cout << "Now calling score" << endl;
// // auto result1 = stree.score(X, y);
// // auto result2 = svc.score(X, y);
// // cout << "STree score " << result1 << endl;
// // cout << "SVC score " << result2 << endl;
// return 0;
// }
int main(int argc, char* argv[])
{
auto [X, y, features, className, states] = loadDataset("iris", true);
auto stree = pywrap::STree();
stree.version();
auto svc = pywrap::SVC();
//svc.version();
cout << "Graph: " << stree.graph() << endl;
stree.version();
cout << string(80, '-') << endl;
cout << "X: " << X.sizes() << endl;
cout << "y: " << y.sizes() << endl;
// auto result = stree.fit(X, y, features, className, states);
// cout << "Now calling score" << endl;
// auto result2 = stree.score(X, y);
// cout << "SVC score " << result2 << endl;
return 0;
cout << "* Begin." << endl;
{
auto [X, y, features, className, states] = loadDataset("iris", true);
cout << "X: " << X.sizes() << endl;
cout << "y: " << y.sizes() << endl;
cout << "y: " << y << endl;
auto clf = pywrap::PyClassifier("stree", "Stree");
cout << "STree Version: " << clf.version() << endl;
// if (true) {
// auto svc = pywrap::PyClassifier("sklearn.svm", "SVC");
// cout << "SVC Version: " << svc.callMethodString("_repr_html_") << endl;
// cout << "Calling fit" << endl;
// svc.fit(X, y, features, className, states);
// cout << "Calling score" << endl;
// cout << "SVC Score: " << svc.score(X, y) << endl;
// }
cout << "Graph: " << clf.graph() << endl;
cout << "Calling fit" << endl;
clf.fit(X, y, features, className, states);
cout << "Calling score" << endl;
cout << "STree Score: " << clf.score(X, y) << endl;
}
cout << "* End." << endl;
}