First commit

This commit is contained in:
2025-06-22 00:31:33 +02:00
parent a52c20d1fb
commit 4bdbcad256
110 changed files with 31991 additions and 1 deletions

View File

@@ -0,0 +1,39 @@
# This Makefile is used under Linux
MATLABDIR ?= /usr/local/matlab
# for Mac
# MATLABDIR ?= /opt/local/matlab

CXX ?= g++
#CXX = g++-4.1
CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..

MEX = $(MATLABDIR)/bin/mex
MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
# comment the following line if you use MATLAB on 32-bit computer
MEX_OPTION += -largeArrayDims
MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)

# FIX: these targets never name real files; declare them phony so a
# stray file called e.g. `clean` or `binary` cannot break the build.
.PHONY: all matlab octave binary clean

all: matlab

matlab: binary

octave:
	@echo "please type make under Octave"

# The four MEX binaries; extension is queried from mexext above.
binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)

svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.cpp svm_model_matlab.c
	$(MEX) $(MEX_OPTION) svmpredict.c ../svm.cpp svm_model_matlab.c

svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.cpp svm_model_matlab.c
	$(MEX) $(MEX_OPTION) svmtrain.c ../svm.cpp svm_model_matlab.c

libsvmread.$(MEX_EXT): libsvmread.c
	$(MEX) $(MEX_OPTION) libsvmread.c

libsvmwrite.$(MEX_EXT): libsvmwrite.c
	$(MEX) $(MEX_OPTION) libsvmwrite.c

clean:
	rm -f *~ *.o *.mex* *.obj

243
libsvm-3.36/matlab/README Normal file
View File

@@ -0,0 +1,243 @@
-----------------------------------------
--- MATLAB/OCTAVE interface of LIBSVM ---
-----------------------------------------
Table of Contents
=================
- Introduction
- Installation
- Usage
- Returned Model Structure
- Other Utilities
- Examples
- Additional Information
Introduction
============
This tool provides a simple interface to LIBSVM, a library for support vector
machines (http://www.csie.ntu.edu.tw/~cjlin/libsvm). It is very easy to use as
the usage and the way of specifying parameters are the same as that of LIBSVM.
Installation
============
On Windows systems, pre-built mex files are already in the
directory '..\windows', so please just copy them to the matlab
directory. Now we provide binary files only for 64bit MATLAB on
Windows. If you would like to re-build the package, please rely on the
following steps.
We recommend using make.m on both MATLAB and OCTAVE. Just type 'make'
to build 'libsvmread.mex', 'libsvmwrite.mex', 'svmtrain.mex', and
'svmpredict.mex'.
On MATLAB or Octave:
>> make
If make.m does not work on MATLAB (especially for Windows), try 'mex
-setup' to choose a suitable compiler for mex. Make sure your compiler
is accessible and workable. Then type 'make' to do the installation.
Example:
matlab>> mex -setup
MATLAB will choose the default compiler. If you have multiple compilers,
a list is given and you can choose one from the list. For more details,
please check the following page:
https://www.mathworks.com/help/matlab/matlab_external/choose-c-or-c-compilers.html
On Windows, make.m has been tested via using Visual C++.
On Unix systems, if neither make.m nor 'mex -setup' works, please use
Makefile and type 'make' in a command window. Note that we assume
your MATLAB is installed in '/usr/local/matlab'. If not, please change
MATLABDIR in Makefile.
Example:
linux> make
To use octave, type 'make octave':
Example:
linux> make octave
For a list of supported/compatible compilers for MATLAB, please check
the following page:
http://www.mathworks.com/support/compilers/current_release/
Usage
=====
matlab> model = svmtrain(training_label_vector, training_instance_matrix [, 'libsvm_options']);
-training_label_vector:
An m by 1 vector of training labels (type must be double).
-training_instance_matrix:
An m by n matrix of m training instances with n features.
It can be dense or sparse (type must be double).
-libsvm_options:
A string of training options in the same format as that of LIBSVM.
matlab> [predicted_label, accuracy, decision_values/prob_estimates] = svmpredict(testing_label_vector, testing_instance_matrix, model [, 'libsvm_options']);
matlab> [predicted_label] = svmpredict(testing_label_vector, testing_instance_matrix, model [, 'libsvm_options']);
-testing_label_vector:
An m by 1 vector of prediction labels. If labels of test
data are unknown, simply use any random values. (type must be double)
-testing_instance_matrix:
An m by n matrix of m testing instances with n features.
It can be dense or sparse. (type must be double)
-model:
The output of svmtrain.
-libsvm_options:
A string of testing options in the same format as that of LIBSVM.
Returned Model Structure
========================
The 'svmtrain' function returns a model which can be used for future
prediction. It is a structure and is organized as [Parameters, nr_class,
totalSV, rho, Label, ProbA, ProbB, Prob_density_marks, nSV, sv_coef, SVs]:
-Parameters: parameters
-nr_class: number of classes; = 2 for regression/one-class svm
-totalSV: total #SV
-rho: -b of the decision function(s) wx+b
-Label: label of each class; empty for regression/one-class SVM
-sv_indices: values in [1,...,num_training_data] to indicate SVs in the training set
-ProbA: pairwise probability information; empty if -b 0 or in one-class SVM
-ProbB: pairwise probability information; empty if -b 0 or in one-class SVM
-Prob_density_marks: probability information for one-class SVM; empty if -b 0 or not in one-class SVM
-nSV: number of SVs for each class; empty for regression/one-class SVM
-sv_coef: coefficients for SVs in decision functions
-SVs: support vectors
If you do not use the option '-b 1', ProbA and ProbB are empty
matrices. If the '-v' option is specified, cross validation is
conducted and the returned model is just a scalar: cross-validation
accuracy for classification and mean-squared error for regression.
More details about this model can be found in LIBSVM FAQ
(http://www.csie.ntu.edu.tw/~cjlin/libsvm/faq.html) and LIBSVM
implementation document
(http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf).
Result of Prediction
====================
The function 'svmpredict' has three outputs. The first one,
predicted_label, is a vector of predicted labels. The second output,
accuracy, is a vector including accuracy (for classification), mean
squared error, and squared correlation coefficient (for regression).
The third is a matrix containing decision values or probability
estimates (if '-b 1' is specified). If k is the number of classes
in training data, for decision values, each row includes results of
predicting k(k-1)/2 binary-class SVMs. For classification, k = 1 is a
special case. Decision value +1 is returned for each testing instance,
instead of an empty vector. For probabilities, each row contains k values
indicating the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'Label' field
in the model structure.
For one-class SVM, each row contains two elements for probabilities
of normal instance/outlier.
Other Utilities
===============
A matlab function libsvmread reads files in LIBSVM format:
[label_vector, instance_matrix] = libsvmread('data.txt');
Two outputs are labels and instances, which can then be used as inputs
of svmtrain or svmpredict.
A matlab function libsvmwrite writes Matlab matrix to a file in LIBSVM format:
libsvmwrite('data.txt', label_vector, instance_matrix)
The instance_matrix must be a sparse matrix. (type must be double)
For 32bit and 64bit MATLAB on Windows, pre-built binary files are ready
in the directory `..\windows', but in future releases, we will only
include 64bit MATLAB binary files.
These codes are prepared by Rong-En Fan and Kai-Wei Chang from National
Taiwan University.
Examples
========
Train and test on the provided data heart_scale:
matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> model = svmtrain(heart_scale_label, heart_scale_inst, '-c 1 -g 0.07');
matlab> [predict_label, accuracy, dec_values] = svmpredict(heart_scale_label, heart_scale_inst, model); % test the training data
For probability estimates, you need '-b 1' for training and testing:
matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> model = svmtrain(heart_scale_label, heart_scale_inst, '-c 1 -g 0.07 -b 1');
matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> [predict_label, accuracy, prob_estimates] = svmpredict(heart_scale_label, heart_scale_inst, model, '-b 1');
To use precomputed kernel, you must include sample serial number as
the first column of the training and testing data (assume your kernel
matrix is K, # of instances is n):
matlab> K1 = [(1:n)', K]; % include sample serial number as first column
matlab> model = svmtrain(label_vector, K1, '-t 4');
matlab> [predict_label, accuracy, dec_values] = svmpredict(label_vector, K1, model); % test the training data
We give the following detailed example by splitting heart_scale into
150 training and 120 testing data. Constructing a linear kernel
matrix and then using the precomputed kernel gives exactly the same
testing error as using the LIBSVM built-in linear kernel.
matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab>
matlab> % Split Data
matlab> train_data = heart_scale_inst(1:150,:);
matlab> train_label = heart_scale_label(1:150,:);
matlab> test_data = heart_scale_inst(151:270,:);
matlab> test_label = heart_scale_label(151:270,:);
matlab>
matlab> % Linear Kernel
matlab> model_linear = svmtrain(train_label, train_data, '-t 0');
matlab> [predict_label_L, accuracy_L, dec_values_L] = svmpredict(test_label, test_data, model_linear);
matlab>
matlab> % Precomputed Kernel
matlab> model_precomputed = svmtrain(train_label, [(1:150)', train_data*train_data'], '-t 4');
matlab> [predict_label_P, accuracy_P, dec_values_P] = svmpredict(test_label, [(1:120)', test_data*train_data'], model_precomputed);
matlab>
matlab> accuracy_L % Display the accuracy using linear kernel
matlab> accuracy_P % Display the accuracy using precomputed kernel
Note that for testing, you can put anything in the
testing_label_vector. For more details of precomputed kernels, please
read the section ``Precomputed Kernels'' in the README of the LIBSVM
package.
Additional Information
======================
This interface was initially written by Jun-Cheng Chen, Kuan-Jen Peng,
Chih-Yuan Yang and Chih-Huai Cheng from Department of Computer
Science, National Taiwan University. The current version was prepared
by Rong-En Fan and Ting-Fan Wu. If you find this tool useful, please
cite LIBSVM as follows
Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support
vector machines. ACM Transactions on Intelligent Systems and
Technology, 2:27:1--27:27, 2011. Software available at
http://www.csie.ntu.edu.tw/~cjlin/libsvm
For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>,
or check the FAQ page:
http://www.csie.ntu.edu.tw/~cjlin/libsvm/faq.html#/Q10:_MATLAB_interface

View File

@@ -0,0 +1,212 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include "mex.h"
#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
#ifndef max
#define max(x,y) (((x)>(y))?(x):(y))
#endif
#ifndef min
#define min(x,y) (((x)<(y))?(x):(y))
#endif
/* Print the calling convention of libsvmread to the MATLAB console. */
void exit_with_help()
{
	static const char *usage =
		"Usage: [label_vector, instance_matrix] = libsvmread('filename');\n";
	mexPrintf("%s", usage);
}
/* Set every requested output to an empty 0x0 double matrix, so the
 * caller gets well-defined (empty) results on any error path. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
	int out;
	for(out = 0; out < nlhs; out++)
		plhs[out] = mxCreateDoubleMatrix(0, 0, mxREAL);
}
/* Shared input-line buffer and its current capacity; read_problem()
 * allocates the initial buffer before calling readline(). */
static char *line;
static int max_line_len;

/* Read one full line from `input` into the global buffer `line`,
 * doubling the buffer until the terminating newline (or EOF) is seen.
 * Returns `line`, or NULL when no more data is available. */
static char* readline(FILE *input)
{
	int used;

	if(fgets(line, max_line_len, input) == NULL)
		return NULL;

	/* Keep appending until the buffer holds the whole line. */
	for(;;)
	{
		if(strrchr(line, '\n') != NULL)
			break;
		max_line_len *= 2;
		line = (char *) realloc(line, max_line_len);
		used = (int) strlen(line);
		if(fgets(line + used, max_line_len - used, input) == NULL)
			break;
	}
	return line;
}
// read in a problem (in libsvm format)
/* Parse the LIBSVM-format file `filename` into two MATLAB outputs:
 *   plhs[0] = l x 1 dense label vector
 *   plhs[1] = l x max_index sparse instance matrix
 * The file is scanned twice: first to count instances (l) and non-zero
 * entries (elements), then to fill the pre-allocated arrays.  On any
 * error the outputs are set to empty matrices via fake_answer(). */
void read_problem(const char *filename, int nlhs, mxArray *plhs[])
{
	int max_index, min_index, inst_max_index;
	size_t elements, k, i, l=0;
	FILE *fp = fopen(filename,"r");
	char *endptr;
	mwIndex *ir, *jc;
	double *labels, *samples;

	if(fp == NULL)
	{
		mexPrintf("can't open input file %s\n",filename);
		fake_answer(nlhs, plhs);
		return;
	}

	max_line_len = 1024;
	line = (char *) malloc(max_line_len*sizeof(char));

	// First pass: count lines and index:value pairs, track index range.
	max_index = 0;
	min_index = 1; // our index starts from 1
	elements = 0;
	while(readline(fp) != NULL)
	{
		char *idx, *val;
		// features
		int index = 0;
		inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
		strtok(line," \t"); // label
		while (1)
		{
			idx = strtok(NULL,":"); // index:value
			val = strtok(NULL," \t");
			if(val == NULL)
				break;

			errno = 0;
			index = (int) strtol(idx,&endptr,10);
			// indices must be valid integers, strictly increasing within a line
			if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index)
			{
				mexPrintf("Wrong input format at line %d\n",l+1);
				fake_answer(nlhs, plhs);
				return;
			}
			else
				inst_max_index = index;

			min_index = min(min_index, index);
			elements++;
		}
		max_index = max(max_index, inst_max_index);
		l++;
	}
	rewind(fp);

	// y
	plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL);
	// x^T
	// If indices start at <= 0 (precomputed kernel), widen so shifted
	// indices still fit.
	if (min_index <= 0)
		plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL);
	else
		plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL);

	labels = mxGetPr(plhs[0]);
	samples = mxGetPr(plhs[1]);
	ir = mxGetIr(plhs[1]);
	jc = mxGetJc(plhs[1]);

	// Second pass: fill labels and the (feature x instance) sparse matrix.
	k=0;
	for(i=0;i<l;i++)
	{
		char *idx, *val, *label;
		jc[i] = k;

		readline(fp);

		label = strtok(line," \t\n");
		if(label == NULL)
		{
			mexPrintf("Empty line at line %d\n",i+1);
			fake_answer(nlhs, plhs);
			return;
		}
		labels[i] = strtod(label,&endptr);
		if(endptr == label || *endptr != '\0')
		{
			mexPrintf("Wrong input format at line %d\n",i+1);
			fake_answer(nlhs, plhs);
			return;
		}

		// features
		while(1)
		{
			idx = strtok(NULL,":");
			val = strtok(NULL," \t");
			if(val == NULL)
				break;

			ir[k] = (mwIndex) (strtol(idx,&endptr,10) - min_index); // precomputed kernel has <index> start from 0

			errno = 0;
			samples[k] = strtod(val,&endptr);
			if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
			{
				mexPrintf("Wrong input format at line %d\n",i+1);
				fake_answer(nlhs, plhs);
				return;
			}
			++k;
		}
	}
	jc[l] = k;

	fclose(fp);
	free(line);

	// The matrix was built column-per-instance; transpose so each row
	// is one instance, matching the documented interface.
	{
		mxArray *rhs[1], *lhs[1];
		rhs[0] = plhs[1];
		if(mexCallMATLAB(1, lhs, 1, rhs, "transpose"))
		{
			mexPrintf("Error: cannot transpose problem\n");
			fake_answer(nlhs, plhs);
			return;
		}
		plhs[1] = lhs[0];
	}
}
/* MEX gateway: [labels, instances] = libsvmread('filename').
 * Validates argument counts, copies the filename into a bounded
 * buffer, and delegates to read_problem().  On misuse, prints usage
 * and returns empty outputs. */
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
#define filename_size 256
	char filename[filename_size];

	// exactly one input (the filename) and two outputs are required
	if(nrhs != 1 || nlhs != 2)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	// mxGetString returns 1 on failure or truncation
	if(mxGetString(prhs[0], filename, filename_size) == 1){
		mexPrintf("Error: wrong or too long filename\n");
		fake_answer(nlhs, plhs);
		return;
	}

	read_problem(filename, nlhs, plhs);

	return;
}

View File

@@ -0,0 +1,119 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mex.h"
#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
/* Print the calling convention of libsvmwrite to the MATLAB console. */
void exit_with_help()
{
	static const char *usage =
		"Usage: libsvmwrite('filename', label_vector, instance_matrix);\n";
	mexPrintf("%s", usage);
}
/* Fill each requested output with an empty 0x0 double matrix so error
 * paths still leave every plhs[] slot well defined. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
	int out;
	for(out = 0; out < nlhs; out++)
		plhs[out] = mxCreateDoubleMatrix(0, 0, mxREAL);
}
/* Write label_vec (m x 1 double) and instance_mat (m x n sparse double)
 * to `filename` in LIBSVM format: "<label> <index>:<value> ...", with
 * 1-based feature indices.  Prints a message and returns on any error. */
void libsvmwrite(const char *filename, const mxArray *label_vec, const mxArray *instance_mat)
{
	FILE *fp = fopen(filename,"w");
	mwIndex *ir, *jc, k, low, high;
	size_t i, l, label_vector_row_num;
	double *samples, *labels;
	mxArray *instance_mat_col; // instance sparse matrix in column format

	if(fp ==NULL)
	{
		mexPrintf("can't open output file %s\n",filename);
		return;
	}

	// transpose instance matrix so each column is one instance
	{
		mxArray *prhs[1], *plhs[1];
		prhs[0] = mxDuplicateArray(instance_mat);
		if(mexCallMATLAB(1, plhs, 1, prhs, "transpose"))
		{
			mexPrintf("Error: cannot transpose instance matrix\n");
			fclose(fp); // FIX: do not leak the output file on error
			return;
		}
		instance_mat_col = plhs[0];
		mxDestroyArray(prhs[0]);
	}

	// the number of instances
	l = mxGetN(instance_mat_col);
	label_vector_row_num = mxGetM(label_vec);

	if(label_vector_row_num!=l)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		fclose(fp); // FIX: do not leak the output file on error
		return;
	}

	// each column is one instance
	labels = mxGetPr(label_vec);
	samples = mxGetPr(instance_mat_col);
	ir = mxGetIr(instance_mat_col);
	jc = mxGetJc(instance_mat_col);

	for(i=0;i<l;i++)
	{
		// %.17g round-trips a double exactly
		fprintf(fp,"%.17g", labels[i]);

		low = jc[i], high = jc[i+1];
		for(k=low;k<high;k++)
			// FIX: cast to unsigned long to match %lu portably; the
			// original passed a size_t to %lu, which is undefined where
			// the two types differ in width (e.g. 64-bit Windows).
			fprintf(fp," %lu:%g", (unsigned long)ir[k]+1, samples[k]);

		fprintf(fp,"\n");
	}

	fclose(fp);
	return;
}
/* MEX gateway: libsvmwrite('filename', label_vector, instance_matrix).
 * Accepts no outputs; requires exactly three inputs with double label
 * vector and a sparse double instance matrix.  Prints usage on misuse. */
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	if(nlhs > 0)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	// Transform the input Matrix to libsvm format
	if(nrhs == 3)
	{
		char filename[256];

		if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2]))
		{
			mexPrintf("Error: label vector and instance matrix must be double\n");
			return;
		}

		// FIX: bound the copy by the destination size instead of
		// mxGetN(prhs[0])+1, which could overflow filename[] for a long
		// path; mxGetString returns 1 on failure or truncation (same
		// handling as libsvmread).
		if(mxGetString(prhs[0], filename, sizeof(filename)) == 1)
		{
			mexPrintf("Error: wrong or too long filename\n");
			return;
		}

		if(mxIsSparse(prhs[2]))
			libsvmwrite(filename, prhs[1], prhs[2]);
		else
		{
			mexPrintf("Instance_matrix must be sparse\n");
			return;
		}
	}
	else
	{
		exit_with_help();
		return;
	}
}

22
libsvm-3.36/matlab/make.m Normal file
View File

@@ -0,0 +1,22 @@
% This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix
% Builds the four MEX binaries (libsvmread, libsvmwrite, svmtrain,
% svmpredict); the Octave and MATLAB branches differ only in flags.
function make()
try
	% This part is for OCTAVE
	if (exist ('OCTAVE_VERSION', 'builtin'))
		mex libsvmread.c
		mex libsvmwrite.c
		% -I.. picks up svm.h/svm.cpp from the LIBSVM root directory
		mex -I.. svmtrain.c ../svm.cpp svm_model_matlab.c
		mex -I.. svmpredict.c ../svm.cpp svm_model_matlab.c
	% This part is for MATLAB
	% Add -largeArrayDims on 64-bit machines of MATLAB
	else
		mex -largeArrayDims libsvmread.c
		mex -largeArrayDims libsvmwrite.c
		mex -I.. -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c
		mex -I.. -largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c
	end
catch err
	% Report the failing file/line, then point the user at the README
	fprintf('Error: %s failed (line %d)\n', err.stack(1).file, err.stack(1).line);
	disp(err.message);
	fprintf('=> Please check README for detailed instructions.\n');
end

View File

@@ -0,0 +1,400 @@
#include <stdlib.h>
#include <string.h>
#include "svm.h"
#include "mex.h"
#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
// Number of fields in the MATLAB model struct returned by svmtrain.
#define NUM_OF_RETURN_FIELD 12

// Shorthand for a typed malloc of n elements.
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))

/* Field names of the MATLAB model struct.  The ORDER here is the
 * contract: model_to_matlab_structure() fills rhs[] in this order and
 * matlab_matrix_to_model() reads the fields back positionally. */
static const char *field_names[] = {
	"Parameters",
	"nr_class",
	"totalSV",
	"rho",
	"Label",
	"sv_indices",
	"ProbA",
	"ProbB",
	"Prob_density_marks",
	"nSV",
	"sv_coef",
	"SVs"
};
/* Convert a trained struct svm_model into the MATLAB struct stored in
 * plhs[0], with NUM_OF_RETURN_FIELD fields in field_names[] order.
 * num_of_feature is the width of the SVs matrix (forced to 1 for
 * precomputed kernels).  Returns NULL on success, or an error-message
 * string on failure. */
const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model)
{
	int i, j, n;
	double *ptr;
	mxArray *return_model, **rhs;
	int out_id = 0;

	rhs = (mxArray **)mxMalloc(sizeof(mxArray *)*NUM_OF_RETURN_FIELD);

	// Parameters: [svm_type; kernel_type; degree; gamma; coef0]
	rhs[out_id] = mxCreateDoubleMatrix(5, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	ptr[0] = model->param.svm_type;
	ptr[1] = model->param.kernel_type;
	ptr[2] = model->param.degree;
	ptr[3] = model->param.gamma;
	ptr[4] = model->param.coef0;
	out_id++;

	// nr_class
	rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	ptr[0] = model->nr_class;
	out_id++;

	// total SV
	rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	ptr[0] = model->l;
	out_id++;

	// rho: one value per pairwise decision function
	n = model->nr_class*(model->nr_class-1)/2;
	rhs[out_id] = mxCreateDoubleMatrix(n, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	for(i = 0; i < n; i++)
		ptr[i] = model->rho[i];
	out_id++;

	// Label (empty for regression/one-class, where label is NULL)
	if(model->label)
	{
		rhs[out_id] = mxCreateDoubleMatrix(model->nr_class, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < model->nr_class; i++)
			ptr[i] = model->label[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// sv_indices
	if(model->sv_indices)
	{
		rhs[out_id] = mxCreateDoubleMatrix(model->l, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < model->l; i++)
			ptr[i] = model->sv_indices[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// probA (empty unless trained with probability estimates)
	if(model->probA != NULL)
	{
		rhs[out_id] = mxCreateDoubleMatrix(n, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < n; i++)
			ptr[i] = model->probA[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id ++;

	// probB
	if(model->probB != NULL)
	{
		rhs[out_id] = mxCreateDoubleMatrix(n, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < n; i++)
			ptr[i] = model->probB[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// prob_density_marks (one-class SVM probability model; fixed 10 marks)
	if(model->prob_density_marks != NULL)
	{
		int nr_marks = 10;
		rhs[out_id] = mxCreateDoubleMatrix(nr_marks, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < nr_marks; i++)
			ptr[i] = model->prob_density_marks[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// nSV
	if(model->nSV)
	{
		rhs[out_id] = mxCreateDoubleMatrix(model->nr_class, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < model->nr_class; i++)
			ptr[i] = model->nSV[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// sv_coef: l x (nr_class-1), copied column by column
	rhs[out_id] = mxCreateDoubleMatrix(model->l, model->nr_class-1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	for(i = 0; i < model->nr_class-1; i++)
		for(j = 0; j < model->l; j++)
			ptr[(i*(model->l))+j] = model->sv_coef[i][j];
	out_id++;

	// SVs
	{
		int ir_index, nonzero_element;
		mwIndex *ir, *jc;
		mxArray *pprhs[1], *pplhs[1];

		if(model->param.kernel_type == PRECOMPUTED)
		{
			// precomputed kernel stores one value (the sample index) per SV
			nonzero_element = model->l;
			num_of_feature = 1;
		}
		else
		{
			// count non-zeros across all SVs (nodes until index == -1)
			nonzero_element = 0;
			for(i = 0; i < model->l; i++) {
				j = 0;
				while(model->SV[i][j].index != -1)
				{
					nonzero_element++;
					j++;
				}
			}
		}

		// SV in column, easier accessing
		rhs[out_id] = mxCreateSparse(num_of_feature, model->l, nonzero_element, mxREAL);
		ir = mxGetIr(rhs[out_id]);
		jc = mxGetJc(rhs[out_id]);
		ptr = mxGetPr(rhs[out_id]);
		jc[0] = ir_index = 0;
		for(i = 0;i < model->l; i++)
		{
			if(model->param.kernel_type == PRECOMPUTED)
			{
				// make a (1 x model->l) matrix
				ir[ir_index] = 0;
				ptr[ir_index] = model->SV[i][0].value;
				ir_index++;
				jc[i+1] = jc[i] + 1;
			}
			else
			{
				int x_index = 0;
				while (model->SV[i][x_index].index != -1)
				{
					// svm_node indices are 1-based; sparse rows are 0-based
					ir[ir_index] = model->SV[i][x_index].index - 1;
					ptr[ir_index] = model->SV[i][x_index].value;
					ir_index++, x_index++;
				}
				jc[i+1] = jc[i] + x_index;
			}
		}
		// transpose back to SV in row
		pprhs[0] = rhs[out_id];
		if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
			return "cannot transpose SV matrix";
		rhs[out_id] = pplhs[0];
		out_id++;
	}

	/* Create a struct matrix contains NUM_OF_RETURN_FIELD fields */
	return_model = mxCreateStructMatrix(1, 1, NUM_OF_RETURN_FIELD, field_names);

	/* Fill struct matrix with input arguments */
	for(i = 0; i < NUM_OF_RETURN_FIELD; i++)
		mxSetField(return_model,0,field_names[i],mxDuplicateArray(rhs[i]));
	/* return */
	plhs[0] = return_model;
	mxFree(rhs);

	return NULL;
}
/* Rebuild a struct svm_model from the MATLAB model struct produced by
 * model_to_matlab_structure().  Fields are read positionally, so the
 * struct must have exactly NUM_OF_RETURN_FIELD fields in field_names[]
 * order.  Returns the model (caller owns it; free_sv is set so
 * svm_free_and_destroy_model releases the SVs), or NULL with *msg set
 * on failure. */
struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **msg)
{
	int i, j, n, num_of_fields;
	double *ptr;
	int id = 0;
	struct svm_node *x_space;
	struct svm_model *model;
	mxArray **rhs;

	num_of_fields = mxGetNumberOfFields(matlab_struct);
	if(num_of_fields != NUM_OF_RETURN_FIELD)
	{
		*msg = "number of return field is not correct";
		return NULL;
	}
	rhs = (mxArray **) mxMalloc(sizeof(mxArray *)*num_of_fields);

	for(i=0;i<num_of_fields;i++)
		rhs[i] = mxGetFieldByNumber(matlab_struct, 0, i);

	// NULL-initialize every optional array so a partial failure can be
	// cleaned up by svm_free_and_destroy_model().
	model = Malloc(struct svm_model, 1);
	model->rho = NULL;
	model->probA = NULL;
	model->probB = NULL;
	model->prob_density_marks = NULL;
	model->label = NULL;
	model->sv_indices = NULL;
	model->nSV = NULL;
	model->free_sv = 1; // XXX

	// Parameters: [svm_type; kernel_type; degree; gamma; coef0]
	ptr = mxGetPr(rhs[id]);
	model->param.svm_type = (int)ptr[0];
	model->param.kernel_type = (int)ptr[1];
	model->param.degree = (int)ptr[2];
	model->param.gamma = ptr[3];
	model->param.coef0 = ptr[4];
	id++;

	// nr_class
	ptr = mxGetPr(rhs[id]);
	model->nr_class = (int)ptr[0];
	id++;

	// total #SV
	ptr = mxGetPr(rhs[id]);
	model->l = (int)ptr[0];
	id++;

	// rho: one value per pairwise decision function
	n = model->nr_class * (model->nr_class-1)/2;
	model->rho = (double*) malloc(n*sizeof(double));
	ptr = mxGetPr(rhs[id]);
	for(i=0;i<n;i++)
		model->rho[i] = ptr[i];
	id++;

	// label (empty for regression/one-class SVM)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->label = (int*) malloc(model->nr_class*sizeof(int));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<model->nr_class;i++)
			model->label[i] = (int)ptr[i];
	}
	id++;

	// sv_indices
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->sv_indices = (int*) malloc(model->l*sizeof(int));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<model->l;i++)
			model->sv_indices[i] = (int)ptr[i];
	}
	id++;

	// probA
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->probA = (double*) malloc(n*sizeof(double));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<n;i++)
			model->probA[i] = ptr[i];
	}
	id++;

	// probB
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->probB = (double*) malloc(n*sizeof(double));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<n;i++)
			model->probB[i] = ptr[i];
	}
	id++;

	// prob_density_marks (one-class SVM probability model; fixed 10 marks)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		int nr_marks = 10;
		model->prob_density_marks = (double*) malloc(nr_marks*sizeof(double));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<nr_marks;i++)
			model->prob_density_marks[i] = ptr[i];
	}
	id++;

	// nSV
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->nSV = (int*) malloc(model->nr_class*sizeof(int));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<model->nr_class;i++)
			model->nSV[i] = (int)ptr[i];
	}
	id++;

	// sv_coef: (nr_class-1) rows of l coefficients
	ptr = mxGetPr(rhs[id]);
	// FIX: allocate an array of double* POINTERS; the original used
	// sizeof(double), which only happens to work on platforms where
	// sizeof(double) >= sizeof(double *).
	model->sv_coef = (double**) malloc((model->nr_class-1)*sizeof(double *));
	for( i=0 ; i< model->nr_class -1 ; i++ )
		model->sv_coef[i] = (double*) malloc((model->l)*sizeof(double));
	for(i = 0; i < model->nr_class - 1; i++)
		for(j = 0; j < model->l; j++)
			model->sv_coef[i][j] = ptr[i*(model->l)+j];
	id++;

	// SV
	{
		int sr, elements;
		int num_samples;
		mwIndex *ir, *jc;
		mxArray *pprhs[1], *pplhs[1];

		// transpose SV so each column is one support vector
		pprhs[0] = rhs[id];
		if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
		{
			svm_free_and_destroy_model(&model);
			*msg = "cannot transpose SV matrix";
			return NULL;
		}
		rhs[id] = pplhs[0];

		sr = (int)mxGetN(rhs[id]);

		ptr = mxGetPr(rhs[id]);
		ir = mxGetIr(rhs[id]);
		jc = mxGetJc(rhs[id]);

		num_samples = (int)mxGetNzmax(rhs[id]);

		// one extra node per SV for the index == -1 terminator
		elements = num_samples + sr;

		model->SV = (struct svm_node **) malloc(sr * sizeof(struct svm_node *));
		x_space = (struct svm_node *)malloc(elements * sizeof(struct svm_node));

		// SV is in column
		for(i=0;i<sr;i++)
		{
			int low = (int)jc[i], high = (int)jc[i+1];
			int x_index = 0;
			model->SV[i] = &x_space[low+i]; // +i accounts for terminators of earlier SVs
			for(j=low;j<high;j++)
			{
				model->SV[i][x_index].index = (int)ir[j] + 1; // back to 1-based
				model->SV[i][x_index].value = ptr[j];
				x_index++;
			}
			model->SV[i][x_index].index = -1;
		}

		id++;
	}
	mxFree(rhs);
	return model;
}

View File

@@ -0,0 +1,2 @@
/* Convert a trained struct svm_model into the MATLAB model struct in
 * plhs[0]; returns NULL on success or an error-message string. */
const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model);
/* Rebuild a struct svm_model from the MATLAB model struct; returns the
 * model, or NULL with *error_message set on failure. */
struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message);

View File

@@ -0,0 +1,373 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "svm.h"
#include "mex.h"
#include "svm_model_matlab.h"
#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
// Maximum length of the libsvm_options string.
#define CMD_LEN 2048

// Discarding printf-style sink, installed when -q (quiet mode) is given.
int print_null(const char *s,...) {return 0;}
// Output hook for progress/result messages; defaults to mexPrintf.
int (*info)(const char *fmt,...) = &mexPrintf;
/* Copy column `index` of the sparse matrix `prhs` into the svm_node
 * array `x`, converting 0-based sparse rows to 1-based feature indices
 * and terminating the list with index = -1. */
void read_sparse_instance(const mxArray *prhs, int index, struct svm_node *x)
{
	int out, pos, start, stop;
	mwIndex *row_idx, *col_ptr;
	double *values;

	row_idx = mxGetIr(prhs);
	col_ptr = mxGetJc(prhs);
	values = mxGetPr(prhs);

	// each column is one instance
	start = (int)col_ptr[index];
	stop = (int)col_ptr[index+1];
	out = 0;
	for(pos = start; pos < stop; pos++)
	{
		x[out].index = (int)row_idx[pos] + 1;
		x[out].value = values[pos];
		out++;
	}
	x[out].index = -1;
}
/* Replace every requested output with an empty 0x0 double matrix. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
	int out;
	for(out = 0; out < nlhs; out++)
		plhs[out] = mxCreateDoubleMatrix(0, 0, mxREAL);
}
/* Run the model over every instance in prhs[1] and fill up to three
 * outputs:
 *   plhs[0] = predicted labels (testing_instance_number x 1)
 *   plhs[1] = [accuracy%; mean squared error; squared correlation]
 *   plhs[2] = decision values, or probability estimates when
 *             predict_probability != 0
 * prhs[0] is the label vector (used only for the accuracy/error
 * statistics).  On invalid input the requested outputs are set to
 * empty matrices and the function returns. */
void predict(int nlhs, mxArray *plhs[], const mxArray *prhs[], struct svm_model *model, const int predict_probability)
{
	int label_vector_row_num, label_vector_col_num;
	int feature_number, testing_instance_number;
	int instance_index;
	double *ptr_instance, *ptr_label, *ptr_predict_label;
	double *ptr_prob_estimates, *ptr_dec_values, *ptr;
	struct svm_node *x;
	mxArray *pplhs[1]; // transposed instance sparse matrix
	mxArray *tplhs[3]; // temporary storage for plhs[]

	int correct = 0;
	int total = 0;
	double error = 0;
	double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0;

	int svm_type=svm_get_svm_type(model);
	int nr_class=svm_get_nr_class(model);
	double *prob_estimates=NULL;

	// prhs[1] = testing instance matrix
	feature_number = (int)mxGetN(prhs[1]);
	testing_instance_number = (int)mxGetM(prhs[1]);
	label_vector_row_num = (int)mxGetM(prhs[0]);
	label_vector_col_num = (int)mxGetN(prhs[0]);

	if(label_vector_row_num!=testing_instance_number)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		fake_answer(nlhs, plhs);
		return;
	}
	if(label_vector_col_num!=1)
	{
		mexPrintf("label (1st argument) should be a vector (# of column is 1).\n");
		fake_answer(nlhs, plhs);
		return;
	}

	ptr_instance = mxGetPr(prhs[1]);
	ptr_label = mxGetPr(prhs[0]);

	// transpose instance matrix
	if(mxIsSparse(prhs[1]))
	{
		if(model->param.kernel_type == PRECOMPUTED)
		{
			// precomputed kernel requires dense matrix, so we make one
			mxArray *rhs[1], *lhs[1];
			rhs[0] = mxDuplicateArray(prhs[1]);
			if(mexCallMATLAB(1, lhs, 1, rhs, "full"))
			{
				mexPrintf("Error: cannot full testing instance matrix\n");
				fake_answer(nlhs, plhs);
				return;
			}
			ptr_instance = mxGetPr(lhs[0]);
			mxDestroyArray(rhs[0]);
		}
		else
		{
			// keep a transposed sparse copy in pplhs[0] so each instance
			// is a column for read_sparse_instance()
			mxArray *pprhs[1];
			pprhs[0] = mxDuplicateArray(prhs[1]);
			if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
			{
				mexPrintf("Error: cannot transpose testing instance matrix\n");
				fake_answer(nlhs, plhs);
				return;
			}
		}
	}

	if(predict_probability)
	{
		if(svm_type==NU_SVR || svm_type==EPSILON_SVR)
			info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model));
		else
			prob_estimates = (double *) malloc(nr_class*sizeof(double));
	}

	tplhs[0] = mxCreateDoubleMatrix(testing_instance_number, 1, mxREAL);
	if(predict_probability)
	{
		// prob estimates are in plhs[2]
		if(svm_type==C_SVC || svm_type==NU_SVC || svm_type==ONE_CLASS)
		{
			// nr_class = 2 for ONE_CLASS
			tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_class, mxREAL);
		}
		else
			tplhs[2] = mxCreateDoubleMatrix(0, 0, mxREAL);
	}
	else
	{
		// decision values are in plhs[2]
		if(svm_type == ONE_CLASS ||
		   svm_type == EPSILON_SVR ||
		   svm_type == NU_SVR ||
		   nr_class == 1) // if only one class in training data, decision values are still returned.
			tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, 1, mxREAL);
		else
			tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_class*(nr_class-1)/2, mxREAL);
	}

	ptr_predict_label = mxGetPr(tplhs[0]);
	// both aliases point at tplhs[2]; only one is written per mode
	ptr_prob_estimates = mxGetPr(tplhs[2]);
	ptr_dec_values = mxGetPr(tplhs[2]);
	x = (struct svm_node*)malloc((feature_number+1)*sizeof(struct svm_node) );
	for(instance_index=0;instance_index<testing_instance_number;instance_index++)
	{
		int i;
		double target_label, predict_label;

		target_label = ptr_label[instance_index];

		if(mxIsSparse(prhs[1]) && model->param.kernel_type != PRECOMPUTED) // prhs[1]^T is still sparse
			read_sparse_instance(pplhs[0], instance_index, x);
		else
		{
			// dense: gather one row (column-major storage → stride is
			// testing_instance_number)
			for(i=0;i<feature_number;i++)
			{
				x[i].index = i+1;
				x[i].value = ptr_instance[testing_instance_number*i+instance_index];
			}
			x[feature_number].index = -1;
		}

		if(predict_probability)
		{
			if(svm_type==C_SVC || svm_type==NU_SVC || svm_type==ONE_CLASS)
			{
				predict_label = svm_predict_probability(model, x, prob_estimates);
				ptr_predict_label[instance_index] = predict_label;
				for(i=0;i<nr_class;i++)
					ptr_prob_estimates[instance_index + i * testing_instance_number] = prob_estimates[i];
			} else {
				predict_label = svm_predict(model,x);
				ptr_predict_label[instance_index] = predict_label;
			}
		}
		else
		{
			if(svm_type == ONE_CLASS ||
			   svm_type == EPSILON_SVR ||
			   svm_type == NU_SVR)
			{
				double res;
				predict_label = svm_predict_values(model, x, &res);
				ptr_dec_values[instance_index] = res;
			}
			else
			{
				double *dec_values = (double *) malloc(sizeof(double) * nr_class*(nr_class-1)/2);
				predict_label = svm_predict_values(model, x, dec_values);
				if(nr_class == 1)
					ptr_dec_values[instance_index] = 1;
				else
					for(i=0;i<(nr_class*(nr_class-1))/2;i++)
						ptr_dec_values[instance_index + i * testing_instance_number] = dec_values[i];
				free(dec_values);
			}
			ptr_predict_label[instance_index] = predict_label;
		}

		// accumulate statistics for the accuracy/MSE/correlation output
		if(predict_label == target_label)
			++correct;
		error += (predict_label-target_label)*(predict_label-target_label);
		sump += predict_label;
		sumt += target_label;
		sumpp += predict_label*predict_label;
		sumtt += target_label*target_label;
		sumpt += predict_label*target_label;
		++total;
	}
	if(svm_type==NU_SVR || svm_type==EPSILON_SVR)
	{
		info("Mean squared error = %g (regression)\n",error/total);
		info("Squared correlation coefficient = %g (regression)\n",
			((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
			((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt))
			);
	}
	else
		info("Accuracy = %g%% (%d/%d) (classification)\n",
			(double)correct/total*100,correct,total);

	// return accuracy, mean squared error, squared correlation coefficient
	tplhs[1] = mxCreateDoubleMatrix(3, 1, mxREAL);
	ptr = mxGetPr(tplhs[1]);
	ptr[0] = (double)correct/total*100;
	ptr[1] = error/total;
	ptr[2] = ((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
				((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt));

	free(x);
	if(prob_estimates != NULL)
		free(prob_estimates);

	// intentional fallthrough: fewer requested outputs just copies fewer
	// of the temporaries; plhs[0] is always filled
	switch(nlhs)
	{
		case 3:
			plhs[2] = tplhs[2];
			plhs[1] = tplhs[1];
		case 1:
		case 0:
			plhs[0] = tplhs[0];
	}
}
/* Print the svmpredict usage text and option table to the MATLAB console. */
void exit_with_help()
{
	/* Usage lines. */
	mexPrintf(
	"Usage: [predicted_label, accuracy, decision_values/prob_estimates] = svmpredict(testing_label_vector, testing_instance_matrix, model, 'libsvm_options')\n"
	" [predicted_label] = svmpredict(testing_label_vector, testing_instance_matrix, model, 'libsvm_options')\n");
	/* Parameter descriptions. */
	mexPrintf(
	"Parameters:\n"
	" model: SVM model structure from svmtrain.\n"
	" libsvm_options:\n"
	" -b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0); one-class SVM not supported yet\n"
	" -q : quiet mode (no outputs)\n");
	/* Return-value descriptions. */
	mexPrintf(
	"Returns:\n"
	" predicted_label: SVM prediction output vector.\n"
	" accuracy: a vector with accuracy, mean squared error, squared correlation coefficient.\n"
	" prob_estimates: If selected, probability estimate vector.\n");
}
// MATLAB entry point for svmpredict:
//   [labels, accuracy, dec_values/prob_estimates] =
//       svmpredict(label_vec, instance_mat, model [, 'libsvm_options'])
// Validates inputs, parses options, rebuilds the libsvm model from the
// MATLAB struct, runs predict(), then destroys the model.
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	int prob_estimate_flag = 0;
	struct svm_model *model;
	info = &mexPrintf;

	// nlhs==2 is rejected: callers get either 1 output (labels) or all 3.
	if(nlhs == 2 || nlhs > 3 || nrhs > 4 || nrhs < 3)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
		mexPrintf("Error: label vector and instance matrix must be double\n");
		fake_answer(nlhs, plhs);
		return;
	}

	if(mxIsStruct(prhs[2]))
	{
		const char *error_msg;

		// parse options
		if(nrhs==4)
		{
			int i, argc = 1;
			char cmd[CMD_LEN], *argv[CMD_LEN/2];
			int buflen = (int)mxGetN(prhs[3]) + 1;

			// Clamp the copy length: mxGetString writes up to buflen-1
			// characters plus a NUL, so passing mxGetN()+1 unclamped would
			// overflow cmd[] for option strings >= CMD_LEN characters.
			if(buflen > CMD_LEN)
				buflen = CMD_LEN;

			// put options in argv[]
			mxGetString(prhs[3], cmd, buflen);
			if((argv[argc] = strtok(cmd, " ")) != NULL)
				while((argv[++argc] = strtok(NULL, " ")) != NULL)
					;

			for(i=1;i<argc;i++)
			{
				if(argv[i][0] != '-') break;
				// every option except -q consumes one argument
				if((++i>=argc) && argv[i-1][1] != 'q')
				{
					exit_with_help();
					fake_answer(nlhs, plhs);
					return;
				}
				switch(argv[i-1][1])
				{
					case 'b':
						prob_estimate_flag = atoi(argv[i]);
						break;
					case 'q':
						i--;	// -q takes no argument; undo the consume above
						info = &print_null;
						break;
					default:
						mexPrintf("Unknown option: -%c\n", argv[i-1][1]);
						exit_with_help();
						fake_answer(nlhs, plhs);
						return;
				}
			}
		}

		model = matlab_matrix_to_model(prhs[2], &error_msg);
		if (model == NULL)
		{
			mexPrintf("Error: can't read model: %s\n", error_msg);
			fake_answer(nlhs, plhs);
			return;
		}

		if(prob_estimate_flag)
		{
			if(svm_check_probability_model(model)==0)
			{
				// fixed typo: "probabiliy" -> "probability"
				mexPrintf("Model does not support probability estimates\n");
				fake_answer(nlhs, plhs);
				svm_free_and_destroy_model(&model);
				return;
			}
		}
		else
		{
			if(svm_check_probability_model(model)!=0)
				// fixed typo: "predicton" -> "prediction"
				info("Model supports probability estimates, but disabled in prediction.\n");
		}

		predict(nlhs, plhs, prhs, model, prob_estimate_flag);
		// destroy model
		svm_free_and_destroy_model(&model);
	}
	else
	{
		mexPrintf("model file should be a struct array\n");
		fake_answer(nlhs, plhs);
	}

	return;
}

View File

@@ -0,0 +1,495 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "svm.h"
#include "mex.h"
#include "svm_model_matlab.h"
#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
#define CMD_LEN 2048
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
/* No-op print callback installed for quiet mode (-q): discards the message. */
void print_null(const char *s) { (void)s; /* intentionally unused */ }
/* Print callback routing libsvm status text to the MATLAB console.
 * Uses an explicit "%s" format: the incoming text is data, not a format
 * string — libsvm messages contain literal '%' (e.g. accuracy percentages),
 * which mexPrintf(s) would misinterpret as conversion specifiers. */
void print_string_matlab(const char *s) {mexPrintf("%s", s);}
/* Print the svmtrain usage line and full option table to the MATLAB console. */
void exit_with_help()
{
	/* Usage header. */
	mexPrintf(
	"Usage: model = svmtrain(training_label_vector, training_instance_matrix, 'libsvm_options');\n"
	"libsvm_options:\n");
	/* SVM and kernel type selection. */
	mexPrintf(
	"-s svm_type : set type of SVM (default 0)\n"
	" 0 -- C-SVC (multi-class classification)\n"
	" 1 -- nu-SVC (multi-class classification)\n"
	" 2 -- one-class SVM\n"
	" 3 -- epsilon-SVR (regression)\n"
	" 4 -- nu-SVR (regression)\n"
	"-t kernel_type : set type of kernel function (default 2)\n"
	" 0 -- linear: u'*v\n"
	" 1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
	" 2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
	" 3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
	" 4 -- precomputed kernel (kernel values in training_instance_matrix)\n");
	/* Numeric hyper-parameters and misc switches. */
	mexPrintf(
	"-d degree : set degree in kernel function (default 3)\n"
	"-g gamma : set gamma in kernel function (default 1/num_features)\n"
	"-r coef0 : set coef0 in kernel function (default 0)\n"
	"-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
	"-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
	"-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
	"-m cachesize : set cache memory size in MB (default 100)\n"
	"-e epsilon : set tolerance of termination criterion (default 0.001)\n"
	"-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
	"-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
	"-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
	"-v n: n-fold cross validation mode\n"
	"-q : quiet mode (no outputs)\n");
}
// svm arguments (file-scope state shared by the helpers below)
struct svm_parameter param;		// training parameters; set by parse_command_line
struct svm_problem prob;		// training data; set by read_problem_dense/sparse
struct svm_model *model;		// result of svm_train (non-CV path)
struct svm_node *x_space;		// node pool backing prob.x; freed in mexFunction
int cross_validation;			// nonzero when -v was given
int nr_fold;				// number of CV folds; meaningful only if cross_validation
double do_cross_validation()
{
int i;
int total_correct = 0;
double total_error = 0;
double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
double *target = Malloc(double,prob.l);
double retval = 0.0;
svm_cross_validation(&prob,&param,nr_fold,target);
if(param.svm_type == EPSILON_SVR ||
param.svm_type == NU_SVR)
{
for(i=0;i<prob.l;i++)
{
double y = prob.y[i];
double v = target[i];
total_error += (v-y)*(v-y);
sumv += v;
sumy += y;
sumvv += v*v;
sumyy += y*y;
sumvy += v*y;
}
mexPrintf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
mexPrintf("Cross Validation Squared correlation coefficient = %g\n",
((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
);
retval = total_error/prob.l;
}
else
{
for(i=0;i<prob.l;i++)
if(target[i] == prob.y[i])
++total_correct;
mexPrintf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
retval = 100.0*total_correct/prob.l;
}
free(target);
return retval;
}
// nrhs should be 3
// Parse the libsvm option string (prhs[2]) into the global `param` and the
// cross-validation globals, and install the print callback.
// Returns 0 on success, 1 on any parse error (caller prints usage).
// model_file_name is unused in the MATLAB interface.
int parse_command_line(int nrhs, const mxArray *prhs[], char *model_file_name)
{
	int i, argc = 1;
	char cmd[CMD_LEN];
	char *argv[CMD_LEN/2];
	void (*print_func)(const char *) = print_string_matlab;	// default printing to matlab display

	// default values
	param.svm_type = C_SVC;
	param.kernel_type = RBF;
	param.degree = 3;
	param.gamma = 0;	// 1/num_features
	param.coef0 = 0;
	param.nu = 0.5;
	param.cache_size = 100;
	param.C = 1;
	param.eps = 1e-3;
	param.p = 0.1;
	param.shrinking = 1;
	param.probability = 0;
	param.nr_weight = 0;
	param.weight_label = NULL;
	param.weight = NULL;
	cross_validation = 0;

	if(nrhs <= 1)
		return 1;

	if(nrhs > 2)
	{
		int buflen = (int)mxGetN(prhs[2]) + 1;

		// Clamp the copy length: mxGetString writes up to buflen-1 characters
		// plus a NUL, so passing mxGetN()+1 unclamped would overflow cmd[]
		// for option strings >= CMD_LEN characters.
		if(buflen > CMD_LEN)
			buflen = CMD_LEN;

		// put options in argv[]
		mxGetString(prhs[2], cmd, buflen);
		if((argv[argc] = strtok(cmd, " ")) != NULL)
			while((argv[++argc] = strtok(NULL, " ")) != NULL)
				;
	}

	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;
		if(i>=argc && argv[i-1][1] != 'q')	// since option -q has no parameter
			return 1;
		switch(argv[i-1][1])
		{
			case 's':
				param.svm_type = atoi(argv[i]);
				break;
			case 't':
				param.kernel_type = atoi(argv[i]);
				break;
			case 'd':
				param.degree = atoi(argv[i]);
				break;
			case 'g':
				param.gamma = atof(argv[i]);
				break;
			case 'r':
				param.coef0 = atof(argv[i]);
				break;
			case 'n':
				param.nu = atof(argv[i]);
				break;
			case 'm':
				param.cache_size = atof(argv[i]);
				break;
			case 'c':
				param.C = atof(argv[i]);
				break;
			case 'e':
				param.eps = atof(argv[i]);
				break;
			case 'p':
				param.p = atof(argv[i]);
				break;
			case 'h':
				param.shrinking = atoi(argv[i]);
				break;
			case 'b':
				param.probability = atoi(argv[i]);
				break;
			case 'q':
				print_func = &print_null;
				i--;	// -q takes no argument; undo the consume above
				break;
			case 'v':
				cross_validation = 1;
				nr_fold = atoi(argv[i]);
				if(nr_fold < 2)
				{
					mexPrintf("n-fold cross validation: n must >= 2\n");
					return 1;
				}
				break;
			case 'w':
				// -wi weight: class label i is embedded in the option token
				++param.nr_weight;
				param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
				param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
				param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
				param.weight[param.nr_weight-1] = atof(argv[i]);
				break;
			default:
				mexPrintf("Unknown option -%c\n", argv[i-1][1]);
				return 1;
		}
	}

	svm_set_print_string_function(print_func);

	return 0;
}
// Read a dense MATLAB instance matrix (one row per instance, column-major
// storage) plus a label column vector into the globals prob/x_space.
// Returns 0 on success, -1 on malformed input; the caller owns and frees
// prob.y, prob.x and x_space.
int read_problem_dense(const mxArray *label_vec, const mxArray *instance_mat)
{
	// using size_t due to the output type of matlab functions
	size_t i, j, k, l;
	size_t elements, max_index, sc, label_vector_row_num;
	double *samples, *labels;

	prob.x = NULL;
	prob.y = NULL;
	x_space = NULL;

	labels = mxGetPr(label_vec);
	samples = mxGetPr(instance_mat);
	sc = mxGetN(instance_mat);		// number of features (columns)

	elements = 0;
	// number of instances
	l = mxGetM(instance_mat);
	label_vector_row_num = mxGetM(label_vec);
	prob.l = (int)l;

	if(label_vector_row_num!=l)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		return -1;
	}

	// Count how many svm_node slots are needed: a precomputed kernel stores
	// every entry, otherwise only nonzeros; each instance also needs one
	// index=-1 terminator node.
	if(param.kernel_type == PRECOMPUTED)
		elements = l * (sc + 1);
	else
	{
		for(i = 0; i < l; i++)
		{
			for(k = 0; k < sc; k++)
				if(samples[k * l + i] != 0)	// column-major: element (i,k)
					elements++;
			// count the '-1' element
			elements++;
		}
	}

	prob.y = Malloc(double,l);
	prob.x = Malloc(struct svm_node *,l);
	x_space = Malloc(struct svm_node, elements);

	max_index = sc;
	j = 0;
	// Fill x_space row by row; prob.x[i] points at instance i's first node.
	for(i = 0; i < l; i++)
	{
		prob.x[i] = &x_space[j];
		prob.y[i] = labels[i];
		for(k = 0; k < sc; k++)
		{
			// precomputed kernels keep zeros too (positions are meaningful)
			if(param.kernel_type == PRECOMPUTED || samples[k * l + i] != 0)
			{
				x_space[j].index = (int)k + 1;	// libsvm indices are 1-based
				x_space[j].value = samples[k * l + i];
				j++;
			}
		}
		x_space[j++].index = -1;	// terminator for instance i
	}

	// Default gamma = 1/num_features when -g was not given (param.gamma==0).
	if(param.gamma == 0 && max_index > 0)
		param.gamma = (double)(1.0/max_index);

	// For precomputed kernels the first value of each row is the sample
	// serial number and must lie in [1, max_index].
	if(param.kernel_type == PRECOMPUTED)
		for(i=0;i<l;i++)
		{
			if((int)prob.x[i][0].value <= 0 || (int)prob.x[i][0].value > (int)max_index)
			{
				mexPrintf("Wrong input format: sample_serial_number out of range\n");
				return -1;
			}
		}

	return 0;
}
// Read a sparse MATLAB instance matrix plus a label vector into the globals
// prob/x_space. The matrix is transposed first (via MATLAB's "transpose") so
// that each column is one instance, letting the CSC arrays (ir/jc) be walked
// per instance. Returns 0 on success, -1 on error; the caller owns and frees
// prob.y, prob.x and x_space.
int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
{
	mwIndex *ir, *jc, low, high, k;
	// using size_t due to the output type of matlab functions
	size_t i, j, l, elements, max_index, label_vector_row_num;
	mwSize num_samples;
	double *samples, *labels;
	mxArray *instance_mat_col; // transposed instance sparse matrix

	prob.x = NULL;
	prob.y = NULL;
	x_space = NULL;

	// transpose instance matrix
	{
		mxArray *prhs[1], *plhs[1];
		prhs[0] = mxDuplicateArray(instance_mat);
		if(mexCallMATLAB(1, plhs, 1, prhs, "transpose"))
		{
			mexPrintf("Error: cannot transpose training instance matrix\n");
			return -1;
		}
		instance_mat_col = plhs[0];
		mxDestroyArray(prhs[0]);	// free the duplicate; keep the transpose
	}

	// each column is one instance
	labels = mxGetPr(label_vec);
	samples = mxGetPr(instance_mat_col);	// nonzero values
	ir = mxGetIr(instance_mat_col);		// row index of each nonzero
	jc = mxGetJc(instance_mat_col);		// column start offsets into ir/samples
	num_samples = mxGetNzmax(instance_mat_col);

	// number of instances
	l = mxGetN(instance_mat_col);
	label_vector_row_num = mxGetM(label_vec);
	prob.l = (int) l;

	if(label_vector_row_num!=l)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		return -1;
	}

	// one node per stored nonzero plus one index=-1 terminator per instance
	elements = num_samples + l;
	max_index = mxGetM(instance_mat_col);	// number of features

	prob.y = Malloc(double,l);
	prob.x = Malloc(struct svm_node *,l);
	x_space = Malloc(struct svm_node, elements);

	j = 0;
	for(i=0;i<l;i++)
	{
		prob.x[i] = &x_space[j];
		prob.y[i] = labels[i];
		low = jc[i], high = jc[i+1];	// nonzero range for column (instance) i
		for(k=low;k<high;k++)
		{
			x_space[j].index = (int)ir[k] + 1;	// libsvm indices are 1-based
			x_space[j].value = samples[k];
			j++;
		}
		x_space[j++].index = -1;	// terminator for instance i
	}

	// Default gamma = 1/num_features when -g was not given (param.gamma==0).
	if(param.gamma == 0 && max_index > 0)
		param.gamma = (double)(1.0/max_index);

	return 0;
}
/* Error path: fill every requested output with an empty 0x0 double matrix so
 * MATLAB never sees uninitialized plhs entries. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
	int out;
	for(out = 0; out < nlhs; out++)
		plhs[out] = mxCreateDoubleMatrix(0, 0, mxREAL);
}
// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
// MATLAB entry point for svmtrain:
//   model = svmtrain(label_vec, instance_mat [, 'libsvm_options'])
// Returns either the trained model struct or, with -v, the scalar CV score.
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	const char *error_msg;

	// fix random seed to have same results for each run
	// (for cross validation and probability estimation)
	srand(1);

	// at most one output (the model struct or the CV score)
	if(nlhs > 1)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	// Transform the input Matrix to libsvm format
	if(nrhs > 1 && nrhs < 4)	// i.e. (labels, instances[, options])
	{
		int err;

		if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1]))
		{
			mexPrintf("Error: label vector and instance matrix must be double\n");
			fake_answer(nlhs, plhs);
			return;
		}

		if(mxIsSparse(prhs[0]))
		{
			mexPrintf("Error: label vector should not be in sparse format\n");
			fake_answer(nlhs, plhs);
			return;
		}

		// fills the global `param` and cross-validation settings
		if(parse_command_line(nrhs, prhs, NULL))
		{
			exit_with_help();
			svm_destroy_param(&param);
			fake_answer(nlhs, plhs);
			return;
		}

		if(mxIsSparse(prhs[1]))
		{
			if(param.kernel_type == PRECOMPUTED)
			{
				// precomputed kernel requires dense matrix, so we make one
				mxArray *rhs[1], *lhs[1];

				rhs[0] = mxDuplicateArray(prhs[1]);
				if(mexCallMATLAB(1, lhs, 1, rhs, "full"))
				{
					mexPrintf("Error: cannot generate a full training instance matrix\n");
					svm_destroy_param(&param);
					fake_answer(nlhs, plhs);
					return;
				}
				err = read_problem_dense(prhs[0], lhs[0]);
				mxDestroyArray(lhs[0]);
				mxDestroyArray(rhs[0]);
			}
			else
				err = read_problem_sparse(prhs[0], prhs[1]);
		}
		else
			err = read_problem_dense(prhs[0], prhs[1]);

		// svmtrain's original code
		error_msg = svm_check_parameter(&prob, &param);

		// on any failure so far, release everything the readers allocated
		if(err || error_msg)
		{
			if (error_msg != NULL)
				mexPrintf("Error: %s\n", error_msg);
			svm_destroy_param(&param);
			free(prob.y);
			free(prob.x);
			free(x_space);
			fake_answer(nlhs, plhs);
			return;
		}

		if(cross_validation)
		{
			// -v mode: output is a 1x1 matrix holding the CV score
			double *ptr;
			plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
			ptr = mxGetPr(plhs[0]);
			ptr[0] = do_cross_validation();
		}
		else
		{
			int nr_feat = (int)mxGetN(prhs[1]);
			const char *error_msg;	// NOTE(review): shadows the outer error_msg

			model = svm_train(&prob, &param);
			// convert the libsvm model into the returned MATLAB struct
			error_msg = model_to_matlab_structure(plhs, nr_feat, model);
			if(error_msg)
				mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
			svm_free_and_destroy_model(&model);
		}
		// training data is no longer needed once the model struct is built
		svm_destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(x_space);
	}
	else
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}
}