First commit
liblinear-2.49/.github/workflows/wheel.yml (new executable file, vendored, 36 lines)

name: Build wheels

on:
  # on new tag
  push:
    tags:
      - '*'

  # manually trigger
  workflow_dispatch:

jobs:
  build_wheels:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [windows-2022, macos-13]

    steps:
      - uses: actions/checkout@v2

      - name: Build wheels
        uses: pypa/cibuildwheel@v2.10.2
        env:
          # don't build for PyPy and windows 32-bit
          CIBW_SKIP: pp* *win32*
        with:
          package-dir: ./python
          output-dir: ./python/wheelhouse

      - name: Upload a Build Artifact
        uses: actions/upload-artifact@v4
        with:
          name: wheels-${{ matrix.os }}
          path: ./python/wheelhouse
liblinear-2.49/COPYRIGHT (new file, 31 lines)

Copyright (c) 2007-2023 The LIBLINEAR Project.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither name of copyright holders nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.


THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
liblinear-2.49/Makefile (new file, 37 lines)

CXX ?= g++
CC ?= gcc
CFLAGS = -Wall -Wconversion -O3 -fPIC
LIBS = blas/blas.a
#LIBS = -lblas
SHVER = 6
OS = $(shell uname)
ifeq ($(OS),Darwin)
        SHARED_LIB_FLAG = -dynamiclib -Wl,-install_name,liblinear.so.$(SHVER)
else
        SHARED_LIB_FLAG = -shared -Wl,-soname,liblinear.so.$(SHVER)
endif

all: train predict

lib: linear.o newton.o blas/blas.a
        $(CXX) $(SHARED_LIB_FLAG) linear.o newton.o blas/blas.a -o liblinear.so.$(SHVER)

train: newton.o linear.o train.c blas/blas.a
        $(CXX) $(CFLAGS) -o train train.c newton.o linear.o $(LIBS)

predict: newton.o linear.o predict.c blas/blas.a
        $(CXX) $(CFLAGS) -o predict predict.c newton.o linear.o $(LIBS)

newton.o: newton.cpp newton.h
        $(CXX) $(CFLAGS) -c -o newton.o newton.cpp

linear.o: linear.cpp linear.h
        $(CXX) $(CFLAGS) -c -o linear.o linear.cpp

blas/blas.a: blas/*.c blas/*.h
        make -C blas OPTFLAGS='$(CFLAGS)' CC='$(CC)';

clean:
        make -C blas clean
        make -C matlab clean
        rm -f *~ newton.o linear.o train predict liblinear.so.$(SHVER)
liblinear-2.49/Makefile.win (new file, 24 lines)

CXX = cl.exe
CFLAGS = /nologo /O2 /EHsc /I. /D _WIN64 /D _CRT_SECURE_NO_DEPRECATE
TARGET = windows

all: $(TARGET)\train.exe $(TARGET)\predict.exe lib

$(TARGET)\train.exe: newton.obj linear.obj train.c blas\*.c
        $(CXX) $(CFLAGS) -Fe$(TARGET)\train.exe newton.obj linear.obj train.c blas\*.c

$(TARGET)\predict.exe: newton.obj linear.obj predict.c blas\*.c
        $(CXX) $(CFLAGS) -Fe$(TARGET)\predict.exe newton.obj linear.obj predict.c blas\*.c

linear.obj: linear.cpp linear.h
        $(CXX) $(CFLAGS) -c linear.cpp

newton.obj: newton.cpp newton.h
        $(CXX) $(CFLAGS) -c newton.cpp

lib: linear.cpp linear.h linear.def newton.obj
        $(CXX) $(CFLAGS) -LD linear.cpp newton.obj blas\*.c -Fe$(TARGET)\liblinear -link -DEF:linear.def

clean:
        -erase /Q *.obj $(TARGET)\*.exe $(TARGET)\*.dll $(TARGET)\*.exp $(TARGET)\*.lib
liblinear-2.49/README (new executable file, 727 lines)

LIBLINEAR is a simple package for solving large-scale regularized linear
classification, regression and outlier detection. It currently supports
- L2-regularized logistic regression/L2-loss support vector classification/L1-loss support vector classification
- L1-regularized L2-loss support vector classification/L1-regularized logistic regression
- L2-regularized L2-loss support vector regression/L1-loss support vector regression
- one-class support vector machine.
This document explains the usage of LIBLINEAR.

To get started, please read the ``Quick Start'' section first.
For developers, please check the ``Library Usage'' section to learn
how to integrate LIBLINEAR into your software.

Table of Contents
=================

- When to use LIBLINEAR but not LIBSVM
- Quick Start
- Installation
- `train' Usage
- `predict' Usage
- `svm-scale' Usage
- Examples
- Library Usage
- Building Windows Binaries
- MATLAB/OCTAVE Interface
- Python Interface
- Additional Information

When to use LIBLINEAR but not LIBSVM
====================================

For some large data sets, training with and without nonlinear mappings
gives similar performance. Without using kernels, one can
efficiently train a much larger set via linear classification/regression.
These data usually have a large number of features. Document classification
is an example.

Warning: While LIBLINEAR is generally very fast, its default solver
may be slow in certain situations (e.g., data not scaled or C is
large). See Appendix B of our SVM guide about how to handle such
cases.
http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf

Warning: If you are a beginner and your data sets are not large, you
should consider LIBSVM first.

LIBSVM page:
http://www.csie.ntu.edu.tw/~cjlin/libsvm


Quick Start
===========

See the section ``Installation'' for installing LIBLINEAR.

After installation, there are programs `train' and `predict' for
training and testing, respectively.

About the data format, please check the README file of LIBSVM. Note
that the feature index must start from 1 (not 0).
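For illustration only (the values below are made up, not taken from any
file in this package), an instance with label +1 whose 2nd and 4th
features are 0.1 and -1.2 is written in this sparse format as

	+1 2:0.1 4:-1.2

i.e., a label followed by <index>:<value> pairs with increasing indices;
features whose value is zero are simply omitted.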
A sample classification data set included in this package is `heart_scale'.

Type `train heart_scale', and the program will read the training
data and output the model file `heart_scale.model'. If you have a test
set called heart_scale.t, then type `predict heart_scale.t
heart_scale.model output' to see the prediction accuracy. The `output'
file contains the predicted class labels.

For more information about `train' and `predict', see the sections
`train' Usage and `predict' Usage.

To obtain good performances, sometimes one needs to scale the
data. Please check the program `svm-scale' of LIBSVM. For large and
sparse data, use `-l 0' to keep the sparsity.

Installation
============

On Unix systems, type `make' to build the `train', `predict',
and `svm-scale' programs. Run them without arguments to show the usages.

On other systems, consult `Makefile' to build them (e.g., see
'Building Windows binaries' in this file).

This software uses some level-1 BLAS subroutines. The needed functions are
included in this package. If a BLAS library is available on your
machine, you may use it by modifying the Makefile: unmark the line

	#LIBS = -lblas

and mark

	LIBS = blas/blas.a

The tool `svm-scale', borrowed from LIBSVM, is for scaling the input data file.

`train' Usage
=============

Usage: train [options] training_set_file [model_file]
options:
-s type : set type of solver (default 1)
  for multi-class classification
	 0 -- L2-regularized logistic regression (primal)
	 1 -- L2-regularized L2-loss support vector classification (dual)
	 2 -- L2-regularized L2-loss support vector classification (primal)
	 3 -- L2-regularized L1-loss support vector classification (dual)
	 4 -- support vector classification by Crammer and Singer
	 5 -- L1-regularized L2-loss support vector classification
	 6 -- L1-regularized logistic regression
	 7 -- L2-regularized logistic regression (dual)
  for regression
	11 -- L2-regularized L2-loss support vector regression (primal)
	12 -- L2-regularized L2-loss support vector regression (dual)
	13 -- L2-regularized L1-loss support vector regression (dual)
  for outlier detection
	21 -- one-class support vector machine (dual)
-c cost : set the parameter C (default 1)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-n nu : set the parameter nu of one-class SVM (default 0.5)
-e epsilon : set tolerance of termination criterion
	-s 0 and 2
		|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
		where f is the primal function and pos/neg are # of
		positive/negative data (default 0.01)
	-s 11
		|f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.0001)
	-s 1, 3, 4, 7, and 21
		Dual maximal violation <= eps; similar to libsvm (default 0.1 except 0.01 for -s 21)
	-s 5 and 6
		|f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,
		where f is the primal function (default 0.01)
	-s 12 and 13
		|f'(alpha)|_1 <= eps |f'(alpha0)|,
		where f is the dual function (default 0.1)
-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
-R : not regularize the bias; must with -B 1 to have the bias; DON'T use this unless you know what it is
	(for -s 0, 2, 5, 6, 11)
-wi weight: weights adjust the parameter C of different classes (see README for details)
-v n: n-fold cross validation mode
-C : find parameters (C for -s 0, 2 and C, p for -s 11)
-q : quiet mode (no outputs)

Option -v randomly splits the data into n parts and calculates cross
validation accuracy on them.

Option -C conducts cross validation under different parameters and finds
the best one. This option is supported only by -s 0, -s 2 (for finding
C) and -s 11 (for finding C, p). If the solver is not specified, -s 2
is used.

Formulations:

For L2-regularized logistic regression (-s 0), we solve

min_w w^Tw/2 + C \sum log(1 + exp(-y_i w^Tx_i))

For L2-regularized L2-loss SVC dual (-s 1), we solve

min_alpha  0.5(alpha^T (Q + I/2/C) alpha) - e^T alpha
    s.t.   0 <= alpha_i,

For L2-regularized L2-loss SVC (-s 2), we solve

min_w w^Tw/2 + C \sum max(0, 1- y_i w^Tx_i)^2

For L2-regularized L1-loss SVC dual (-s 3), we solve

min_alpha  0.5(alpha^T Q alpha) - e^T alpha
    s.t.   0 <= alpha_i <= C,

For L1-regularized L2-loss SVC (-s 5), we solve

min_w \sum |w_j| + C \sum max(0, 1- y_i w^Tx_i)^2

For L1-regularized logistic regression (-s 6), we solve

min_w \sum |w_j| + C \sum log(1 + exp(-y_i w^Tx_i))

For L2-regularized logistic regression (-s 7), we solve

min_alpha  0.5(alpha^T Q alpha) + \sum alpha_i*log(alpha_i) + \sum (C-alpha_i)*log(C-alpha_i) - a constant
    s.t.   0 <= alpha_i <= C,

where

Q is a matrix with Q_ij = y_i y_j x_i^T x_j.

For L2-regularized L2-loss SVR (-s 11), we solve

min_w w^Tw/2 + C \sum max(0, |y_i-w^Tx_i|-epsilon)^2

For L2-regularized L2-loss SVR dual (-s 12), we solve

min_beta  0.5(beta^T (Q + lambda I/2/C) beta) - y^T beta + \sum |beta_i|

For L2-regularized L1-loss SVR dual (-s 13), we solve

min_beta  0.5(beta^T Q beta) - y^T beta + \sum |beta_i|
    s.t.   -C <= beta_i <= C,

where

Q is a matrix with Q_ij = x_i^T x_j.

For one-class SVM dual (-s 21), we solve

min_alpha  0.5(alpha^T Q alpha)
    s.t.   0 <= alpha_i <= 1 and \sum alpha_i = nu*l,

where

Q is a matrix with Q_ij = x_i^T x_j.

If bias >= 0, w becomes [w; w_{n+1}] and x becomes [x; bias]. For
example, L2-regularized logistic regression (-s 0) becomes

min_w w^Tw/2 + (w_{n+1})^2/2 + C \sum log(1 + exp(-y_i [w; w_{n+1}]^T[x_i; bias]))

Some may prefer not having (w_{n+1})^2/2 (i.e., the bias variable not
regularized). For primal solvers (-s 0, 2, 5, 6, 11), we provide an
option -R to remove (w_{n+1})^2/2. However, -R is generally not needed,
as for most data the results with and without (w_{n+1})^2/2 are similar.

The primal-dual relationship implies that -s 1 and -s 2 give the same
model, -s 0 and -s 7 give the same, and -s 11 and -s 12 give the same.

We implement the 1-vs-the-rest multi-class strategy for classification.
In training i vs. non_i, their C parameters are (weight from -wi)*C
and C, respectively. If there are only two classes, we train only one
model. Thus weight1*C vs. weight2*C is used. See examples below.

We also implement multi-class SVM by Crammer and Singer (-s 4):

min_{w_m, \xi_i}  0.5 \sum_m ||w_m||^2 + C \sum_i \xi_i
    s.t.  w^T_{y_i} x_i - w^T_m x_i >= \e^m_i - \xi_i \forall m,i

where e^m_i = 0 if y_i = m,
      e^m_i = 1 if y_i != m,

Here we solve the dual problem:

min_{\alpha}  0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
    s.t.  \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i

where w_m(\alpha) = \sum_i \alpha^m_i x_i,
and C^m_i = C if m = y_i,
    C^m_i = 0 if m != y_i.

`predict' Usage
===============

Usage: predict [options] test_file model_file output_file
options:
-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only
-q : quiet mode (no outputs)

Note that -b is only needed in the prediction phase. This is different
from the setting of LIBSVM.

`svm-scale' Usage
=================

See LIBSVM README.

Examples
========

> train data_file

Train linear SVM with the L2-loss function.

> train -s 0 data_file

Train a logistic regression model.

> train -s 21 -n 0.1 data_file

Train a linear one-class SVM which selects roughly 10% of the data as outliers.

> train -v 5 -e 0.001 data_file

Do five-fold cross-validation using L2-loss SVM.
Use a smaller stopping tolerance 0.001 than the default
0.1 if you want more accurate solutions.

> train -C data_file
...
Best C = 0.000488281  CV accuracy = 83.3333%
> train -c 0.000488281 data_file

Conduct cross validation many times by L2-loss SVM and find the
parameter C which achieves the best cross validation accuracy. Then
use the selected C to train the data and obtain a model.

> train -C -s 0 -v 3 -c 0.5 -e 0.0001 data_file

For parameter selection by -C, users can specify other
solvers (currently -s 0, -s 2 and -s 11 are supported) and
a different number of CV folds. Further, users can use
the -c option to specify the smallest C value of the
search range. This option is useful when users want to
rerun the parameter selection procedure from a specified
C under a different setting, such as a stricter stopping
tolerance -e 0.0001 in the above example. Similarly, for
-s 11, users can use the -p option to specify the
maximal p value of the search range.

> train -c 10 -w1 2 -w2 5 -w3 2 four_class_data_file

Train four classifiers:
positive        negative        Cp      Cn
class 1         class 2,3,4.    20      10
class 2         class 1,3,4.    50      10
class 3         class 1,2,4.    20      10
class 4         class 1,2,3.    10      10

> train -c 10 -w3 1 -w2 5 two_class_data_file

If there are only two classes, we train ONE model.
The C values for the two classes are 10 and 50.

> predict -b 1 test_file data_file.model output_file

Output probability estimates (for logistic regression only).

Library Usage
=============

These functions and structures are declared in the header file `linear.h'.
You can see `train.c' and `predict.c' for examples showing how to use them.
We define LIBLINEAR_VERSION and declare `extern int liblinear_version; '
in linear.h, so you can check the version number.

- Function: model* train(const struct problem *prob,
            const struct parameter *param);

    This function constructs and returns a linear classification
    or regression model according to the given training data and
    parameters.

    struct problem describes the problem:

	struct problem
	{
		int l, n;
		double *y;
		struct feature_node **x;
		double bias;
	};

    where `l' is the number of training data. If bias >= 0, we assume
    that one additional feature is added to the end of each data
    instance. `n' is the number of features (including the bias feature
    if bias >= 0). `y' is an array containing the target values (integers
    in classification, real numbers in regression), and `x' is an array
    of pointers, each of which points to a sparse representation (array
    of feature_node) of one training vector.

    For example, if we have the following training data:

    LABEL       ATTR1   ATTR2   ATTR3   ATTR4   ATTR5
    -----       -----   -----   -----   -----   -----
    1           0       0.1     0.2     0       0
    2           0       0.1     0.3    -1.2     0
    1           0.4     0       0       0       0
    2           0       0.1     0       1.4     0.5
    3          -0.1    -0.2     0.1     1.1     0.1

    and bias = 1, then the components of problem are:

    l = 5
    n = 6

    y -> 1 2 1 2 3

    x -> [ ] -> (2,0.1) (3,0.2) (6,1) (-1,?)
         [ ] -> (2,0.1) (3,0.3) (4,-1.2) (6,1) (-1,?)
         [ ] -> (1,0.4) (6,1) (-1,?)
         [ ] -> (2,0.1) (4,1.4) (5,0.5) (6,1) (-1,?)
         [ ] -> (1,-0.1) (2,-0.2) (3,0.1) (4,1.1) (5,0.1) (6,1) (-1,?)
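    As a minimal sketch (not code shipped in this package; the names prob,
    row1, rows and labels are arbitrary), the first instance above could
    be set up in C as follows:

	struct feature_node row1[]  = {{2,0.1}, {3,0.2}, {6,1}, {-1,0}}; /* (-1,?) terminator */
	struct feature_node *rows[1] = {row1};
	double labels[1] = {1};

	struct problem prob;
	prob.l = 1;        /* only one training instance, for brevity */
	prob.n = 6;        /* 5 features plus the bias feature */
	prob.y = labels;
	prob.x = rows;
	prob.bias = 1;

    A real program would fill all five rows, set up a struct parameter,
    call check_parameter(), and then pass both structures to train().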
    struct parameter describes the parameters of a linear classification
    or regression model:

	struct parameter
	{
		int solver_type;

		/* these are for training only */
		double eps;             /* stopping tolerance */
		double C;
		double nu;              /* one-class SVM only */
		int nr_weight;
		int *weight_label;
		double* weight;
		double p;
		double *init_sol;
		int regularize_bias;
		bool w_recalc;          /* for -s 1, 3; may be extended to -s 12, 13, 21 */
	};

    solver_type can be one of L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL, ONECLASS_SVM.

    for classification
      L2R_LR                L2-regularized logistic regression (primal)
      L2R_L2LOSS_SVC_DUAL   L2-regularized L2-loss support vector classification (dual)
      L2R_L2LOSS_SVC        L2-regularized L2-loss support vector classification (primal)
      L2R_L1LOSS_SVC_DUAL   L2-regularized L1-loss support vector classification (dual)
      MCSVM_CS              support vector classification by Crammer and Singer
      L1R_L2LOSS_SVC        L1-regularized L2-loss support vector classification
      L1R_LR                L1-regularized logistic regression
      L2R_LR_DUAL           L2-regularized logistic regression (dual)
    for regression
      L2R_L2LOSS_SVR        L2-regularized L2-loss support vector regression (primal)
      L2R_L2LOSS_SVR_DUAL   L2-regularized L2-loss support vector regression (dual)
      L2R_L1LOSS_SVR_DUAL   L2-regularized L1-loss support vector regression (dual)
    for outlier detection
      ONECLASS_SVM          one-class support vector machine (dual)

    C is the cost of constraint violation.
    p is the sensitiveness of loss of support vector regression.
    nu in ONECLASS_SVM approximates the fraction of data as outliers.
    eps is the stopping criterion.

    nr_weight, weight_label, and weight are used to change the penalty
    for some classes (if the weight for a class is not changed, it is
    set to 1). This is useful for training a classifier on unbalanced
    input data or with asymmetric misclassification costs.

    nr_weight is the number of elements in the arrays weight_label and
    weight. Each weight[i] corresponds to weight_label[i], meaning that
    the penalty of class weight_label[i] is scaled by a factor of weight[i].

    If you do not want to change the penalty for any of the classes,
    just set nr_weight to 0.

    init_sol includes the initial weight vectors (supported for only some
    solvers). See the explanation of the vector w in the model
    structure.

    regularize_bias is the flag for bias regularization. By default it is
    set to 1. If you don't want to regularize the bias, set it to 0 and set
    the bias in the problem structure to 1. (DON'T use it unless you know
    what it is.)

    w_recalc is the flag for recalculating w after optimization
    with a dual-based solver. This may further reduce the weight density
    when the data is sparse. The default value is false, for time
    efficiency. Currently it only takes effect for -s 1 and 3.

    *NOTE* To avoid wrong parameters, check_parameter() should be
    called before train().

    struct model stores the model obtained from the training procedure:

	struct model
	{
		struct parameter param;
		int nr_class;           /* number of classes */
		int nr_feature;
		double *w;
		int *label;             /* label of each class */
		double bias;
		double rho;             /* one-class SVM only */
	};

    param describes the parameters used to obtain the model.

    nr_class is the number of classes for classification. It is a
    non-negative integer with special cases of 0 (no training data at
    all) and 1 (all training data in one class). For regression and
    one-class SVM, nr_class = 2.

    nr_feature is the number of features.

    The array w gives feature weights. Its size is
    nr_feature*nr_class, but it is nr_feature if nr_class = 2 and the
    solver is not MCSVM_CS (see more explanation below). We use one
    against the rest for multi-class classification, so each feature
    index corresponds to nr_class weight values. Weights are
    organized in the following way:

    +------------------+------------------+------------+
    | nr_class weights | nr_class weights |  ...
    | for 1st feature  | for 2nd feature  |
    +------------------+------------------+------------+
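    As a hedged illustration of this layout (not code from the package),
    the decision value of a sparse instance x for the class at label index
    k can be accumulated through the documented accessors instead of
    indexing w directly:

	double dec = 0;
	for (const struct feature_node *node = x; node->index != -1; node++)
		dec += get_decfun_coef(model_, node->index, k) * node->value;
	dec += get_decfun_bias(model_, k);   /* bias term, if the model has one */

    predict_values() below performs essentially this computation for all
    classes at once.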
    The array label stores the class labels.

    When nr_class = 1 or 2, classification solvers (MCSVM_CS
    excluded) return a single vector of weights by considering
    label[0] as positive in training.

    If bias >= 0, x becomes [x; bias]. The number of features is
    increased by one, so w is a (nr_feature+1)*nr_class array. The
    value of bias is stored in the variable bias.

    rho is the bias term used in one-class SVM only.

- Function: void cross_validation(const problem *prob, const parameter *param, int nr_fold, double *target);

    This function conducts cross validation. Data are separated into
    nr_fold folds. Under the given parameters, each fold is validated in
    turn using the model trained on the remaining folds. Predicted labels
    obtained in the validation process are stored in the array called
    target.

    The format of prob is the same as that for train().
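    For instance (a sketch, assuming <stdio.h> and <stdlib.h> are included),
    five-fold cross validation accuracy for a classification problem could
    be computed as:

	double *target = malloc(prob.l * sizeof(double));
	cross_validation(&prob, &param, 5, target);
	int correct = 0;
	for (int i = 0; i < prob.l; i++)
		if (target[i] == prob.y[i])
			correct++;
	printf("CV accuracy = %g%%\n", 100.0 * correct / prob.l);
	free(target);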
- Function: void find_parameters(const struct problem *prob,
	    const struct parameter *param, int nr_fold, double start_C,
	    double start_p, double *best_C, double *best_p, double *best_score);

    This function is similar to cross_validation. However, instead of
    conducting cross validation under fixed parameters, it searches over a
    range of them. For -s 0 and 2, it conducts cross validation many times
    under parameters C = start_C, 2*start_C, 4*start_C, 8*start_C, ..., and
    finds the best one with the highest cross validation accuracy. For
    -s 11, it conducts cross validation many times with a two-level loop.
    The outer loop considers a default sequence of p = 19/20*max_p, ...,
    1/20*max_p, 0, and under each p value the inner loop considers a
    sequence of parameters C = start_C, 2*start_C, 4*start_C, ..., and
    finds the best one with the lowest mean squared error.

    If start_C <= 0, then this procedure calculates a small enough C
    for prob as the start_C. The procedure stops when the models of
    all folds become stable or C reaches max_C.

    If start_p <= 0, then this procedure calculates a maximal p for prob as
    the start_p. Otherwise, the procedure starts with the first
    i/20*max_p <= start_p, so the outer sequence is i/20*max_p,
    (i-1)/20*max_p, ..., 0.

    The best C, the best p, and the corresponding accuracy (or MSE) are
    assigned to *best_C, *best_p and *best_score, respectively. For
    classification, *best_p is not used, and its returned value is -1.
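    A typical call (again only a sketch) lets the procedure pick the
    starting values itself by passing non-positive start_C and start_p:

	double best_C, best_p, best_score;
	find_parameters(&prob, &param, 5, -1, -1, &best_C, &best_p, &best_score);
	param.C = best_C;   /* then retrain with the selected C (and p for -s 11) */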
- Function: double predict(const model *model_, const feature_node *x);

    For a classification model, the predicted class for x is returned.
    For a regression model, the function value of x calculated using
    the model is returned.
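    A sketch of single-instance prediction (the model file name is only an
    example): load a saved model, build a sentinel-terminated sparse vector,
    and call predict():

	struct model *m = load_model("heart_scale.model");
	struct feature_node x[] = {{1,0.7}, {3,1.0}, {-1,0}};
	double predicted_label = predict(m, x);
	free_and_destroy_model(&m);

    Note that, as in the (6,1) entries of the problem example above, any
    bias feature is part of the instance you pass in when calling the
    library directly.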
- Function: double predict_values(const struct model *model_,
	    const struct feature_node *x, double* dec_values);

    This function gives nr_w decision values in the array dec_values.
    nr_w=1 if regression is applied or the number of classes is two. An
    exception is multi-class SVM by Crammer and Singer (-s 4), where
    nr_w = 2 if there are two classes. For all other situations, nr_w is
    the number of classes.

    We implement one-vs-the rest multi-class strategy (-s 0,1,2,3,5,6,7)
    and multi-class SVM by Crammer and Singer (-s 4) for multi-class SVM.
    The class with the highest decision value is returned.

- Function: double predict_probability(const struct model *model_,
	    const struct feature_node *x, double* prob_estimates);

    This function gives nr_class probability estimates in the array
    prob_estimates. nr_class can be obtained from the function
    get_nr_class. The class with the highest probability is
    returned. Currently, we support only the probability outputs of
    logistic regression.

- Function: int get_nr_feature(const model *model_);

    The function gives the number of attributes of the model.

- Function: int get_nr_class(const model *model_);

    The function gives the number of classes of the model.
    For a regression model, 2 is returned.

- Function: void get_labels(const model *model_, int* label);

    This function outputs the name of labels into an array called label.
    For a regression model, label is unchanged.

- Function: double get_decfun_coef(const struct model *model_, int feat_idx,
	    int label_idx);

    This function gives the coefficient for the feature with feature index =
    feat_idx and the class with label index = label_idx. Note that feat_idx
    starts from 1, while label_idx starts from 0. If feat_idx is not in the
    valid range (1 to nr_feature), then a zero value will be returned. For
    classification models, if label_idx is not in the valid range (0 to
    nr_class-1), then a zero value will be returned; for regression models
    and one-class SVM models, label_idx is ignored.

- Function: double get_decfun_bias(const struct model *model_, int label_idx);

    This function gives the bias term corresponding to the class with the
    label_idx. For classification models, if label_idx is not in a valid range
    (0 to nr_class-1), then a zero value will be returned; for regression
    models, label_idx is ignored. This function cannot be called for a one-class
    SVM model.

- Function: double get_decfun_rho(const struct model *model_);

    This function gives rho, the bias term used in one-class SVM only. This
    function can only be called for a one-class SVM model.

- Function: const char *check_parameter(const struct problem *prob,
	    const struct parameter *param);

    This function checks whether the parameters are within the feasible
    range of the problem. This function should be called before calling
    train() and cross_validation(). It returns NULL if the
    parameters are feasible, otherwise an error message is returned.

- Function: int check_probability_model(const struct model *model);

    This function returns 1 if the model supports probability output;
    otherwise, it returns 0.

- Function: int check_regression_model(const struct model *model);

    This function returns 1 if the model is a regression model; otherwise
    it returns 0.

- Function: int check_oneclass_model(const struct model *model);

    This function returns 1 if the model is a one-class SVM model; otherwise
    it returns 0.

- Function: int save_model(const char *model_file_name,
	    const struct model *model_);

    This function saves a model to a file; returns 0 on success, or -1
    if an error occurs.

- Function: struct model *load_model(const char *model_file_name);

    This function returns a pointer to the model read from the file,
    or a null pointer if the model could not be loaded.

- Function: void free_model_content(struct model *model_ptr);

    This function frees the memory used by the entries in a model structure.

- Function: void free_and_destroy_model(struct model **model_ptr_ptr);

    This function frees the memory used by a model and destroys the model
    structure.

- Function: void destroy_param(struct parameter *param);

    This function frees the memory used by a parameter set.

- Function: void set_print_string_function(void (*print_func)(const char *));

    Users can specify their output format by a function. Use

	set_print_string_function(NULL);

    for default printing to stdout.
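    For example (a sketch along the lines of what train.c does for the -q
    option), solver output can be silenced by installing a no-op printer:

	void print_null(const char *s) {}   /* ignore all solver messages */
	...
	set_print_string_function(&print_null);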
    Please note that this function is not thread-safe. When multiple threads
    load or use the same dynamic library (for example, liblinear.so.6), they
    actually share the same memory space of the dynamic library, so all
    threads modify the same static function pointer, liblinear_print_string,
    in linear.cpp when they call this function.

    For example, suppose we have threads A and B. They call this function
    sequentially and pass their own thread-local print_func into it. After
    that, they both call (*liblinear_print_string)(str) once. When the last
    thread finishes setting it (say B), liblinear_print_string is set to
    B.print_func. Now, if thread A accesses liblinear_print_string, it is
    actually accessing B.print_func rather than A.print_func, which is
    incorrect since we expect to use the functionality of A.print_func.

    Even if A.print_func and B.print_func have identical functionality, it is
    still risky. Suppose liblinear_print_string is now set to B.print_func,
    and B deletes B.print_func after finishing its work. Later, thread A
    calls liblinear_print_string, but the address it points to, which is
    B.print_func, has already been deleted. This invalid memory access will
    crash the program. To mitigate this issue, in this example, you should
    ensure that A.print_func and B.print_func remain valid after the threads
    finish their work. For example, in Python, you can keep them alive as
    global variables.

Building Windows Binaries
=========================

Starting from version 2.48, we no longer provide pre-built Windows binaries.
To build them via Visual C++, use the following steps:

1. Open a DOS command box and change to the liblinear directory. If the
environment variables of VC++ have not been set, type

"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat"

You may have to modify the above command according to which version of
VC++ you have and where it is installed.

2. Type

nmake -f Makefile.win clean all

3. (Optional) To build the shared library liblinear.dll, type

nmake -f Makefile.win lib

4. (Optional) To build 32-bit windows binaries, you must
	(1) Setup "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars32.bat" instead of vcvars64.bat
	(2) Change CFLAGS in Makefile.win: /D _WIN64 to /D _WIN32

MATLAB/OCTAVE Interface
=======================

Please check the file README in the directory `matlab'.

Python Interface
================

Please check the file README in the directory `python'.

Additional Information
======================

If you find LIBLINEAR helpful, please cite it as

R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
LIBLINEAR: A Library for Large Linear Classification, Journal of
Machine Learning Research 9(2008), 1871-1874. Software available at
http://www.csie.ntu.edu.tw/~cjlin/liblinear

For any questions and comments, please send your email to
cjlin@csie.ntu.edu.tw
liblinear-2.49/blas/Makefile (new file, 22 lines)

AR ?= ar
RANLIB ?= ranlib

HEADERS = blas.h blasp.h
FILES = dnrm2.o daxpy.o ddot.o dscal.o

CFLAGS = $(OPTFLAGS)
FFLAGS = $(OPTFLAGS)

blas: $(FILES) $(HEADERS)
        $(AR) rcv blas.a $(FILES)
        $(RANLIB) blas.a

clean:
        - rm -f *.o
        - rm -f *.a
        - rm -f *~

.c.o:
        $(CC) $(CFLAGS) -c $*.c
liblinear-2.49/blas/blas.h (new file, 25 lines)

/* blas.h  --  C header file for BLAS                         Ver 1.0 */
/* Jesse Bennett                                       March 23, 2000 */

/**  barf  [ba:rf]  2.  "He suggested using FORTRAN, and everybody barfed."

	- From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */

#ifndef BLAS_INCLUDE
#define BLAS_INCLUDE

/* Data types specific to BLAS implementation */
typedef struct { float r, i; } fcomplex;
typedef struct { double r, i; } dcomplex;
typedef int blasbool;

#include "blasp.h"    /* Prototypes for all BLAS functions */

#define FALSE 0
#define TRUE  1

/* Macro functions */
#define MIN(a,b) ((a) <= (b) ? (a) : (b))
#define MAX(a,b) ((a) >= (b) ? (a) : (b))

#endif
liblinear-2.49/blas/blasp.h (new file, 438 lines; multi-line prototypes joined here for readability)

/* blasp.h  --  C prototypes for BLAS                         Ver 1.0 */
/* Jesse Bennett                                       March 23, 2000 */

/* Functions listed in alphabetical order */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef F2C_COMPAT

void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
void cdotu_(fcomplex *dotval, int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
double sasum_(int *n, float *sx, int *incx);
double scasum_(int *n, fcomplex *cx, int *incx);
double scnrm2_(int *n, fcomplex *x, int *incx);
double sdot_(int *n, float *sx, int *incx, float *sy, int *incy);
double snrm2_(int *n, float *x, int *incx);
void zdotc_(dcomplex *dotval, int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
void zdotu_(dcomplex *dotval, int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);

#else

fcomplex cdotc_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
fcomplex cdotu_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
float sasum_(int *n, float *sx, int *incx);
float scasum_(int *n, fcomplex *cx, int *incx);
float scnrm2_(int *n, fcomplex *x, int *incx);
float sdot_(int *n, float *sx, int *incx, float *sy, int *incy);
float snrm2_(int *n, float *x, int *incx);
dcomplex zdotc_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
dcomplex zdotu_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);

#endif

/* Remaining functions listed in alphabetical order */

int caxpy_(int *n, fcomplex *ca, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
int ccopy_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
int cgbmv_(char *trans, int *m, int *n, int *kl, int *ku, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
int cgemm_(char *transa, char *transb, int *m, int *n, int *k, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, fcomplex *c, int *ldc);
int cgemv_(char *trans, int *m, int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
int cgerc_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx, fcomplex *y, int *incy, fcomplex *a, int *lda);
int cgeru_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx, fcomplex *y, int *incy, fcomplex *a, int *lda);
int chbmv_(char *uplo, int *n, int *k, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
int chemm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, fcomplex *c, int *ldc);
int chemv_(char *uplo, int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
int cher_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx, fcomplex *a, int *lda);
int cher2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx, fcomplex *y, int *incy, fcomplex *a, int *lda);
int cher2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, float *beta, fcomplex *c, int *ldc);
int cherk_(char *uplo, char *trans, int *n, int *k, float *alpha, fcomplex *a, int *lda, float *beta, fcomplex *c, int *ldc);
int chpmv_(char *uplo, int *n, fcomplex *alpha, fcomplex *ap, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
int chpr_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx, fcomplex *ap);
int chpr2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx, fcomplex *y, int *incy, fcomplex *ap);
int crotg_(fcomplex *ca, fcomplex *cb, float *c, fcomplex *s);
int cscal_(int *n, fcomplex *ca, fcomplex *cx, int *incx);
int csscal_(int *n, float *sa, fcomplex *cx, int *incx);
int cswap_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
int csymm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, fcomplex *c, int *ldc);
int csyr2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, fcomplex *c, int *ldc);
int csyrk_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *beta, fcomplex *c, int *ldc);
int ctbmv_(char *uplo, char *trans, char *diag, int *n, int *k, fcomplex *a, int *lda, fcomplex *x, int *incx);
int ctbsv_(char *uplo, char *trans, char *diag, int *n, int *k, fcomplex *a, int *lda, fcomplex *x, int *incx);
int ctpmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap, fcomplex *x, int *incx);
int ctpsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap, fcomplex *x, int *incx);
int ctrmm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb);
int ctrmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a, int *lda, fcomplex *x, int *incx);
int ctrsm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb);
int ctrsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a, int *lda, fcomplex *x, int *incx);
int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy, int *incy);
int dcopy_(int *n, double *sx, int *incx, double *sy, int *incy);
int dgbmv_(char *trans, int *m, int *n, int *kl, int *ku, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
int dgemm_(char *transa, char *transb, int *m, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc);
int dgemv_(char *trans, int *m, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
int dger_(int *m, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda);
int drot_(int *n, double *sx, int *incx, double *sy, int *incy, double *c, double *s);
int drotg_(double *sa, double *sb, double *c, double *s);
int dsbmv_(char *uplo, int *n, int *k, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
int dscal_(int *n, double *sa, double *sx, int *incx);
int dspmv_(char *uplo, int *n, double *alpha, double *ap, double *x, int *incx, double *beta, double *y, int *incy);
int dspr_(char *uplo, int *n, double *alpha, double *x, int *incx, double *ap);
int dspr2_(char *uplo, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *ap);
int dswap_(int *n, double *sx, int *incx, double *sy, int *incy);
int dsymm_(char *side, char *uplo, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc);
int dsymv_(char *uplo, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy);
int dsyr_(char *uplo, int *n, double *alpha, double *x, int *incx, double *a, int *lda);
int dsyr2_(char *uplo, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda);
int dsyr2k_(char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc);
int dsyrk_(char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *beta, double *c, int *ldc);
int dtbmv_(char *uplo, char *trans, char *diag, int *n, int *k, double *a, int *lda, double *x, int *incx);
int dtbsv_(char *uplo, char *trans, char *diag, int *n, int *k, double *a, int *lda, double *x, int *incx);
int dtpmv_(char *uplo, char *trans, char *diag, int *n, double *ap, double *x, int *incx);
int dtpsv_(char *uplo, char *trans, char *diag, int *n, double *ap, double *x, int *incx);
int dtrmm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb);
int dtrmv_(char *uplo, char *trans, char *diag, int *n, double *a, int *lda, double *x, int *incx);
int dtrsm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb);
int dtrsv_(char *uplo, char *trans, char *diag, int *n, double *a, int *lda, double *x, int *incx);

int saxpy_(int *n, float *sa, float *sx, int *incx, float *sy, int *incy);
int scopy_(int *n, float *sx, int *incx, float *sy, int *incy);
int sgbmv_(char *trans, int *m, int *n, int *kl, int *ku, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
int sgemm_(char *transa, char *transb, int *m, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
int sgemv_(char *trans, int *m, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
int sger_(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda);
int srot_(int *n, float *sx, int *incx, float *sy, int *incy, float *c, float *s);
int srotg_(float *sa, float *sb, float *c, float *s);
int ssbmv_(char *uplo, int *n, int *k, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
int sscal_(int *n, float *sa, float *sx, int *incx);
int sspmv_(char *uplo, int *n, float *alpha, float *ap, float *x, int *incx, float *beta, float *y, int *incy);
int sspr_(char *uplo, int *n, float *alpha, float *x, int *incx, float *ap);
int sspr2_(char *uplo, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *ap);
int sswap_(int *n, float *sx, int *incx, float *sy, int *incy);
int ssymm_(char *side, char *uplo, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
int ssymv_(char *uplo, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy);
int ssyr_(char *uplo, int *n, float *alpha, float *x, int *incx, float *a, int *lda);
int ssyr2_(char *uplo, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda);
int ssyr2k_(char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc);
int ssyrk_(char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *beta, float *c, int *ldc);
int stbmv_(char *uplo, char *trans, char *diag, int *n, int *k, float *a, int *lda, float *x, int *incx);
int stbsv_(char *uplo, char *trans, char *diag, int *n, int *k, float *a, int *lda, float *x, int *incx);
int stpmv_(char *uplo, char *trans, char *diag, int *n, float *ap, float *x, int *incx);
int stpsv_(char *uplo, char *trans, char *diag, int *n, float *ap, float *x, int *incx);
int strmm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb);
int strmv_(char *uplo, char *trans, char *diag, int *n, float *a, int *lda, float *x, int *incx);
int strsm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb);
int strsv_(char *uplo, char *trans, char *diag, int *n, float *a, int *lda, float *x, int *incx);
int zaxpy_(int *n, dcomplex *ca, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
int zcopy_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
int zdscal_(int *n, double *sa, dcomplex *cx, int *incx);
int zgbmv_(char *trans, int *m, int *n, int *kl, int *ku, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
int zgemm_(char *transa, char *transb, int *m, int *n, int *k, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, dcomplex *c, int *ldc);
int zgemv_(char *trans, int *m, int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
int zgerc_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx, dcomplex *y, int *incy, dcomplex *a, int *lda);
int zgeru_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx, dcomplex *y, int *incy, dcomplex *a, int *lda);
int zhbmv_(char *uplo, int *n, int *k, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
int zhemm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, dcomplex *c, int *ldc);
int zhemv_(char *uplo, int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
int zher_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx, dcomplex *a, int *lda);
int zher2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx, dcomplex *y, int *incy, dcomplex *a, int *lda);
int zher2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, double *beta, dcomplex *c, int *ldc);
int zherk_(char *uplo, char *trans, int *n, int *k, double *alpha, dcomplex *a, int *lda, double *beta, dcomplex *c, int *ldc);
int zhpmv_(char *uplo, int *n, dcomplex *alpha, dcomplex *ap, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
int zhpr_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx, dcomplex *ap);
int zhpr2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx, dcomplex *y, int *incy, dcomplex *ap);
int zrotg_(dcomplex *ca, dcomplex *cb, double *c, dcomplex *s);
int zscal_(int *n, dcomplex *ca, dcomplex *cx, int *incx);
int zswap_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
int zsymm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, dcomplex *c, int *ldc);
int zsyr2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, dcomplex *c, int *ldc);
int zsyrk_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *beta, dcomplex *c, int *ldc);
int ztbmv_(char *uplo, char *trans, char *diag, int *n, int *k, dcomplex *a, int *lda, dcomplex *x, int *incx);
int ztbsv_(char *uplo, char *trans, char *diag, int *n, int *k, dcomplex *a, int *lda, dcomplex *x, int *incx);
int ztpmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap, dcomplex *x, int *incx);
int ztpsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap, dcomplex *x, int *incx);
int ztrmm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb);
int ztrmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a, int *lda, dcomplex *x, int *incx);
int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m, int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb);
int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a, int *lda, dcomplex *x, int *incx);

#ifdef __cplusplus
}
#endif
57
liblinear-2.49/blas/daxpy.c
Normal file
@@ -0,0 +1,57 @@
#include "blas.h"

#ifdef __cplusplus
extern "C" {
#endif

int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
           int *incy)
{
	long int i, m, ix, iy, nn, iincx, iincy;
	register double ssa;

	/* constant times a vector plus a vector.
	   uses unrolled loop for increments equal to one.
	   jack dongarra, linpack, 3/11/78.
	   modified 12/3/93, array(1) declarations changed to array(*) */

	/* Dereference inputs */
	nn = *n;
	ssa = *sa;
	iincx = *incx;
	iincy = *incy;

	if( nn > 0 && ssa != 0.0 )
	{
		if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
		{
			m = nn-3;
			for (i = 0; i < m; i += 4)
			{
				sy[i] += ssa * sx[i];
				sy[i+1] += ssa * sx[i+1];
				sy[i+2] += ssa * sx[i+2];
				sy[i+3] += ssa * sx[i+3];
			}
			for ( ; i < nn; ++i) /* clean-up loop */
				sy[i] += ssa * sx[i];
		}
		else /* code for unequal increments or equal increments not equal to 1 */
		{
			ix = iincx >= 0 ? 0 : (1 - nn) * iincx;
			iy = iincy >= 0 ? 0 : (1 - nn) * iincy;
			for (i = 0; i < nn; i++)
			{
				sy[iy] += ssa * sx[ix];
				ix += iincx;
				iy += iincy;
			}
		}
	}

	return 0;
} /* daxpy_ */

#ifdef __cplusplus
}
#endif
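daxpy_ computes y := a*x + y in place. A small usage sketch (illustrative only, not part of the source):

	extern int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy, int *incy);

	int main(void)
	{
		double a = 2.0, x[4] = {1, 2, 3, 4}, y[4] = {10, 10, 10, 10};
		int n = 4, inc = 1;
		daxpy_(&n, &a, x, &inc, y, &inc); /* y is now {12, 14, 16, 18} */
		return 0;
	}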
58
liblinear-2.49/blas/ddot.c
Normal file
@@ -0,0 +1,58 @@
#include "blas.h"

#ifdef __cplusplus
extern "C" {
#endif

double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
{
	long int i, m, nn, iincx, iincy;
	double stemp;
	long int ix, iy;

	/* forms the dot product of two vectors.
	   uses unrolled loops for increments equal to one.
	   jack dongarra, linpack, 3/11/78.
	   modified 12/3/93, array(1) declarations changed to array(*) */

	/* Dereference inputs */
	nn = *n;
	iincx = *incx;
	iincy = *incy;

	stemp = 0.0;
	if (nn > 0)
	{
		if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
		{
			m = nn-4;
			for (i = 0; i < m; i += 5)
				stemp += sx[i] * sy[i] + sx[i+1] * sy[i+1] + sx[i+2] * sy[i+2] +
				         sx[i+3] * sy[i+3] + sx[i+4] * sy[i+4];

			for ( ; i < nn; i++) /* clean-up loop */
				stemp += sx[i] * sy[i];
		}
		else /* code for unequal increments or equal increments not equal to 1 */
		{
			ix = 0;
			iy = 0;
			if (iincx < 0)
				ix = (1 - nn) * iincx;
			if (iincy < 0)
				iy = (1 - nn) * iincy;
			for (i = 0; i < nn; i++)
			{
				stemp += sx[ix] * sy[iy];
				ix += iincx;
				iy += iincy;
			}
		}
	}

	return stemp;
} /* ddot_ */

#ifdef __cplusplus
}
#endif
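ddot_ returns the dot product sx'*sy; the five-way unrolled loop handles lengths that are not a multiple of five through the clean-up loop. Usage sketch (illustrative only):

	#include <stdio.h>

	extern double ddot_(int *n, double *sx, int *incx, double *sy, int *incy);

	int main(void)
	{
		double u[3] = {1, 2, 3}, v[3] = {4, 5, 6};
		int n = 3, inc = 1;
		printf("%g\n", ddot_(&n, u, &inc, v, &inc)); /* 1*4 + 2*5 + 3*6 = 32 */
		return 0;
	}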
70
liblinear-2.49/blas/dnrm2.c
Normal file
@@ -0,0 +1,70 @@
#include <math.h>  /* Needed for fabs() and sqrt() */
#include "blas.h"

#ifdef __cplusplus
extern "C" {
#endif

double dnrm2_(int *n, double *x, int *incx)
{
	long int ix, nn, iincx;
	double norm, scale, absxi, ssq, temp;

	/* DNRM2 returns the euclidean norm of a vector via the function
	   name, so that

	   DNRM2 := sqrt( x'*x )

	   -- This version written on 25-October-1982.
	   Modified on 14-October-1993 to inline the call to SLASSQ.
	   Sven Hammarling, Nag Ltd. */

	/* Dereference inputs */
	nn = *n;
	iincx = *incx;

	if( nn > 0 && iincx > 0 )
	{
		if (nn == 1)
		{
			norm = fabs(x[0]);
		}
		else
		{
			scale = 0.0;
			ssq = 1.0;

			/* The following loop is equivalent to this call to the LAPACK
			   auxiliary routine: CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */

			for (ix=(nn-1)*iincx; ix>=0; ix-=iincx)
			{
				if (x[ix] != 0.0)
				{
					absxi = fabs(x[ix]);
					if (scale < absxi)
					{
						temp = scale / absxi;
						ssq = ssq * (temp * temp) + 1.0;
						scale = absxi;
					}
					else
					{
						temp = absxi / scale;
						ssq += temp * temp;
					}
				}
			}
			norm = scale * sqrt(ssq);
		}
	}
	else
		norm = 0.0;

	return norm;

} /* dnrm2_ */

#ifdef __cplusplus
}
#endif
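The scale/ssq loop maintains the invariant sum(x(i)^2) = scale^2 * ssq, with scale equal to the largest magnitude seen so far, so squares of the original entries are never formed directly; this avoids the overflow and underflow a naive sqrt-of-sum-of-squares would hit. A sketch of the difference (illustrative only):

	#include <stdio.h>

	extern double dnrm2_(int *n, double *x, int *incx);

	int main(void)
	{
		double x[2] = {1e200, 1e200};
		int n = 2, incx = 1;
		/* naive sqrt(x[0]*x[0] + x[1]*x[1]) overflows to inf;
		   the scaled accumulation returns about 1.41421e200 */
		printf("%g\n", dnrm2_(&n, x, &incx));
		return 0;
	}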
52
liblinear-2.49/blas/dscal.c
Normal file
@@ -0,0 +1,52 @@
#include "blas.h"

#ifdef __cplusplus
extern "C" {
#endif

int dscal_(int *n, double *sa, double *sx, int *incx)
{
	long int i, m, nincx, nn, iincx;
	double ssa;

	/* scales a vector by a constant.
	   uses unrolled loops for increment equal to 1.
	   jack dongarra, linpack, 3/11/78.
	   modified 3/93 to return if incx .le. 0.
	   modified 12/3/93, array(1) declarations changed to array(*) */

	/* Dereference inputs */
	nn = *n;
	iincx = *incx;
	ssa = *sa;

	if (nn > 0 && iincx > 0)
	{
		if (iincx == 1) /* code for increment equal to 1 */
		{
			m = nn-4;
			for (i = 0; i < m; i += 5)
			{
				sx[i] = ssa * sx[i];
				sx[i+1] = ssa * sx[i+1];
				sx[i+2] = ssa * sx[i+2];
				sx[i+3] = ssa * sx[i+3];
				sx[i+4] = ssa * sx[i+4];
			}
			for ( ; i < nn; ++i) /* clean-up loop */
				sx[i] = ssa * sx[i];
		}
		else /* code for increment not equal to 1 */
		{
			nincx = nn * iincx;
			for (i = 0; i < nincx; i += iincx)
				sx[i] = ssa * sx[i];
		}
	}

	return 0;
} /* dscal_ */

#ifdef __cplusplus
}
#endif
270
liblinear-2.49/heart_scale
Normal file
@@ -0,0 +1,270 @@
+1 1:0.708333 2:1 3:1 4:-0.320755 5:-0.105023 6:-1 7:1 8:-0.419847 9:-1 10:-0.225806 12:1 13:-1
-1 1:0.583333 2:-1 3:0.333333 4:-0.603774 5:1 6:-1 7:1 8:0.358779 9:-1 10:-0.483871 12:-1 13:1
+1 1:0.166667 2:1 3:-0.333333 4:-0.433962 5:-0.383562 6:-1 7:-1 8:0.0687023 9:-1 10:-0.903226 11:-1 12:-1 13:1
-1 1:0.458333 2:1 3:1 4:-0.358491 5:-0.374429 6:-1 7:-1 8:-0.480916 9:1 10:-0.935484 12:-0.333333 13:1
-1 1:0.875 2:-1 3:-0.333333 4:-0.509434 5:-0.347032 6:-1 7:1 8:-0.236641 9:1 10:-0.935484 11:-1 12:-0.333333 13:-1
-1 1:0.5 2:1 3:1 4:-0.509434 5:-0.767123 6:-1 7:-1 8:0.0534351 9:-1 10:-0.870968 11:-1 12:-1 13:1
+1 1:0.125 2:1 3:0.333333 4:-0.320755 5:-0.406393 6:1 7:1 8:0.0839695 9:1 10:-0.806452 12:-0.333333 13:0.5
+1 1:0.25 2:1 3:1 4:-0.698113 5:-0.484018 6:-1 7:1 8:0.0839695 9:1 10:-0.612903 12:-0.333333 13:1
+1 1:0.291667 2:1 3:1 4:-0.132075 5:-0.237443 6:-1 7:1 8:0.51145 9:-1 10:-0.612903 12:0.333333 13:1
+1 1:0.416667 2:-1 3:1 4:0.0566038 5:0.283105 6:-1 7:1 8:0.267176 9:-1 10:0.290323 12:1 13:1
-1 1:0.25 2:1 3:1 4:-0.226415 5:-0.506849 6:-1 7:-1 8:0.374046 9:-1 10:-0.83871 12:-1 13:1
-1 2:1 3:1 4:-0.0943396 5:-0.543379 6:-1 7:1 8:-0.389313 9:1 10:-1 11:-1 12:-1 13:1
-1 1:-0.375 2:1 3:0.333333 4:-0.132075 5:-0.502283 6:-1 7:1 8:0.664122 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.333333 2:1 3:-1 4:-0.245283 5:-0.506849 6:-1 7:-1 8:0.129771 9:-1 10:-0.16129 12:0.333333 13:-1
-1 1:0.166667 2:-1 3:1 4:-0.358491 5:-0.191781 6:-1 7:1 8:0.343511 9:-1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:0.75 2:-1 3:1 4:-0.660377 5:-0.894977 6:-1 7:-1 8:-0.175573 9:-1 10:-0.483871 12:-1 13:-1
+1 1:-0.291667 2:1 3:1 4:-0.132075 5:-0.155251 6:-1 7:-1 8:-0.251908 9:1 10:-0.419355 12:0.333333 13:1
+1 2:1 3:1 4:-0.132075 5:-0.648402 6:1 7:1 8:0.282443 9:1 11:1 12:-1 13:1
-1 1:0.458333 2:1 3:-1 4:-0.698113 5:-0.611872 6:-1 7:1 8:0.114504 9:1 10:-0.419355 12:-1 13:-1
-1 1:-0.541667 2:1 3:-1 4:-0.132075 5:-0.666667 6:-1 7:-1 8:0.633588 9:1 10:-0.548387 11:-1 12:-1 13:1
+1 1:0.583333 2:1 3:1 4:-0.509434 5:-0.52968 6:-1 7:1 8:-0.114504 9:1 10:-0.16129 12:0.333333 13:1
-1 1:-0.208333 2:1 3:-0.333333 4:-0.320755 5:-0.456621 6:-1 7:1 8:0.664122 9:-1 10:-0.935484 12:-1 13:-1
-1 1:-0.416667 2:1 3:1 4:-0.603774 5:-0.191781 6:-1 7:-1 8:0.679389 9:-1 10:-0.612903 12:-1 13:-1
-1 1:-0.25 2:1 3:1 4:-0.660377 5:-0.643836 6:-1 7:-1 8:0.0992366 9:-1 10:-0.967742 11:-1 12:-1 13:-1
-1 1:0.0416667 2:-1 3:-0.333333 4:-0.283019 5:-0.260274 6:1 7:1 8:0.343511 9:1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:-0.208333 2:-1 3:0.333333 4:-0.320755 5:-0.319635 6:-1 7:-1 8:0.0381679 9:-1 10:-0.935484 11:-1 12:-1 13:-1
-1 1:-0.291667 2:-1 3:1 4:-0.169811 5:-0.465753 6:-1 7:1 8:0.236641 9:1 10:-1 12:-1 13:-1
-1 1:-0.0833333 2:-1 3:0.333333 4:-0.509434 5:-0.228311 6:-1 7:1 8:0.312977 9:-1 10:-0.806452 11:-1 12:-1 13:-1
+1 1:0.208333 2:1 3:0.333333 4:-0.660377 5:-0.525114 6:-1 7:1 8:0.435115 9:-1 10:-0.193548 12:-0.333333 13:1
-1 1:0.75 2:-1 3:0.333333 4:-0.698113 5:-0.365297 6:1 7:1 8:-0.0992366 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.166667 2:1 3:0.333333 4:-0.358491 5:-0.52968 6:-1 7:1 8:0.206107 9:-1 10:-0.870968 12:-0.333333 13:1
-1 1:0.541667 2:1 3:1 4:0.245283 5:-0.534247 6:-1 7:1 8:0.0229008 9:-1 10:-0.258065 11:-1 12:-1 13:0.5
-1 1:-0.666667 2:-1 3:0.333333 4:-0.509434 5:-0.593607 6:-1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.25 2:1 3:1 4:0.433962 5:-0.086758 6:-1 7:1 8:0.0534351 9:1 10:0.0967742 11:1 12:-1 13:1
+1 1:-0.125 2:1 3:1 4:-0.0566038 5:-0.6621 6:-1 7:1 8:-0.160305 9:1 10:-0.709677 12:-1 13:1
+1 1:-0.208333 2:1 3:1 4:-0.320755 5:-0.406393 6:1 7:1 8:0.206107 9:1 10:-1 11:-1 12:0.333333 13:1
+1 1:0.333333 2:1 3:1 4:-0.132075 5:-0.630137 6:-1 7:1 8:0.0229008 9:1 10:-0.387097 11:-1 12:-0.333333 13:1
+1 1:0.25 2:1 3:-1 4:0.245283 5:-0.328767 6:-1 7:1 8:-0.175573 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:1 3:0.333333 4:-0.320755 5:-0.753425 6:-1 7:-1 8:0.206107 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.208333 2:1 3:1 4:-0.471698 5:-0.561644 6:-1 7:1 8:0.755725 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.541667 2:1 3:1 4:0.0943396 5:-0.557078 6:-1 7:-1 8:0.679389 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:0.375 2:-1 3:1 4:-0.433962 5:-0.621005 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.375 2:1 3:0.333333 4:-0.320755 5:-0.511416 6:-1 7:-1 8:0.648855 9:1 10:-0.870968 11:-1 12:-1 13:-1
-1 1:-0.291667 2:1 3:-0.333333 4:-0.867925 5:-0.675799 6:1 7:-1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:1
+1 1:0.25 2:1 3:0.333333 4:-0.396226 5:-0.579909 6:1 7:-1 8:-0.0381679 9:-1 10:-0.290323 12:-0.333333 13:0.5
-1 1:0.208333 2:1 3:0.333333 4:-0.132075 5:-0.611872 6:1 7:1 8:0.435115 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.166667 2:1 3:0.333333 4:-0.54717 5:-0.894977 6:-1 7:1 8:-0.160305 9:-1 10:-0.741935 11:-1 12:1 13:-1
+1 1:-0.375 2:1 3:1 4:-0.698113 5:-0.675799 6:-1 7:1 8:0.618321 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.541667 2:1 3:-0.333333 4:0.245283 5:-0.452055 6:-1 7:-1 8:-0.251908 9:1 10:-1 12:1 13:0.5
+1 1:0.5 2:-1 3:1 4:0.0566038 5:-0.547945 6:-1 7:1 8:-0.343511 9:-1 10:-0.677419 12:1 13:1
+1 1:-0.458333 2:1 3:1 4:-0.207547 5:-0.136986 6:-1 7:-1 8:-0.175573 9:1 10:-0.419355 12:-1 13:0.5
-1 1:-0.0416667 2:1 3:-0.333333 4:-0.358491 5:-0.639269 6:1 7:-1 8:0.725191 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.5 2:-1 3:0.333333 4:-0.132075 5:0.328767 6:1 7:1 8:0.312977 9:-1 10:-0.741935 11:-1 12:-0.333333 13:-1
-1 1:0.416667 2:-1 3:-0.333333 4:-0.132075 5:-0.684932 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:0.333333 13:-1
-1 1:-0.333333 2:-1 3:-0.333333 4:-0.320755 5:-0.506849 6:-1 7:1 8:0.587786 9:-1 10:-0.806452 12:-1 13:-1
-1 1:-0.5 2:-1 3:-0.333333 4:-0.792453 5:-0.671233 6:-1 7:-1 8:0.480916 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.333333 2:1 3:1 4:-0.169811 5:-0.817352 6:-1 7:1 8:-0.175573 9:1 10:0.16129 12:-0.333333 13:-1
-1 1:0.291667 2:-1 3:0.333333 4:-0.509434 5:-0.762557 6:1 7:-1 8:-0.618321 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.25 2:-1 3:1 4:0.509434 5:-0.438356 6:-1 7:-1 8:0.0992366 9:1 10:-1 12:-1 13:-1
+1 1:0.375 2:1 3:-0.333333 4:-0.509434 5:-0.292237 6:-1 7:1 8:-0.51145 9:-1 10:-0.548387 12:-0.333333 13:1
-1 1:0.166667 2:1 3:0.333333 4:0.0566038 5:-1 6:1 7:-1 8:0.557252 9:-1 10:-0.935484 11:-1 12:-0.333333 13:1
+1 1:-0.0833333 2:-1 3:1 4:-0.320755 5:-0.182648 6:-1 7:-1 8:0.0839695 9:1 10:-0.612903 12:-1 13:1
-1 1:-0.375 2:1 3:0.333333 4:-0.509434 5:-0.543379 6:-1 7:-1 8:0.496183 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.291667 2:-1 3:-1 4:0.0566038 5:-0.479452 6:-1 7:-1 8:0.526718 9:-1 10:-0.709677 11:-1 12:-1 13:-1
-1 1:0.416667 2:1 3:-1 4:-0.0377358 5:-0.511416 6:1 7:1 8:0.206107 9:-1 10:-0.258065 11:1 12:-1 13:0.5
+1 1:0.166667 2:1 3:1 4:0.0566038 5:-0.315068 6:-1 7:1 8:-0.374046 9:1 10:-0.806452 12:-0.333333 13:0.5
-1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.383562 6:-1 7:1 8:0.755725 9:1 10:-1 11:-1 12:-1 13:-1
+1 1:0.208333 2:-1 3:-0.333333 4:-0.207547 5:-0.118721 6:1 7:1 8:0.236641 9:-1 10:-1 11:-1 12:0.333333 13:-1
-1 1:-0.375 2:-1 3:0.333333 4:-0.54717 5:-0.47032 6:-1 7:-1 8:0.19084 9:-1 10:-0.903226 12:-0.333333 13:-1
+1 1:-0.25 2:1 3:0.333333 4:-0.735849 5:-0.465753 6:-1 7:-1 8:0.236641 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.333333 2:1 3:1 4:-0.509434 5:-0.388128 6:-1 7:-1 8:0.0534351 9:1 10:0.16129 12:-0.333333 13:1
-1 1:0.166667 2:-1 3:1 4:-0.509434 5:0.0410959 6:-1 7:-1 8:0.40458 9:1 10:-0.806452 11:-1 12:-1 13:-1
-1 1:0.708333 2:1 3:-0.333333 4:0.169811 5:-0.456621 6:-1 7:1 8:0.0992366 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.958333 2:-1 3:0.333333 4:-0.132075 5:-0.675799 6:-1 8:-0.312977 9:-1 10:-0.645161 12:-1 13:-1
-1 1:0.583333 2:-1 3:1 4:-0.773585 5:-0.557078 6:-1 7:-1 8:0.0839695 9:-1 10:-0.903226 11:-1 12:0.333333 13:-1
+1 1:-0.333333 2:1 3:1 4:-0.0943396 5:-0.164384 6:-1 7:1 8:0.160305 9:1 10:-1 12:1 13:1
-1 1:-0.333333 2:1 3:1 4:-0.811321 5:-0.625571 6:-1 7:1 8:0.175573 9:1 10:-0.0322581 12:-1 13:-1
-1 1:-0.583333 2:-1 3:0.333333 4:-1 5:-0.666667 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:-1 3:0.333333 4:-0.509434 5:-0.621005 6:-1 7:-1 8:0.557252 9:-1 10:-1 12:-1 13:-1
-1 1:0.125 2:1 3:-0.333333 4:-0.509434 5:-0.497717 6:-1 7:-1 8:0.633588 9:-1 10:-0.741935 11:-1 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.0188679 5:-0.579909 6:-1 7:-1 8:-0.480916 9:-1 10:-0.354839 12:-0.333333 13:1
+1 1:-0.75 2:1 3:1 4:-0.509434 5:-0.671233 6:-1 7:-1 8:-0.0992366 9:1 10:-0.483871 12:-1 13:1
+1 1:0.208333 2:1 3:1 4:0.0566038 5:-0.342466 6:-1 7:1 8:-0.389313 9:1 10:-0.741935 11:-1 12:-1 13:1
-1 1:-0.5 2:1 3:0.333333 4:-0.320755 5:-0.598174 6:-1 7:1 8:0.480916 9:-1 10:-0.354839 12:-1 13:-1
-1 1:0.166667 2:1 3:1 4:-0.698113 5:-0.657534 6:-1 7:-1 8:-0.160305 9:1 10:-0.516129 12:-1 13:0.5
-1 1:-0.458333 2:1 3:-1 4:0.0188679 5:-0.461187 6:-1 7:1 8:0.633588 9:-1 10:-0.741935 11:-1 12:0.333333 13:-1
-1 1:0.375 2:1 3:-0.333333 4:-0.358491 5:-0.625571 6:1 7:1 8:0.0534351 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.25 2:1 3:-1 4:0.584906 5:-0.342466 6:-1 7:1 8:0.129771 9:-1 10:0.354839 11:1 12:-1 13:1
-1 1:-0.5 2:-1 3:-0.333333 4:-0.396226 5:-0.178082 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.125 2:1 3:1 4:0.0566038 5:-0.465753 6:-1 7:1 8:-0.129771 9:-1 10:-0.16129 12:-1 13:1
-1 1:0.25 2:1 3:-0.333333 4:-0.132075 5:-0.56621 6:-1 7:-1 8:0.419847 9:1 10:-1 11:-1 12:-1 13:-1
+1 1:0.333333 2:-1 3:1 4:-0.320755 5:-0.0684932 6:-1 7:1 8:0.496183 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.0416667 2:1 3:1 4:-0.433962 5:-0.360731 6:-1 7:1 8:-0.419847 9:1 10:-0.290323 12:-0.333333 13:1
+1 1:0.0416667 2:1 3:1 4:-0.698113 5:-0.634703 6:-1 7:1 8:-0.435115 9:1 10:-1 12:-0.333333 13:-1
+1 1:-0.0416667 2:1 3:1 4:-0.415094 5:-0.607306 6:-1 7:-1 8:0.480916 9:-1 10:-0.677419 11:-1 12:0.333333 13:1
+1 1:-0.25 2:1 3:1 4:-0.698113 5:-0.319635 6:-1 7:1 8:-0.282443 9:1 10:-0.677419 12:-0.333333 13:-1
-1 1:0.541667 2:1 3:1 4:-0.509434 5:-0.196347 6:-1 7:1 8:0.221374 9:-1 10:-0.870968 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.886792 5:-0.506849 6:-1 7:-1 8:0.29771 9:-1 10:-0.967742 11:-1 12:-0.333333 13:1
-1 1:0.458333 2:-1 3:0.333333 4:-0.132075 5:-0.146119 6:-1 7:-1 8:-0.0534351 9:-1 10:-0.935484 11:-1 12:-1 13:1
-1 1:-0.125 2:-1 3:-0.333333 4:-0.509434 5:-0.461187 6:-1 7:-1 8:0.389313 9:-1 10:-0.645161 11:-1 12:-1 13:-1
-1 1:-0.375 2:-1 3:0.333333 4:-0.735849 5:-0.931507 6:-1 7:-1 8:0.587786 9:-1 10:-0.806452 12:-1 13:-1
+1 1:0.583333 2:1 3:1 4:-0.509434 5:-0.493151 6:-1 7:-1 8:-1 9:-1 10:-0.677419 12:-1 13:-1
-1 1:-0.166667 2:-1 3:1 4:-0.320755 5:-0.347032 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.166667 2:1 3:1 4:0.339623 5:-0.255708 6:1 7:1 8:-0.19084 9:-1 10:-0.677419 12:1 13:1
+1 1:0.416667 2:1 3:1 4:-0.320755 5:-0.415525 6:-1 7:1 8:0.160305 9:-1 10:-0.548387 12:-0.333333 13:1
+1 1:-0.208333 2:1 3:1 4:-0.433962 5:-0.324201 6:-1 7:1 8:0.450382 9:-1 10:-0.83871 12:-1 13:1
-1 1:-0.0833333 2:1 3:0.333333 4:-0.886792 5:-0.561644 6:-1 7:-1 8:0.0992366 9:1 10:-0.612903 12:-1 13:-1
+1 1:0.291667 2:-1 3:1 4:0.0566038 5:-0.39726 6:-1 7:1 8:0.312977 9:-1 10:-0.16129 12:0.333333 13:1
+1 1:0.25 2:1 3:1 4:-0.132075 5:-0.767123 6:-1 7:-1 8:0.389313 9:1 10:-1 11:-1 12:-0.333333 13:1
-1 1:-0.333333 2:-1 3:-0.333333 4:-0.660377 5:-0.844749 6:-1 7:-1 8:0.0229008 9:-1 10:-1 12:-1 13:-1
+1 1:0.0833333 2:-1 3:1 4:0.622642 5:-0.0821918 6:-1 8:-0.29771 9:1 10:0.0967742 12:-1 13:-1
-1 1:-0.5 2:1 3:-0.333333 4:-0.698113 5:-0.502283 6:-1 7:-1 8:0.251908 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.291667 2:-1 3:1 4:0.207547 5:-0.182648 6:-1 7:1 8:0.374046 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:-0.226415 5:-0.187215 6:1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:1 3:-0.333333 4:-0.509434 5:-0.228311 6:-1 7:-1 8:0.389313 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.166667 2:-1 3:-0.333333 4:-0.245283 5:-0.3379 6:-1 7:-1 8:0.389313 9:-1 10:-1 12:-1 13:-1
+1 1:-0.291667 2:1 3:1 4:-0.509434 5:-0.438356 6:-1 7:1 8:0.114504 9:-1 10:-0.741935 11:-1 12:-1 13:1
+1 1:0.125 2:-1 3:1 4:1 5:-0.260274 6:1 7:1 8:-0.0534351 9:1 10:0.290323 11:1 12:0.333333 13:1
-1 1:0.541667 2:-1 3:-1 4:0.0566038 5:-0.543379 6:-1 7:-1 8:-0.343511 9:-1 10:-0.16129 11:1 12:-1 13:-1
+1 1:0.125 2:1 3:1 4:-0.320755 5:-0.283105 6:1 7:1 8:-0.51145 9:1 10:-0.483871 11:1 12:-1 13:1
+1 1:-0.166667 2:1 3:0.333333 4:-0.509434 5:-0.716895 6:-1 7:-1 8:0.0381679 9:-1 10:-0.354839 12:1 13:1
+1 1:0.0416667 2:1 3:1 4:-0.471698 5:-0.269406 6:-1 7:1 8:-0.312977 9:1 10:0.0322581 12:0.333333 13:-1
+1 1:0.166667 2:1 3:1 4:0.0943396 5:-0.324201 6:-1 7:-1 8:-0.740458 9:1 10:-0.612903 12:-0.333333 13:1
-1 1:0.5 2:-1 3:0.333333 4:0.245283 5:0.0684932 6:-1 7:1 8:0.221374 9:-1 10:-0.741935 11:-1 12:-1 13:-1
-1 1:0.0416667 2:1 3:0.333333 4:-0.415094 5:-0.328767 6:-1 7:1 8:0.236641 9:-1 10:-0.83871 11:1 12:-0.333333 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:0.245283 5:-0.657534 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.375 2:1 3:1 4:-0.509434 5:-0.356164 6:-1 7:-1 8:-0.572519 9:1 10:-0.419355 12:0.333333 13:1
-1 1:-0.0416667 2:-1 3:0.333333 4:-0.207547 5:-0.680365 6:-1 7:1 8:0.496183 9:-1 10:-0.967742 12:-1 13:-1
-1 1:-0.0416667 2:1 3:-0.333333 4:-0.245283 5:-0.657534 6:-1 7:-1 8:0.328244 9:-1 10:-0.741935 11:-1 12:-0.333333 13:-1
+1 1:0.291667 2:1 3:1 4:-0.566038 5:-0.525114 6:1 7:-1 8:0.358779 9:1 10:-0.548387 11:-1 12:0.333333 13:1
+1 1:0.416667 2:-1 3:1 4:-0.735849 5:-0.347032 6:-1 7:-1 8:0.496183 9:1 10:-0.419355 12:0.333333 13:-1
+1 1:0.541667 2:1 3:1 4:-0.660377 5:-0.607306 6:-1 7:1 8:-0.0687023 9:1 10:-0.967742 11:-1 12:-0.333333 13:-1
-1 1:-0.458333 2:1 3:1 4:-0.132075 5:-0.543379 6:-1 7:-1 8:0.633588 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.458333 2:1 3:1 4:-0.509434 5:-0.452055 6:-1 7:1 8:-0.618321 9:1 10:-0.290323 11:1 12:-0.333333 13:-1
-1 1:0.0416667 2:1 3:0.333333 4:0.0566038 5:-0.515982 6:-1 7:1 8:0.435115 9:-1 10:-0.483871 11:-1 12:-1 13:1
-1 1:-0.291667 2:-1 3:0.333333 4:-0.0943396 5:-0.767123 6:-1 7:1 8:0.358779 9:1 10:-0.548387 11:1 12:-1 13:-1
-1 1:0.583333 2:-1 3:0.333333 4:0.0943396 5:-0.310502 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.125 2:1 3:1 4:-0.415094 5:-0.438356 6:1 7:1 8:0.114504 9:1 10:-0.612903 12:-0.333333 13:-1
-1 1:-0.791667 2:-1 3:-0.333333 4:-0.54717 5:-0.616438 6:-1 7:-1 8:0.847328 9:-1 10:-0.774194 11:-1 12:-1 13:-1
-1 1:0.166667 2:1 3:1 4:-0.283019 5:-0.630137 6:-1 7:-1 8:0.480916 9:1 10:-1 11:-1 12:-1 13:1
+1 1:0.458333 2:1 3:1 4:-0.0377358 5:-0.607306 6:-1 7:1 8:-0.0687023 9:-1 10:-0.354839 12:0.333333 13:0.5
-1 1:0.25 2:1 3:1 4:-0.169811 5:-0.3379 6:-1 7:1 8:0.694656 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.125 2:1 3:0.333333 4:-0.132075 5:-0.511416 6:-1 7:-1 8:0.40458 9:-1 10:-0.806452 12:-0.333333 13:1
-1 1:-0.0833333 2:1 3:-1 4:-0.415094 5:-0.60274 6:-1 7:1 8:-0.175573 9:1 10:-0.548387 11:-1 12:-0.333333 13:-1
+1 1:0.0416667 2:1 3:-0.333333 4:0.849057 5:-0.283105 6:-1 7:1 8:0.89313 9:-1 10:-1 11:-1 12:-0.333333 13:1
+1 2:1 3:1 4:-0.45283 5:-0.287671 6:-1 7:-1 8:-0.633588 9:1 10:-0.354839 12:0.333333 13:1
+1 1:-0.0416667 2:1 3:1 4:-0.660377 5:-0.525114 6:-1 7:-1 8:0.358779 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:-0.541667 2:1 3:1 4:-0.698113 5:-0.812785 6:-1 7:1 8:-0.343511 9:1 10:-0.354839 12:-1 13:1
+1 1:0.208333 2:1 3:0.333333 4:-0.283019 5:-0.552511 6:-1 7:1 8:0.557252 9:-1 10:0.0322581 11:-1 12:0.333333 13:1
-1 1:-0.5 2:-1 3:0.333333 4:-0.660377 5:-0.351598 6:-1 7:1 8:0.541985 9:1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.5 2:1 3:0.333333 4:-0.660377 5:-0.43379 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.125 2:-1 3:0.333333 4:-0.509434 5:-0.575342 6:-1 7:-1 8:0.328244 9:-1 10:-0.483871 12:-1 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:-0.735849 5:-0.356164 6:-1 7:1 8:0.465649 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.458333 2:-1 3:1 4:-0.320755 5:-0.191781 6:-1 7:-1 8:-0.221374 9:-1 10:-0.354839 12:0.333333 13:-1
-1 1:-0.0833333 2:-1 3:0.333333 4:-0.320755 5:-0.406393 6:-1 7:1 8:0.19084 9:-1 10:-0.83871 11:-1 12:-1 13:-1
-1 1:-0.291667 2:-1 3:-0.333333 4:-0.792453 5:-0.643836 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.0833333 2:1 3:1 4:-0.132075 5:-0.584475 6:-1 7:-1 8:-0.389313 9:1 10:0.806452 11:1 12:-1 13:1
-1 1:-0.333333 2:1 3:-0.333333 4:-0.358491 5:-0.16895 6:-1 7:1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.125 2:1 3:-1 4:-0.509434 5:-0.694064 6:-1 7:1 8:0.389313 9:-1 10:-0.387097 12:-1 13:1
+1 1:0.541667 2:-1 3:1 4:0.584906 5:-0.534247 6:1 7:-1 8:0.435115 9:1 10:-0.677419 12:0.333333 13:1
+1 1:-0.625 2:1 3:-1 4:-0.509434 5:-0.520548 6:-1 7:-1 8:0.694656 9:1 10:0.225806 12:-1 13:1
+1 1:0.375 2:-1 3:1 4:0.0566038 5:-0.461187 6:-1 7:-1 8:0.267176 9:1 10:-0.548387 12:-1 13:-1
-1 1:0.0833333 2:1 3:-0.333333 4:-0.320755 5:-0.378995 6:-1 7:-1 8:0.282443 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.358491 5:-0.392694 6:-1 7:1 8:-0.0992366 9:1 10:-0.0322581 12:0.333333 13:1
-1 1:-0.416667 2:1 3:1 4:-0.698113 5:-0.611872 6:-1 7:-1 8:0.374046 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:0.458333 2:-1 3:1 4:0.622642 5:-0.0913242 6:-1 7:-1 8:0.267176 9:1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.125 2:-1 3:1 4:-0.698113 5:-0.415525 6:-1 7:1 8:0.343511 9:-1 10:-1 11:-1 12:-1 13:-1
-1 2:1 3:0.333333 4:-0.320755 5:-0.675799 6:1 7:1 8:0.236641 9:-1 10:-0.612903 11:1 12:-1 13:-1
-1 1:-0.333333 2:-1 3:1 4:-0.169811 5:-0.497717 6:-1 7:1 8:0.236641 9:1 10:-0.935484 12:-1 13:-1
+1 1:0.5 2:1 3:-1 4:-0.169811 5:-0.287671 6:1 7:1 8:0.572519 9:-1 10:-0.548387 12:-0.333333 13:-1
-1 1:0.666667 2:1 3:-1 4:0.245283 5:-0.506849 6:1 7:1 8:-0.0839695 9:-1 10:-0.967742 12:-0.333333 13:-1
+1 1:0.666667 2:1 3:0.333333 4:-0.132075 5:-0.415525 6:-1 7:1 8:0.145038 9:-1 10:-0.354839 12:1 13:1
+1 1:0.583333 2:1 3:1 4:-0.886792 5:-0.210046 6:-1 7:1 8:-0.175573 9:1 10:-0.709677 12:0.333333 13:-1
-1 1:0.625 2:-1 3:0.333333 4:-0.509434 5:-0.611872 6:-1 7:1 8:-0.328244 9:-1 10:-0.516129 12:-1 13:-1
-1 1:-0.791667 2:1 3:-1 4:-0.54717 5:-0.744292 6:-1 7:1 8:0.572519 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.375 2:-1 3:1 4:-0.169811 5:-0.232877 6:1 7:-1 8:-0.465649 9:-1 10:-0.387097 12:1 13:-1
+1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.214612 6:-1 7:-1 8:-0.221374 9:1 10:0.354839 12:1 13:1
+1 1:-0.291667 2:1 3:0.333333 4:0.0566038 5:-0.520548 6:-1 7:-1 8:0.160305 9:-1 10:0.16129 12:-1 13:-1
+1 1:0.583333 2:1 3:1 4:-0.415094 5:-0.415525 6:1 7:-1 8:0.40458 9:-1 10:-0.935484 12:0.333333 13:1
-1 1:-0.125 2:1 3:0.333333 4:-0.339623 5:-0.680365 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:1 3:0.333333 4:-0.509434 5:-0.479452 6:1 7:-1 8:0.877863 9:-1 10:-0.741935 11:1 12:-1 13:1
+1 1:0.125 2:-1 3:1 4:-0.245283 5:0.292237 6:-1 7:1 8:0.206107 9:1 10:-0.387097 12:0.333333 13:1
+1 1:-0.5 2:1 3:1 4:-0.698113 5:-0.789954 6:-1 7:1 8:0.328244 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:-0.458333 2:-1 3:1 4:-0.849057 5:-0.365297 6:-1 7:1 8:-0.221374 9:-1 10:-0.806452 12:-1 13:-1
-1 2:1 3:0.333333 4:-0.320755 5:-0.452055 6:1 7:1 8:0.557252 9:-1 10:-1 11:-1 12:1 13:-1
-1 1:-0.416667 2:1 3:0.333333 4:-0.320755 5:-0.136986 6:-1 7:-1 8:0.389313 9:-1 10:-0.387097 11:-1 12:-0.333333 13:-1
+1 1:0.125 2:1 3:1 4:-0.283019 5:-0.73516 6:-1 7:1 8:-0.480916 9:1 10:-0.322581 12:-0.333333 13:0.5
-1 1:-0.0416667 2:1 3:1 4:-0.735849 5:-0.511416 6:1 7:-1 8:0.160305 9:-1 10:-0.967742 11:-1 12:1 13:1
-1 1:0.375 2:-1 3:1 4:-0.132075 5:0.223744 6:-1 7:1 8:0.312977 9:-1 10:-0.612903 12:-1 13:-1
+1 1:0.708333 2:1 3:0.333333 4:0.245283 5:-0.347032 6:-1 7:-1 8:-0.374046 9:1 10:-0.0645161 12:-0.333333 13:1
-1 1:0.0416667 2:1 3:1 4:-0.132075 5:-0.484018 6:-1 7:-1 8:0.358779 9:-1 10:-0.612903 11:-1 12:-1 13:-1
+1 1:0.708333 2:1 3:1 4:-0.0377358 5:-0.780822 6:-1 7:-1 8:-0.175573 9:1 10:-0.16129 11:1 12:-1 13:1
-1 1:0.0416667 2:1 3:-0.333333 4:-0.735849 5:-0.164384 6:-1 7:-1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:1
+1 1:-0.75 2:1 3:1 4:-0.396226 5:-0.287671 6:-1 7:1 8:0.29771 9:1 10:-1 11:-1 12:-1 13:1
-1 1:-0.208333 2:1 3:0.333333 4:-0.433962 5:-0.410959 6:1 7:-1 8:0.587786 9:-1 10:-1 11:-1 12:0.333333 13:-1
-1 1:0.0833333 2:-1 3:-0.333333 4:-0.226415 5:-0.43379 6:-1 7:1 8:0.374046 9:-1 10:-0.548387 12:-1 13:-1
-1 1:0.208333 2:-1 3:1 4:-0.886792 5:-0.442922 6:-1 7:1 8:-0.221374 9:-1 10:-0.677419 12:-1 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:-0.698113 5:-0.598174 6:-1 7:-1 8:0.328244 9:-1 10:-0.483871 12:-1 13:-1
-1 1:0.666667 2:-1 3:-1 4:-0.132075 5:-0.484018 6:-1 7:-1 8:0.221374 9:-1 10:-0.419355 11:-1 12:0.333333 13:-1
+1 1:1 2:1 3:1 4:-0.415094 5:-0.187215 6:-1 7:1 8:0.389313 9:1 10:-1 11:-1 12:1 13:-1
-1 1:0.625 2:1 3:0.333333 4:-0.54717 5:-0.310502 6:-1 7:-1 8:0.221374 9:-1 10:-0.677419 11:-1 12:-0.333333 13:1
+1 1:0.208333 2:1 3:1 4:-0.415094 5:-0.205479 6:-1 7:1 8:0.526718 9:-1 10:-1 11:-1 12:0.333333 13:1
+1 1:0.291667 2:1 3:1 4:-0.415094 5:-0.39726 6:-1 7:1 8:0.0687023 9:1 10:-0.0967742 12:-0.333333 13:1
+1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.210046 6:-1 7:-1 8:0.557252 9:1 10:-0.483871 11:-1 12:-1 13:1
+1 1:0.0833333 2:1 3:1 4:0.245283 5:-0.255708 6:-1 7:1 8:0.129771 9:1 10:-0.741935 12:-0.333333 13:1
-1 1:-0.0416667 2:1 3:-1 4:0.0943396 5:-0.214612 6:1 7:-1 8:0.633588 9:-1 10:-0.612903 12:-1 13:1
-1 1:0.291667 2:-1 3:0.333333 4:-0.849057 5:-0.123288 6:-1 7:-1 8:0.358779 9:-1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:0.208333 2:1 3:0.333333 4:-0.792453 5:-0.479452 6:-1 7:1 8:0.267176 9:1 10:-0.806452 12:-1 13:1
+1 1:0.458333 2:1 3:0.333333 4:-0.415094 5:-0.164384 6:-1 7:-1 8:-0.0839695 9:1 10:-0.419355 12:-1 13:1
-1 1:-0.666667 2:1 3:0.333333 4:-0.320755 5:-0.43379 6:-1 7:-1 8:0.770992 9:-1 10:0.129032 11:1 12:-1 13:-1
+1 1:0.25 2:1 3:-1 4:0.433962 5:-0.260274 6:-1 7:1 8:0.343511 9:-1 10:-0.935484 12:-1 13:1
-1 1:-0.0833333 2:1 3:0.333333 4:-0.415094 5:-0.456621 6:1 7:1 8:0.450382 9:-1 10:-0.225806 12:-1 13:-1
-1 1:-0.416667 2:-1 3:0.333333 4:-0.471698 5:-0.60274 6:-1 7:-1 8:0.435115 9:-1 10:-0.935484 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.358491 5:-0.589041 6:-1 7:1 8:-0.0839695 9:1 10:-0.290323 12:1 13:1
-1 1:-1 2:1 3:-0.333333 4:-0.320755 5:-0.643836 6:-1 7:1 8:1 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.5 2:-1 3:-0.333333 4:-0.320755 5:-0.643836 6:-1 7:1 8:0.541985 9:-1 10:-0.548387 11:-1 12:-1 13:-1
-1 1:0.416667 2:-1 3:0.333333 4:-0.226415 5:-0.424658 6:-1 7:1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.0833333 2:1 3:0.333333 4:-1 5:-0.538813 6:-1 7:-1 8:0.267176 9:1 10:-1 11:-1 12:-0.333333 13:1
-1 1:0.0416667 2:1 3:0.333333 4:-0.509434 5:-0.39726 6:-1 7:1 8:0.160305 9:-1 10:-0.870968 12:-1 13:1
-1 1:-0.375 2:1 3:-0.333333 4:-0.509434 5:-0.570776 6:-1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.0416667 2:1 3:1 4:-0.698113 5:-0.484018 6:-1 7:-1 8:-0.160305 9:1 10:-0.0967742 12:-0.333333 13:1
+1 1:0.5 2:1 3:1 4:-0.226415 5:-0.415525 6:-1 7:1 8:-0.145038 9:-1 10:-0.0967742 12:-0.333333 13:1
-1 1:0.166667 2:1 3:0.333333 4:0.0566038 5:-0.808219 6:-1 7:-1 8:0.572519 9:-1 10:-0.483871 11:-1 12:-1 13:-1
+1 1:0.416667 2:1 3:1 4:-0.320755 5:-0.0684932 6:1 7:1 8:-0.0687023 9:1 10:-0.419355 11:-1 12:1 13:1
-1 1:-0.75 2:-1 3:1 4:-0.169811 5:-0.739726 6:-1 7:-1 8:0.694656 9:-1 10:-0.548387 11:-1 12:-1 13:-1
-1 1:-0.5 2:1 3:-0.333333 4:-0.226415 5:-0.648402 6:-1 7:-1 8:-0.0687023 9:-1 10:-1 12:-1 13:0.5
+1 1:0.375 2:-1 3:0.333333 4:-0.320755 5:-0.374429 6:-1 7:-1 8:-0.603053 9:-1 10:-0.612903 12:-0.333333 13:1
+1 1:-0.416667 2:-1 3:1 4:-0.283019 5:-0.0182648 6:1 7:1 8:-0.00763359 9:1 10:-0.0322581 12:-1 13:1
-1 1:0.208333 2:-1 3:-1 4:0.0566038 5:-0.283105 6:1 7:1 8:0.389313 9:-1 10:-0.677419 11:-1 12:-1 13:-1
-1 1:-0.0416667 2:1 3:-1 4:-0.54717 5:-0.726027 6:-1 7:1 8:0.816794 9:-1 10:-1 12:-1 13:0.5
+1 1:0.333333 2:-1 3:1 4:-0.0377358 5:-0.173516 6:-1 7:1 8:0.145038 9:1 10:-0.677419 12:-1 13:1
+1 1:-0.583333 2:1 3:1 4:-0.54717 5:-0.575342 6:-1 7:-1 8:0.0534351 9:-1 10:-0.612903 12:-1 13:1
-1 1:-0.333333 2:1 3:1 4:-0.603774 5:-0.388128 6:-1 7:1 8:0.740458 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.0416667 2:1 3:1 4:-0.358491 5:-0.410959 6:-1 7:-1 8:0.374046 9:1 10:-1 11:-1 12:-0.333333 13:1
-1 1:0.375 2:1 3:0.333333 4:-0.320755 5:-0.520548 6:-1 7:-1 8:0.145038 9:-1 10:-0.419355 12:1 13:1
+1 1:0.375 2:-1 3:1 4:0.245283 5:-0.826484 6:-1 7:1 8:0.129771 9:-1 10:1 11:1 12:1 13:1
-1 2:-1 3:1 4:-0.169811 5:-0.506849 6:-1 7:1 8:0.358779 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.416667 2:1 3:1 4:-0.509434 5:-0.767123 6:-1 7:1 8:-0.251908 9:1 10:-0.193548 12:-1 13:1
-1 1:-0.25 2:1 3:0.333333 4:-0.169811 5:-0.401826 6:-1 7:1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.0416667 2:1 3:-0.333333 4:-0.509434 5:-0.0913242 6:-1 7:-1 8:0.541985 9:-1 10:-0.935484 11:-1 12:-1 13:-1
+1 1:0.625 2:1 3:0.333333 4:0.622642 5:-0.324201 6:1 7:1 8:0.206107 9:1 10:-0.483871 12:-1 13:1
-1 1:-0.583333 2:1 3:0.333333 4:-0.132075 5:-0.109589 6:-1 7:1 8:0.694656 9:-1 10:-1 11:-1 12:-1 13:-1
-1 2:-1 3:1 4:-0.320755 5:-0.369863 6:-1 7:1 8:0.0992366 9:-1 10:-0.870968 12:-1 13:-1
+1 1:0.375 2:-1 3:1 4:-0.132075 5:-0.351598 6:-1 7:1 8:0.358779 9:-1 10:0.16129 11:1 12:0.333333 13:-1
-1 1:-0.0833333 2:-1 3:0.333333 4:-0.132075 5:-0.16895 6:-1 7:1 8:0.0839695 9:-1 10:-0.516129 11:-1 12:-0.333333 13:-1
+1 1:0.291667 2:1 3:1 4:-0.320755 5:-0.420091 6:-1 7:-1 8:0.114504 9:1 10:-0.548387 11:-1 12:-0.333333 13:1
+1 1:0.5 2:1 3:1 4:-0.698113 5:-0.442922 6:-1 7:1 8:0.328244 9:-1 10:-0.806452 11:-1 12:0.333333 13:0.5
-1 1:0.5 2:-1 3:0.333333 4:0.150943 5:-0.347032 6:-1 7:-1 8:0.175573 9:-1 10:-0.741935 11:-1 12:-1 13:-1
+1 1:0.291667 2:1 3:0.333333 4:-0.132075 5:-0.730594 6:-1 7:1 8:0.282443 9:-1 10:-0.0322581 12:-1 13:-1
+1 1:0.291667 2:1 3:1 4:-0.0377358 5:-0.287671 6:-1 7:1 8:0.0839695 9:1 10:-0.0967742 12:0.333333 13:1
+1 1:0.0416667 2:1 3:1 4:-0.509434 5:-0.716895 6:-1 7:-1 8:-0.358779 9:-1 10:-0.548387 12:-0.333333 13:1
-1 1:-0.375 2:1 3:-0.333333 4:-0.320755 5:-0.575342 6:-1 7:1 8:0.78626 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.375 2:1 3:1 4:-0.660377 5:-0.251142 6:-1 7:1 8:0.251908 9:-1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:-0.0833333 2:1 3:0.333333 4:-0.698113 5:-0.776256 6:-1 7:-1 8:-0.206107 9:-1 10:-0.806452 11:-1 12:-1 13:-1
-1 1:0.25 2:1 3:0.333333 4:0.0566038 5:-0.607306 6:1 7:-1 8:0.312977 9:-1 10:-0.483871 11:-1 12:-1 13:-1
-1 1:0.75 2:-1 3:-0.333333 4:0.245283 5:-0.196347 6:-1 7:-1 8:0.389313 9:-1 10:-0.870968 11:-1 12:0.333333 13:-1
-1 1:0.333333 2:1 3:0.333333 4:0.0566038 5:-0.465753 6:1 7:-1 8:0.00763359 9:1 10:-0.677419 12:-1 13:-1
+1 1:0.0833333 2:1 3:1 4:-0.283019 5:0.0365297 6:-1 7:-1 8:-0.0687023 9:1 10:-0.612903 12:-0.333333 13:1
+1 1:0.458333 2:1 3:0.333333 4:-0.132075 5:-0.0456621 6:-1 7:-1 8:0.328244 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.416667 2:1 3:1 4:0.0566038 5:-0.447489 6:-1 7:-1 8:0.526718 9:-1 10:-0.516129 11:-1 12:-1 13:-1
-1 1:0.208333 2:-1 3:0.333333 4:-0.509434 5:-0.0228311 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.291667 2:1 3:1 4:-0.320755 5:-0.634703 6:-1 7:1 8:-0.0687023 9:1 10:-0.225806 12:0.333333 13:1
+1 1:0.208333 2:1 3:-0.333333 4:-0.509434 5:-0.278539 6:-1 7:1 8:0.358779 9:-1 10:-0.419355 12:-1 13:-1
-1 1:-0.166667 2:1 3:-0.333333 4:-0.320755 5:-0.360731 6:-1 7:-1 8:0.526718 9:-1 10:-0.806452 11:-1 12:-1 13:-1
+1 1:-0.208333 2:1 3:-0.333333 4:-0.698113 5:-0.52968 6:-1 7:-1 8:0.480916 9:-1 10:-0.677419 11:1 12:-1 13:1
-1 1:-0.0416667 2:1 3:0.333333 4:0.471698 5:-0.666667 6:1 7:-1 8:0.389313 9:-1 10:-0.83871 11:-1 12:-1 13:1
-1 1:-0.375 2:1 3:-0.333333 4:-0.509434 5:-0.374429 6:-1 7:-1 8:0.557252 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:0.125 2:-1 3:-0.333333 4:-0.132075 5:-0.232877 6:-1 7:1 8:0.251908 9:-1 10:-0.580645 12:-1 13:-1
-1 1:0.166667 2:1 3:1 4:-0.132075 5:-0.69863 6:-1 7:-1 8:0.175573 9:-1 10:-0.870968 12:-1 13:0.5
+1 1:0.583333 2:1 3:1 4:0.245283 5:-0.269406 6:-1 7:1 8:-0.435115 9:1 10:-0.516129 12:1 13:-1
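Each line of heart_scale above is one training instance in LIBSVM format: a label (+1/-1) followed by space-separated index:value pairs with strictly increasing indices; indices that are absent (for example, feature 11 on the first line) are implicit zeros, which is what makes the sparse representation compact. As a sketch, the first line decodes to this dense 13-dimensional vector:

	/* label +1, features 1-13; missing index 11 means x[10] = 0 */
	double x[13] = {0.708333, 1, 1, -0.320755, -0.105023, -1, 1,
	                -0.419847, -1, -0.225806, 0, 1, -1};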
BIN
liblinear-2.49/liblinear.so.6
Executable file
Binary file not shown.
3773
liblinear-2.49/linear.cpp
Normal file
File diff suppressed because it is too large
24
liblinear-2.49/linear.def
Normal file
@@ -0,0 +1,24 @@
LIBRARY liblinear
EXPORTS
	train @1
	cross_validation @2
	save_model @3
	load_model @4
	get_nr_feature @5
	get_nr_class @6
	get_labels @7
	predict_values @8
	predict @9
	predict_probability @10
	free_and_destroy_model @11
	free_model_content @12
	destroy_param @13
	check_parameter @14
	check_probability_model @15
	set_print_string_function @16
	get_decfun_coef @17
	get_decfun_bias @18
	check_regression_model @19
	find_parameters @20
	get_decfun_rho @21
	check_oneclass_model @22
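linear.def assigns each exported function a fixed ordinal so the Windows DLL exposes a stable C interface. A hedged sketch of resolving one of these exports at run time (Win32 API; the ANSI build and the main() driver are assumptions made here for illustration):

	#include <windows.h>
	#include <stdio.h>

	int main(void)
	{
		HMODULE h = LoadLibrary("liblinear.dll");
		if (h == NULL) { fprintf(stderr, "load failed\n"); return 1; }
		/* resolve by the exported name listed in linear.def */
		FARPROC p = GetProcAddress(h, "get_decfun_rho");
		if (p == NULL) { fprintf(stderr, "symbol not found\n"); return 1; }
		FreeLibrary(h);
		return 0;
	}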
90
liblinear-2.49/linear.h
Normal file
@@ -0,0 +1,90 @@
#include <stdbool.h>
#ifndef _LIBLINEAR_H
#define _LIBLINEAR_H

#define LIBLINEAR_VERSION 249

#ifdef __cplusplus
extern "C" {
#endif

extern int liblinear_version;

struct feature_node
{
	int index;
	double value;
};

struct problem
{
	int l, n;
	double *y;
	struct feature_node **x;
	double bias;            /* < 0 if no bias term */
};

enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL, ONECLASS_SVM = 21 }; /* solver_type */

struct parameter
{
	int solver_type;

	/* these are for training only */
	double eps;             /* stopping tolerance */
	double C;
	int nr_weight;
	int *weight_label;
	double* weight;
	double p;
	double nu;
	double *init_sol;
	int regularize_bias;
	bool w_recalc;          /* for -s 1, 3; may be extended to -s 12, 13, 21 */
};

struct model
{
	struct parameter param;
	int nr_class;           /* number of classes */
	int nr_feature;
	double *w;
	int *label;             /* label of each class */
	double bias;
	double rho;             /* one-class SVM only */
};

struct model* train(const struct problem *prob, const struct parameter *param);
void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, double *target);
void find_parameters(const struct problem *prob, const struct parameter *param, int nr_fold, double start_C, double start_p, double *best_C, double *best_p, double *best_score);

double predict_values(const struct model *model_, const struct feature_node *x, double* dec_values);
double predict(const struct model *model_, const struct feature_node *x);
double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);

int save_model(const char *model_file_name, const struct model *model_);
struct model *load_model(const char *model_file_name);

int get_nr_feature(const struct model *model_);
int get_nr_class(const struct model *model_);
void get_labels(const struct model *model_, int* label);
double get_decfun_coef(const struct model *model_, int feat_idx, int label_idx);
double get_decfun_bias(const struct model *model_, int label_idx);
double get_decfun_rho(const struct model *model_);

void free_model_content(struct model *model_ptr);
void free_and_destroy_model(struct model **model_ptr_ptr);
void destroy_param(struct parameter *param);

const char *check_parameter(const struct problem *prob, const struct parameter *param);
int check_probability_model(const struct model *model);
int check_regression_model(const struct model *model);
int check_oneclass_model(const struct model *model);
void set_print_string_function(void (*print_func) (const char*));

#ifdef __cplusplus
}
#endif

#endif /* _LIBLINEAR_H */
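The header above is the complete public C API. A minimal end-to-end sketch (the toy two-point data set and the main() driver are invented here for illustration; they are not part of the package): build a problem, train an L2-regularized logistic regression, predict, and free.

	#include <stdio.h>
	#include "linear.h"

	int main(void)
	{
		/* two instances, one feature; each row is terminated by index = -1 */
		struct feature_node x1[] = { {1, 1.0}, {-1, 0.0} };
		struct feature_node x2[] = { {1, -1.0}, {-1, 0.0} };
		struct feature_node *x[] = { x1, x2 };
		double y[] = { +1, -1 };

		struct problem prob = { 2, 1, y, x, -1 }; /* l, n, y, x, bias < 0 */

		struct parameter param = { 0 };           /* zero-initialize all fields */
		param.solver_type = L2R_LR;
		param.C = 1;
		param.eps = 0.01;
		param.p = 0.1;
		param.nu = 0.5;
		param.regularize_bias = 1;

		const char *err = check_parameter(&prob, &param);
		if (err) { fprintf(stderr, "%s\n", err); return 1; }

		struct model *m = train(&prob, &param);
		printf("predict(x1) = %g\n", predict(m, x1)); /* expected +1 */

		free_and_destroy_model(&m);
		destroy_param(&param); /* safe: the weight arrays are NULL here */
		return 0;
	}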
41
liblinear-2.49/matlab/Makefile
Normal file
@@ -0,0 +1,41 @@
# This Makefile is used under Linux

MATLABDIR ?= /usr/local/matlab
CXX ?= g++
#CXX = g++-3.3
CC ?= gcc
CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..

MEX = $(MATLABDIR)/bin/mex
MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
# comment the following line if you use MATLAB on a 32-bit computer
MEX_OPTION += -largeArrayDims
MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)

all: matlab

matlab: binary

octave:
	@echo "please type make under Octave"

binary: train.$(MEX_EXT) predict.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)

train.$(MEX_EXT): train.c ../linear.h ../newton.cpp ../linear.cpp linear_model_matlab.c \
		../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
	$(MEX) $(MEX_OPTION) train.c ../newton.cpp ../linear.cpp linear_model_matlab.c \
		../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c

predict.$(MEX_EXT): predict.c ../linear.h ../newton.cpp ../linear.cpp linear_model_matlab.c \
		../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
	$(MEX) $(MEX_OPTION) predict.c ../newton.cpp ../linear.cpp linear_model_matlab.c \
		../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c

libsvmread.$(MEX_EXT): libsvmread.c
	$(MEX) $(MEX_OPTION) libsvmread.c

libsvmwrite.$(MEX_EXT): libsvmwrite.c
	$(MEX) $(MEX_OPTION) libsvmwrite.c

clean:
	rm -f *~ *.o *.mex* *.obj
205
liblinear-2.49/matlab/README
Executable file
@@ -0,0 +1,205 @@
--------------------------------------------
--- MATLAB/OCTAVE interface of LIBLINEAR ---
--------------------------------------------

Table of Contents
=================

- Introduction
- Installation
- Usage
- Returned Model Structure
- Other Utilities
- Examples
- Additional Information


Introduction
============

This tool provides a simple interface to LIBLINEAR, a library for
large-scale regularized linear classification and regression
(http://www.csie.ntu.edu.tw/~cjlin/liblinear). It is very easy to use
as the usage and the way of specifying parameters are the same as
those of LIBLINEAR.

Installation
============

On Windows systems, starting from version 2.48, we no longer provide
pre-built mex files. If you would like to build the package, please
rely on the following steps.

We recommend using make.m on both MATLAB and OCTAVE. Just type 'make'
to build 'libsvmread.mex', 'libsvmwrite.mex', 'train.mex', and
'predict.mex'.

On MATLAB or Octave:

>> make

If make.m does not work on MATLAB (especially for Windows), try 'mex
-setup' to choose a suitable compiler for mex. Make sure your compiler
is accessible and workable. Then type 'make' to do the installation.

Example:

matlab>> mex -setup

MATLAB will choose the default compiler. If you have multiple compilers,
a list is given and you can choose one from the list. For more details,
please check the following page:

https://www.mathworks.com/help/matlab/matlab_external/choose-c-or-c-compilers.html

On Windows, make.m has been tested using Visual C++.

On Unix systems, if neither make.m nor 'mex -setup' works, please use
Makefile and type 'make' in a command window. Note that we assume
your MATLAB is installed in '/usr/local/matlab'. If not, please change
MATLABDIR in Makefile.

Example:
	linux> make

To use octave, type 'make octave':

Example:
	linux> make octave

For a list of supported/compatible compilers for MATLAB, please check
the following page:

http://www.mathworks.com/support/compilers/current_release/

Usage
=====

matlab> model = train(training_label_vector, training_instance_matrix [,'liblinear_options', 'col']);

        -training_label_vector:
            An m by 1 vector of training labels. (type must be double)
        -training_instance_matrix:
            An m by n matrix of m training instances with n features.
            It must be a sparse matrix. (type must be double)
        -liblinear_options:
            A string of training options in the same format as that of LIBLINEAR.
        -col:
            if 'col' is set, each column of training_instance_matrix is a data instance. Otherwise each row is a data instance.

matlab> [predicted_label, accuracy, decision_values/prob_estimates] = predict(testing_label_vector, testing_instance_matrix, model [, 'liblinear_options', 'col']);
matlab> [predicted_label] = predict(testing_label_vector, testing_instance_matrix, model [, 'liblinear_options', 'col']);

        -testing_label_vector:
            An m by 1 vector of prediction labels. If labels of test
            data are unknown, simply use any random values. (type must be double)
        -testing_instance_matrix:
            An m by n matrix of m testing instances with n features.
            It must be a sparse matrix. (type must be double)
        -model:
            The output of train.
        -liblinear_options:
            A string of testing options in the same format as that of LIBLINEAR.
        -col:
            if 'col' is set, each column of testing_instance_matrix is a data instance. Otherwise each row is a data instance.

Returned Model Structure
========================

The 'train' function returns a model which can be used for future
prediction. It is a structure and is organized as [Parameters, nr_class,
nr_feature, bias, Label, w, rho]:

        -Parameters: Parameters (now only solver type is provided)
        -nr_class: number of classes; = 2 for regression
        -nr_feature: number of features in training data (without including the bias term)
        -bias: If >= 0, we assume one additional feature is added to the end
            of each data instance.
        -Label: label of each class; empty for regression
        -w: a nr_w-by-n matrix for the weights, where n is nr_feature
            or nr_feature+1 depending on the existence of the bias term.
            nr_w is 1 if nr_class=2 and -s is not 4 (i.e., not
            multi-class svm by Crammer and Singer). It is
            nr_class otherwise.
        -rho: the bias term of one-class SVM.

If the '-v' option is specified, cross validation is conducted and the
returned model is just a scalar: cross-validation accuracy for
classification and mean-squared error for regression.

If the '-C' option is specified, best parameters are found by cross
validation. The parameter selection utility is supported only by -s 0,
-s 2 (for finding C) and -s 11 (for finding C, p). The returned
model is a three dimensional vector with the best C, the best p, and
the corresponding cross-validation accuracy or mean squared error. The
returned best p for -s 0 and -s 2 is set to -1 because the p parameter
is not used by classification models.

Result of Prediction
====================

The function 'predict' has three outputs. The first one,
predicted_label, is a vector of predicted labels. The second output,
accuracy, is a vector including accuracy (for classification), mean
squared error, and squared correlation coefficient (for regression).
The third is a matrix containing decision values or probability
estimates (if '-b 1' is specified). If k is the number of classes
and k' is the number of classifiers (k'=1 if k=2, otherwise k'=k), for decision values,
each row includes results of k' binary linear classifiers. For probabilities,
each row contains k values indicating the probability that the testing instance is in
each class. Note that the order of classes here is the same as the 'Label'
field in the model structure.

Other Utilities
===============

A MATLAB function libsvmread reads files in LIBSVM format:

[label_vector, instance_matrix] = libsvmread('data.txt');

Two outputs are labels and instances, which can then be used as inputs
of train or predict.

A MATLAB function libsvmwrite writes a MATLAB matrix to a file in LIBSVM format:

libsvmwrite('data.txt', label_vector, instance_matrix)

The instance_matrix must be a sparse matrix. (type must be double)
For windows, `libsvmread.mexw64' and `libsvmwrite.mexw64' are ready in
the directory `..\windows'.

These codes are prepared by Rong-En Fan and Kai-Wei Chang from National
Taiwan University.

Examples
========

Train and test on the provided data heart_scale:

matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> model = train(heart_scale_label, heart_scale_inst, '-c 1');
matlab> [predict_label, accuracy, dec_values] = predict(heart_scale_label, heart_scale_inst, model); % test the training data

Note that for testing, you can put anything in the testing_label_vector.

For probability estimates, you need '-b 1' only in the testing phase:

matlab> [predict_label, accuracy, prob_estimates] = predict(heart_scale_label, heart_scale_inst, model, '-b 1');

Use the best parameter to train (C for -s 0, 2 and C, p for -s 11):

matlab> best = train(heart_scale_label, heart_scale_inst, '-C -s 0');
matlab> model = train(heart_scale_label, heart_scale_inst, sprintf('-c %f -s 0', best(1))); % use the same solver: -s 0

Additional Information
======================

Please cite LIBLINEAR as follows

R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
LIBLINEAR: A Library for Large Linear Classification, Journal of
Machine Learning Research 9(2008), 1871-1874. Software available at
http://www.csie.ntu.edu.tw/~cjlin/liblinear

For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>.
212
liblinear-2.49/matlab/libsvmread.c
Normal file
@@ -0,0 +1,212 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>

#include "mex.h"

#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
#ifndef max
#define max(x,y) (((x)>(y))?(x):(y))
#endif
#ifndef min
#define min(x,y) (((x)<(y))?(x):(y))
#endif

void exit_with_help()
{
	mexPrintf(
	"Usage: [label_vector, instance_matrix] = libsvmread('filename');\n"
	);
}

static void fake_answer(int nlhs, mxArray *plhs[])
{
	int i;
	for(i=0;i<nlhs;i++)
		plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
}

static char *line;
static int max_line_len;

static char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	while(strrchr(line,'\n') == NULL)
	{
		max_line_len *= 2;
		line = (char *) realloc(line, max_line_len);
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;
	}
	return line;
}

// read in a problem (in libsvm format)
void read_problem(const char *filename, int nlhs, mxArray *plhs[])
{
	int max_index, min_index, inst_max_index;
	size_t elements, k, i, l=0;
	FILE *fp = fopen(filename,"r");
	char *endptr;
	mwIndex *ir, *jc;
	double *labels, *samples;

	if(fp == NULL)
	{
		mexPrintf("can't open input file %s\n",filename);
		fake_answer(nlhs, plhs);
		return;
	}

	max_line_len = 1024;
	line = (char *) malloc(max_line_len*sizeof(char));

	max_index = 0;
	min_index = 1; // our index starts from 1
	elements = 0;
	while(readline(fp) != NULL)
	{
		char *idx, *val;
		// features
		int index = 0;

		inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
		strtok(line," \t"); // label
		while (1)
		{
			idx = strtok(NULL,":"); // index:value
			val = strtok(NULL," \t");
			if(val == NULL)
				break;

			errno = 0;
			index = (int) strtol(idx,&endptr,10);
			if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index)
			{
				mexPrintf("Wrong input format at line %d\n",l+1);
				fake_answer(nlhs, plhs);
				return;
			}
			else
				inst_max_index = index;

			min_index = min(min_index, index);
			elements++;
		}
		max_index = max(max_index, inst_max_index);
		l++;
	}
	rewind(fp);

	// y
	plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL);
	// x^T
	if (min_index <= 0)
		plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL);
	else
		plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL);

	labels = mxGetPr(plhs[0]);
	samples = mxGetPr(plhs[1]);
	ir = mxGetIr(plhs[1]);
	jc = mxGetJc(plhs[1]);

	k=0;
	for(i=0;i<l;i++)
	{
		char *idx, *val, *label;
		jc[i] = k;

		readline(fp);

		label = strtok(line," \t\n");
		if(label == NULL)
		{
			mexPrintf("Empty line at line %d\n",i+1);
			fake_answer(nlhs, plhs);
			return;
		}
		labels[i] = strtod(label,&endptr);
		if(endptr == label || *endptr != '\0')
		{
			mexPrintf("Wrong input format at line %d\n",i+1);
			fake_answer(nlhs, plhs);
			return;
		}

		// features
		while(1)
		{
			idx = strtok(NULL,":");
			val = strtok(NULL," \t");
			if(val == NULL)
				break;

			ir[k] = (mwIndex) (strtol(idx,&endptr,10) - min_index); // precomputed kernel has <index> start from 0

			errno = 0;
			samples[k] = strtod(val,&endptr);
			if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
			{
				mexPrintf("Wrong input format at line %d\n",i+1);
				fake_answer(nlhs, plhs);
				return;
			}
			++k;
		}
	}
	jc[l] = k;

	fclose(fp);
	free(line);

	{
		mxArray *rhs[1], *lhs[1];
		rhs[0] = plhs[1];
		if(mexCallMATLAB(1, lhs, 1, rhs, "transpose"))
		{
			mexPrintf("Error: cannot transpose problem\n");
			fake_answer(nlhs, plhs);
			return;
		}
		plhs[1] = lhs[0];
	}
}

void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
#define filename_size 256

	char filename[filename_size];

	if(nrhs != 1 || nlhs != 2)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	if(mxGetString(prhs[0], filename, filename_size) == 1){
		mexPrintf("Error: wrong or too long filename\n");
		fake_answer(nlhs, plhs);
		return;
	}

	read_problem(filename, nlhs, plhs);

	return;
}
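read_problem fills a MATLAB sparse matrix in compressed sparse column (CSC) form: jc[i] gives the offset of column i's first stored entry, ir[k] holds zero-based row indices, and samples[k] the values; instances are read in as columns and the matrix is transposed once at the end via mexCallMATLAB. As a sketch of the layout (a made-up 2-by-2 example, not taken from the code), the matrix [1 0; 0 2] stored column-wise is:

	/* jc = {0, 1, 2}  -- column i occupies entries jc[i] .. jc[i+1]-1 */
	/* ir = {0, 1}     -- zero-based row index of each stored entry   */
	/* pr = {1.0, 2.0} -- the nonzero values (samples[] above)        */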
119
liblinear-2.49/matlab/libsvmwrite.c
Normal file
119
liblinear-2.49/matlab/libsvmwrite.c
Normal file
@@ -0,0 +1,119 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "mex.h"
|
||||
|
||||
#ifdef MX_API_VER
|
||||
#if MX_API_VER < 0x07030000
|
||||
typedef int mwIndex;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
void exit_with_help()
|
||||
{
|
||||
mexPrintf(
|
||||
"Usage: libsvmwrite('filename', label_vector, instance_matrix);\n"
|
||||
);
|
||||
}
|
||||
|
||||
static void fake_answer(int nlhs, mxArray *plhs[])
|
||||
{
|
||||
int i;
|
||||
for(i=0;i<nlhs;i++)
|
||||
plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
|
||||
}
|
||||
|
||||
void libsvmwrite(const char *filename, const mxArray *label_vec, const mxArray *instance_mat)
|
||||
{
|
||||
FILE *fp = fopen(filename,"w");
|
||||
mwIndex *ir, *jc, k, low, high;
|
||||
size_t i, l, label_vector_row_num;
|
||||
double *samples, *labels;
|
||||
mxArray *instance_mat_col; // instance sparse matrix in column format
|
||||
|
||||
if(fp ==NULL)
|
||||
{
|
||||
mexPrintf("can't open output file %s\n",filename);
|
||||
return;
|
||||
}
|
||||
|
||||
// transpose instance matrix
|
||||
{
|
||||
mxArray *prhs[1], *plhs[1];
|
||||
prhs[0] = mxDuplicateArray(instance_mat);
|
||||
if(mexCallMATLAB(1, plhs, 1, prhs, "transpose"))
|
||||
{
|
||||
mexPrintf("Error: cannot transpose instance matrix\n");
|
||||
return;
|
||||
}
|
||||
instance_mat_col = plhs[0];
|
||||
mxDestroyArray(prhs[0]);
|
||||
}
|
||||
|
||||
// the number of instance
|
||||
l = mxGetN(instance_mat_col);
|
||||
label_vector_row_num = mxGetM(label_vec);
|
||||
|
||||
if(label_vector_row_num!=l)
|
||||
{
|
||||
mexPrintf("Length of label vector does not match # of instances.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
// each column is one instance
|
||||
labels = mxGetPr(label_vec);
|
||||
samples = mxGetPr(instance_mat_col);
|
||||
ir = mxGetIr(instance_mat_col);
|
||||
jc = mxGetJc(instance_mat_col);
|
||||
|
||||
for(i=0;i<l;i++)
|
||||
{
|
||||
fprintf(fp,"%.17g", labels[i]);
|
||||
|
||||
low = jc[i], high = jc[i+1];
|
||||
for(k=low;k<high;k++)
|
||||
fprintf(fp," %lu:%g", (size_t)ir[k]+1, samples[k]);
|
||||
|
||||
fprintf(fp,"\n");
|
||||
}
|
||||
|
||||
fclose(fp);
|
||||
return;
|
||||
}
|
||||
|
||||
void mexFunction( int nlhs, mxArray *plhs[],
|
||||
int nrhs, const mxArray *prhs[] )
|
||||
{
|
||||
if(nlhs > 0)
|
||||
{
|
||||
exit_with_help();
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
// Transform the input Matrix to libsvm format
|
||||
if(nrhs == 3)
|
||||
{
|
||||
char filename[256];
|
||||
if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2]))
|
||||
{
|
||||
mexPrintf("Error: label vector and instance matrix must be double\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mxGetString(prhs[0], filename, mxGetN(prhs[0])+1);
|
||||
|
||||
if(mxIsSparse(prhs[2]))
|
||||
libsvmwrite(filename, prhs[1], prhs[2]);
|
||||
else
|
||||
{
|
||||
mexPrintf("Instance_matrix must be sparse\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
exit_with_help();
|
||||
return;
|
||||
}
|
||||
}
|
190
liblinear-2.49/matlab/linear_model_matlab.c
Normal file
@@ -0,0 +1,190 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "linear.h"
|
||||
|
||||
#include "mex.h"
|
||||
|
||||
#ifdef MX_API_VER
|
||||
#if MX_API_VER < 0x07030000
|
||||
typedef int mwIndex;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
|
||||
|
||||
#define NUM_OF_RETURN_FIELD 7
|
||||
|
||||
static const char *field_names[] = {
|
||||
"Parameters",
|
||||
"nr_class",
|
||||
"nr_feature",
|
||||
"bias",
|
||||
"Label",
|
||||
"w",
|
||||
"rho",
|
||||
};
|
||||
|
||||
const char *model_to_matlab_structure(mxArray *plhs[], struct model *model_)
|
||||
{
|
||||
int i;
|
||||
int nr_w;
|
||||
double *ptr;
|
||||
mxArray *return_model, **rhs;
|
||||
int out_id = 0;
|
||||
int n, w_size;
|
||||
|
||||
rhs = (mxArray **)mxMalloc(sizeof(mxArray *)*NUM_OF_RETURN_FIELD);
|
||||
|
||||
// Parameters
|
||||
// for now, only solver_type is needed
|
||||
rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
ptr[0] = model_->param.solver_type;
|
||||
out_id++;
|
||||
|
||||
// nr_class
|
||||
rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
ptr[0] = model_->nr_class;
|
||||
out_id++;
|
||||
|
||||
if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS)
|
||||
nr_w=1;
|
||||
else
|
||||
nr_w=model_->nr_class;
|
||||
|
||||
// nr_feature
|
||||
rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
ptr[0] = model_->nr_feature;
|
||||
out_id++;
|
||||
|
||||
// bias
|
||||
rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
ptr[0] = model_->bias;
|
||||
out_id++;
|
||||
|
||||
if(model_->bias>=0)
|
||||
n=model_->nr_feature+1;
|
||||
else
|
||||
n=model_->nr_feature;
|
||||
|
||||
w_size = n;
|
||||
// Label
|
||||
if(model_->label)
|
||||
{
|
||||
rhs[out_id] = mxCreateDoubleMatrix(model_->nr_class, 1, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
for(i = 0; i < model_->nr_class; i++)
|
||||
ptr[i] = model_->label[i];
|
||||
}
|
||||
else
|
||||
rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
|
||||
out_id++;
|
||||
|
||||
// w
|
||||
rhs[out_id] = mxCreateDoubleMatrix(nr_w, w_size, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
for(i = 0; i < w_size*nr_w; i++)
|
||||
ptr[i]=model_->w[i];
|
||||
out_id++;
|
||||
|
||||
// rho
|
||||
rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
|
||||
ptr = mxGetPr(rhs[out_id]);
|
||||
ptr[0] = model_->rho;
|
||||
out_id++;
|
||||
|
||||
/* Create a struct matrix contains NUM_OF_RETURN_FIELD fields */
|
||||
return_model = mxCreateStructMatrix(1, 1, NUM_OF_RETURN_FIELD, field_names);
|
||||
|
||||
/* Fill struct matrix with input arguments */
|
||||
for(i = 0; i < NUM_OF_RETURN_FIELD; i++)
|
||||
mxSetField(return_model,0,field_names[i],mxDuplicateArray(rhs[i]));
|
||||
/* return */
|
||||
plhs[0] = return_model;
|
||||
mxFree(rhs);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const char *matlab_matrix_to_model(struct model *model_, const mxArray *matlab_struct)
|
||||
{
|
||||
int i, num_of_fields;
|
||||
int nr_w;
|
||||
double *ptr;
|
||||
int id = 0;
|
||||
int n, w_size;
|
||||
mxArray **rhs;
|
||||
|
||||
num_of_fields = mxGetNumberOfFields(matlab_struct);
|
||||
rhs = (mxArray **) mxMalloc(sizeof(mxArray *)*num_of_fields);
|
||||
|
||||
for(i=0;i<num_of_fields;i++)
|
||||
rhs[i] = mxGetFieldByNumber(matlab_struct, 0, i);
|
||||
|
||||
model_->nr_class=0;
|
||||
nr_w=0;
|
||||
model_->nr_feature=0;
|
||||
model_->w=NULL;
|
||||
model_->label=NULL;
|
||||
|
||||
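// NOTE: fields are read back in the same order they were written by model_to_matlab_structure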
// Parameters
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
model_->param.solver_type = (int)ptr[0];
|
||||
id++;
|
||||
|
||||
// nr_class
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
model_->nr_class = (int)ptr[0];
|
||||
id++;
|
||||
|
||||
if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS)
|
||||
nr_w=1;
|
||||
else
|
||||
nr_w=model_->nr_class;
|
||||
|
||||
// nr_feature
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
model_->nr_feature = (int)ptr[0];
|
||||
id++;
|
||||
|
||||
// bias
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
model_->bias = ptr[0];
|
||||
id++;
|
||||
|
||||
if(model_->bias>=0)
|
||||
n=model_->nr_feature+1;
|
||||
else
|
||||
n=model_->nr_feature;
|
||||
w_size = n;
|
||||
|
||||
// Label
|
||||
if(mxIsEmpty(rhs[id]) == 0)
|
||||
{
|
||||
model_->label = Malloc(int, model_->nr_class);
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
for(i=0;i<model_->nr_class;i++)
|
||||
model_->label[i] = (int)ptr[i];
|
||||
}
|
||||
id++;
|
||||
|
||||
// w
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
model_->w=Malloc(double, w_size*nr_w);
|
||||
for(i = 0; i < w_size*nr_w; i++)
|
||||
model_->w[i]=ptr[i];
|
||||
id++;
|
||||
|
||||
// rho
|
||||
ptr = mxGetPr(rhs[id]);
|
||||
model_->rho = ptr[0];
|
||||
id++;
|
||||
|
||||
mxFree(rhs);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
2
liblinear-2.49/matlab/linear_model_matlab.h
Normal file
@@ -0,0 +1,2 @@
|
||||
const char *model_to_matlab_structure(mxArray *plhs[], struct model *model_);
|
||||
const char *matlab_matrix_to_model(struct model *model_, const mxArray *matlab_struct);
|
22
liblinear-2.49/matlab/make.m
Normal file
@@ -0,0 +1,22 @@
|
||||
% This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix
|
||||
function make()
|
||||
try
|
||||
% This part is for OCTAVE
|
||||
if(exist('OCTAVE_VERSION', 'builtin'))
|
||||
mex libsvmread.c
|
||||
mex libsvmwrite.c
|
||||
mex -I.. train.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
|
||||
mex -I.. predict.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
|
||||
% This part is for MATLAB
|
||||
% Add -largeArrayDims on 64-bit machines of MATLAB
|
||||
else
|
||||
mex -largeArrayDims libsvmread.c
|
||||
mex -largeArrayDims libsvmwrite.c
|
||||
mex -I.. -largeArrayDims train.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
|
||||
mex -I.. -largeArrayDims predict.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
|
||||
end
|
||||
catch err
|
||||
fprintf('Error: %s failed (line %d)\n', err.stack(1).file, err.stack(1).line);
|
||||
disp(err.message);
|
||||
fprintf('=> Please check README for detailed instructions.\n');
|
||||
end
|
341
liblinear-2.49/matlab/predict.c
Normal file
@@ -0,0 +1,341 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "linear.h"
|
||||
|
||||
#include "mex.h"
|
||||
#include "linear_model_matlab.h"
|
||||
|
||||
#ifdef MX_API_VER
|
||||
#if MX_API_VER < 0x07030000
|
||||
typedef int mwIndex;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define CMD_LEN 2048
|
||||
|
||||
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
|
||||
|
||||
int print_null(const char *s,...) {return 0;}
|
||||
int (*info)(const char *fmt,...);
|
||||
|
||||
int col_format_flag;
|
||||
|
||||
void read_sparse_instance(const mxArray *prhs, int index, struct feature_node *x, int feature_number, double bias)
|
||||
{
|
||||
int j;
|
||||
mwIndex *ir, *jc, low, high, i;
|
||||
double *samples;
|
||||
|
||||
ir = mxGetIr(prhs);
|
||||
jc = mxGetJc(prhs);
|
||||
samples = mxGetPr(prhs);
|
||||
|
||||
// each column is one instance
|
||||
j = 0;
|
||||
low = jc[index], high = jc[index+1];
|
||||
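// skip features whose index exceeds the number of features seen in training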
for(i=low; i<high && (int) (ir[i])<feature_number; i++)
|
||||
{
|
||||
x[j].index = (int) ir[i]+1;
|
||||
x[j].value = samples[i];
|
||||
j++;
|
||||
}
|
||||
if(bias>=0)
|
||||
{
|
||||
x[j].index = feature_number+1;
|
||||
x[j].value = bias;
|
||||
j++;
|
||||
}
|
||||
x[j].index = -1;
|
||||
}
|
||||
|
||||
static void fake_answer(int nlhs, mxArray *plhs[])
|
||||
{
|
||||
int i;
|
||||
for(i=0;i<nlhs;i++)
|
||||
plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
|
||||
}
|
||||
|
||||
void do_predict(int nlhs, mxArray *plhs[], const mxArray *prhs[], struct model *model_, const int predict_probability_flag)
|
||||
{
|
||||
int label_vector_row_num, label_vector_col_num;
|
||||
int feature_number, testing_instance_number;
|
||||
int instance_index;
|
||||
double *ptr_label, *ptr_predict_label;
|
||||
double *ptr_prob_estimates, *ptr_dec_values, *ptr;
|
||||
struct feature_node *x;
|
||||
mxArray *pplhs[1]; // instance sparse matrix in row format
|
||||
mxArray *tplhs[3]; // temporary storage for plhs[]
|
||||
|
||||
int correct = 0;
|
||||
int total = 0;
|
||||
double error = 0;
|
||||
double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0;
|
||||
|
||||
int nr_class=get_nr_class(model_);
|
||||
int nr_w;
|
||||
double *prob_estimates=NULL;
|
||||
|
||||
if(nr_class==2 && model_->param.solver_type!=MCSVM_CS)
|
||||
nr_w=1;
|
||||
else
|
||||
nr_w=nr_class;
|
||||
|
||||
// prhs[1] = testing instance matrix
|
||||
feature_number = get_nr_feature(model_);
|
||||
testing_instance_number = (int) mxGetM(prhs[1]);
|
||||
if(col_format_flag)
|
||||
{
|
||||
feature_number = (int) mxGetM(prhs[1]);
|
||||
testing_instance_number = (int) mxGetN(prhs[1]);
|
||||
}
|
||||
|
||||
label_vector_row_num = (int) mxGetM(prhs[0]);
|
||||
label_vector_col_num = (int) mxGetN(prhs[0]);
|
||||
|
||||
if(label_vector_row_num!=testing_instance_number)
|
||||
{
|
||||
mexPrintf("Length of label vector does not match # of instances.\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
if(label_vector_col_num!=1)
|
||||
{
|
||||
mexPrintf("label (1st argument) should be a vector (# of column is 1).\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
ptr_label = mxGetPr(prhs[0]);
|
||||
|
||||
// transpose instance matrix
|
||||
if(col_format_flag)
|
||||
pplhs[0] = (mxArray *)prhs[1];
|
||||
else
|
||||
{
|
||||
mxArray *pprhs[1];
|
||||
pprhs[0] = mxDuplicateArray(prhs[1]);
|
||||
if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
|
||||
{
|
||||
mexPrintf("Error: cannot transpose testing instance matrix\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
prob_estimates = Malloc(double, nr_class);
|
||||
|
||||
tplhs[0] = mxCreateDoubleMatrix(testing_instance_number, 1, mxREAL);
|
||||
if(predict_probability_flag)
|
||||
tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_class, mxREAL);
|
||||
else
|
||||
tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_w, mxREAL);
|
||||
|
||||
ptr_predict_label = mxGetPr(tplhs[0]);
|
||||
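// tplhs[2] is reused: it holds probability estimates or decision values, depending on the flag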
ptr_prob_estimates = mxGetPr(tplhs[2]);
|
||||
ptr_dec_values = mxGetPr(tplhs[2]);
|
||||
x = Malloc(struct feature_node, feature_number+2);
|
||||
for(instance_index=0;instance_index<testing_instance_number;instance_index++)
|
||||
{
|
||||
int i;
|
||||
double target_label, predict_label;
|
||||
|
||||
target_label = ptr_label[instance_index];
|
||||
|
||||
// prhs[1] and prhs[1]^T are sparse
|
||||
read_sparse_instance(pplhs[0], instance_index, x, feature_number, model_->bias);
|
||||
|
||||
if(predict_probability_flag)
|
||||
{
|
||||
predict_label = predict_probability(model_, x, prob_estimates);
|
||||
ptr_predict_label[instance_index] = predict_label;
|
||||
for(i=0;i<nr_class;i++)
|
||||
ptr_prob_estimates[instance_index + i * testing_instance_number] = prob_estimates[i];
|
||||
}
|
||||
else
|
||||
{
|
||||
double *dec_values = Malloc(double, nr_class);
|
||||
predict_label = predict_values(model_, x, dec_values);
|
||||
ptr_predict_label[instance_index] = predict_label;
|
||||
|
||||
for(i=0;i<nr_w;i++)
|
||||
ptr_dec_values[instance_index + i * testing_instance_number] = dec_values[i];
|
||||
free(dec_values);
|
||||
}
|
||||
|
||||
if(predict_label == target_label)
|
||||
++correct;
|
||||
error += (predict_label-target_label)*(predict_label-target_label);
|
||||
sump += predict_label;
|
||||
sumt += target_label;
|
||||
sumpp += predict_label*predict_label;
|
||||
sumtt += target_label*target_label;
|
||||
sumpt += predict_label*target_label;
|
||||
|
||||
++total;
|
||||
}
|
||||
|
||||
if(check_regression_model(model_))
|
||||
{
|
||||
info("Mean squared error = %g (regression)\n",error/total);
|
||||
info("Squared correlation coefficient = %g (regression)\n",
|
||||
((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
|
||||
((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt))
|
||||
);
|
||||
}
|
||||
else
|
||||
info("Accuracy = %g%% (%d/%d)\n", (double) correct/total*100,correct,total);
|
||||
|
||||
// return accuracy, mean squared error, squared correlation coefficient
|
||||
tplhs[1] = mxCreateDoubleMatrix(3, 1, mxREAL);
|
||||
ptr = mxGetPr(tplhs[1]);
|
||||
ptr[0] = (double)correct/total*100;
|
||||
ptr[1] = error/total;
|
||||
ptr[2] = ((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
|
||||
((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt));
|
||||
|
||||
free(x);
|
||||
if(prob_estimates != NULL)
|
||||
free(prob_estimates);
|
||||
|
||||
switch(nlhs)
|
||||
{
|
||||
case 3:
|
||||
plhs[2] = tplhs[2];
|
||||
plhs[1] = tplhs[1];
|
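// deliberate fall-through: every case also assigns the predicted labels in plhs[0]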
||||
case 1:
|
||||
case 0:
|
||||
plhs[0] = tplhs[0];
|
||||
}
|
||||
}
|
||||
|
||||
void exit_with_help()
|
||||
{
|
||||
mexPrintf(
|
||||
"Usage: [predicted_label, accuracy, decision_values/prob_estimates] = predict(testing_label_vector, testing_instance_matrix, model, 'liblinear_options','col')\n"
|
||||
" [predicted_label] = predict(testing_label_vector, testing_instance_matrix, model, 'liblinear_options','col')\n"
|
||||
"liblinear_options:\n"
|
||||
"-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only\n"
|
||||
"-q quiet mode (no outputs)\n"
|
||||
"col: if 'col' is setted testing_instance_matrix is parsed in column format, otherwise is in row format\n"
|
||||
"Returns:\n"
|
||||
" predicted_label: prediction output vector.\n"
|
||||
" accuracy: a vector with accuracy, mean squared error, squared correlation coefficient.\n"
|
||||
" prob_estimates: If selected, probability estimate vector.\n"
|
||||
);
|
||||
}
|
||||
|
||||
void mexFunction( int nlhs, mxArray *plhs[],
|
||||
int nrhs, const mxArray *prhs[] )
|
||||
{
|
||||
int prob_estimate_flag = 0;
|
||||
struct model *model_;
|
||||
char cmd[CMD_LEN];
|
||||
info = &mexPrintf;
|
||||
col_format_flag = 0;
|
||||
|
||||
if(nlhs == 2 || nlhs > 3 || nrhs > 5 || nrhs < 3)
|
||||
{
|
||||
exit_with_help();
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
if(nrhs == 5)
|
||||
{
|
||||
mxGetString(prhs[4], cmd, mxGetN(prhs[4])+1);
|
||||
if(strcmp(cmd, "col") == 0)
|
||||
{
|
||||
col_format_flag = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
|
||||
mexPrintf("Error: label vector and instance matrix must be double\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
if(mxIsStruct(prhs[2]))
|
||||
{
|
||||
const char *error_msg;
|
||||
|
||||
// parse options
|
||||
if(nrhs>=4)
|
||||
{
|
||||
int i, argc = 1;
|
||||
char *argv[CMD_LEN/2];
|
||||
|
||||
// put options in argv[]
|
||||
mxGetString(prhs[3], cmd, mxGetN(prhs[3]) + 1);
|
||||
if((argv[argc] = strtok(cmd, " ")) != NULL)
|
||||
while((argv[++argc] = strtok(NULL, " ")) != NULL)
|
||||
;
|
||||
|
||||
for(i=1;i<argc;i++)
|
||||
{
|
||||
if(argv[i][0] != '-') break;
|
||||
++i;
|
||||
if(i>=argc && argv[i-1][1] != 'q')
|
||||
{
|
||||
exit_with_help();
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
switch(argv[i-1][1])
|
||||
{
|
||||
case 'b':
|
||||
prob_estimate_flag = atoi(argv[i]);
|
||||
break;
|
||||
case 'q':
|
||||
info = &print_null;
|
||||
i--;
|
||||
break;
|
||||
default:
|
||||
mexPrintf("unknown option\n");
|
||||
exit_with_help();
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
model_ = Malloc(struct model, 1);
|
||||
error_msg = matlab_matrix_to_model(model_, prhs[2]);
|
||||
if(error_msg)
|
||||
{
|
||||
mexPrintf("Error: can't read model: %s\n", error_msg);
|
||||
free_and_destroy_model(&model_);
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
if(prob_estimate_flag)
|
||||
{
|
||||
if(!check_probability_model(model_))
|
||||
{
|
||||
mexPrintf("probability output is only supported for logistic regression\n");
|
||||
prob_estimate_flag=0;
|
||||
}
|
||||
}
|
||||
|
||||
if(mxIsSparse(prhs[1]))
|
||||
do_predict(nlhs, plhs, prhs, model_, prob_estimate_flag);
|
||||
else
|
||||
{
|
||||
mexPrintf("Testing_instance_matrix must be sparse; "
|
||||
"use sparse(Testing_instance_matrix) first\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
}
|
||||
|
||||
// destroy model_
|
||||
free_and_destroy_model(&model_);
|
||||
}
|
||||
else
|
||||
{
|
||||
mexPrintf("model file should be a struct array\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
523
liblinear-2.49/matlab/train.c
Normal file
@@ -0,0 +1,523 @@
|
||||
#include <math.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ctype.h>
|
||||
#include "linear.h"
|
||||
|
||||
#include "mex.h"
|
||||
#include "linear_model_matlab.h"
|
||||
|
||||
#ifdef MX_API_VER
|
||||
#if MX_API_VER < 0x07030000
|
||||
typedef int mwIndex;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define CMD_LEN 2048
|
||||
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
|
||||
#define INF HUGE_VAL
|
||||
|
||||
void print_null(const char *s) {}
|
||||
void print_string_matlab(const char *s) {mexPrintf(s);}
|
||||
|
||||
void exit_with_help()
|
||||
{
|
||||
mexPrintf(
|
||||
"Usage: model = train(training_label_vector, training_instance_matrix, 'liblinear_options', 'col');\n"
|
||||
"liblinear_options:\n"
|
||||
"-s type : set type of solver (default 1)\n"
|
||||
" for multi-class classification\n"
|
||||
" 0 -- L2-regularized logistic regression (primal)\n"
|
||||
" 1 -- L2-regularized L2-loss support vector classification (dual)\n"
|
||||
" 2 -- L2-regularized L2-loss support vector classification (primal)\n"
|
||||
" 3 -- L2-regularized L1-loss support vector classification (dual)\n"
|
||||
" 4 -- support vector classification by Crammer and Singer\n"
|
||||
" 5 -- L1-regularized L2-loss support vector classification\n"
|
||||
" 6 -- L1-regularized logistic regression\n"
|
||||
" 7 -- L2-regularized logistic regression (dual)\n"
|
||||
" for regression\n"
|
||||
" 11 -- L2-regularized L2-loss support vector regression (primal)\n"
|
||||
" 12 -- L2-regularized L2-loss support vector regression (dual)\n"
|
||||
" 13 -- L2-regularized L1-loss support vector regression (dual)\n"
|
||||
" for outlier detection\n"
|
||||
" 21 -- one-class support vector machine (dual)\n"
|
||||
"-c cost : set the parameter C (default 1)\n"
|
||||
"-p epsilon : set the epsilon in loss function of SVR (default 0.1)\n"
|
||||
"-n nu : set the parameter nu of one-class SVM (default 0.5)\n"
|
||||
"-e epsilon : set tolerance of termination criterion\n"
|
||||
" -s 0 and 2\n"
|
||||
" |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n"
|
||||
" where f is the primal function and pos/neg are # of\n"
|
||||
" positive/negative data (default 0.01)\n"
|
||||
" -s 11\n"
|
||||
" |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.0001)\n"
|
||||
" -s 1, 3, 4, 7, and 21\n"
|
||||
" Dual maximal violation <= eps; similar to libsvm (default 0.1 except 0.01 for -s 21)\n"
|
||||
" -s 5 and 6\n"
|
||||
" |f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,\n"
|
||||
" where f is the primal function (default 0.01)\n"
|
||||
" -s 12 and 13\n"
|
||||
" |f'(alpha)|_1 <= eps |f'(alpha0)|,\n"
|
||||
" where f is the dual function (default 0.1)\n"
|
||||
"-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n"
|
||||
"-R : not regularize the bias; must with -B 1 to have the bias; DON'T use this unless you know what it is\n"
|
||||
" (for -s 0, 2, 5, 6, 11)\n"
|
||||
"-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
|
||||
"-v n: n-fold cross validation mode\n"
|
||||
"-C : find parameters (C for -s 0, 2 and C, p for -s 11)\n"
|
||||
"-q : quiet mode (no outputs)\n"
|
||||
"col:\n"
|
||||
" if 'col' is setted, training_instance_matrix is parsed in column format, otherwise is in row format\n"
|
||||
);
|
||||
}
|
||||
|
||||
// liblinear arguments
|
||||
struct parameter param; // set by parse_command_line
|
||||
struct problem prob; // set by read_problem
|
||||
struct model *model_;
|
||||
struct feature_node *x_space;
|
||||
int flag_cross_validation;
|
||||
int flag_find_parameters;
|
||||
int flag_C_specified;
|
||||
int flag_p_specified;
|
||||
int flag_solver_specified;
|
||||
int col_format_flag;
|
||||
int nr_fold;
|
||||
double bias;
|
||||
|
||||
|
||||
void do_find_parameters(double *best_C, double *best_p, double *best_score)
|
||||
{
|
||||
double start_C, start_p;
|
||||
if (flag_C_specified)
|
||||
start_C = param.C;
|
||||
else
|
||||
start_C = -1.0;
|
||||
if (flag_p_specified)
|
||||
start_p = param.p;
|
||||
else
|
||||
start_p = -1.0;
|
||||
|
||||
mexPrintf("Doing parameter search with %d-fold cross validation.\n", nr_fold);
|
||||
find_parameters(&prob, ¶m, nr_fold, start_C, start_p, best_C, best_p, best_score);
|
||||
|
||||
if(param.solver_type == L2R_LR || param.solver_type == L2R_L2LOSS_SVC)
|
||||
mexPrintf("Best C = %g CV accuracy = %g%%\n", *best_C, 100.0**best_score);
|
||||
else if(param.solver_type == L2R_L2LOSS_SVR)
|
||||
mexPrintf("Best C = %g Best p = %g CV MSE = %g\n", *best_C, *best_p, *best_score);
|
||||
}
|
||||
|
||||
|
||||
double do_cross_validation()
|
||||
{
|
||||
int i;
|
||||
int total_correct = 0;
|
||||
double total_error = 0;
|
||||
double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
|
||||
double *target = Malloc(double, prob.l);
|
||||
double retval = 0.0;
|
||||
|
||||
cross_validation(&prob,¶m,nr_fold,target);
|
||||
if(param.solver_type == L2R_L2LOSS_SVR ||
|
||||
param.solver_type == L2R_L1LOSS_SVR_DUAL ||
|
||||
param.solver_type == L2R_L2LOSS_SVR_DUAL)
|
||||
{
|
||||
for(i=0;i<prob.l;i++)
|
||||
{
|
||||
double y = prob.y[i];
|
||||
double v = target[i];
|
||||
total_error += (v-y)*(v-y);
|
||||
sumv += v;
|
||||
sumy += y;
|
||||
sumvv += v*v;
|
||||
sumyy += y*y;
|
||||
sumvy += v*y;
|
||||
}
|
||||
mexPrintf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
|
||||
mexPrintf("Cross Validation Squared correlation coefficient = %g\n",
|
||||
((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
|
||||
((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
|
||||
);
|
||||
retval = total_error/prob.l;
|
||||
}
|
||||
else
|
||||
{
|
||||
for(i=0;i<prob.l;i++)
|
||||
if(target[i] == prob.y[i])
|
||||
++total_correct;
|
||||
mexPrintf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
|
||||
retval = 100.0*total_correct/prob.l;
|
||||
}
|
||||
|
||||
free(target);
|
||||
return retval;
|
||||
}
|
||||
|
||||
// nrhs should be 3
|
||||
int parse_command_line(int nrhs, const mxArray *prhs[], char *model_file_name)
|
||||
{
|
||||
int i, argc = 1;
|
||||
char cmd[CMD_LEN];
|
||||
char *argv[CMD_LEN/2];
|
||||
void (*print_func)(const char *) = print_string_matlab; // default printing to matlab display
|
||||
|
||||
// default values
|
||||
param.solver_type = L2R_L2LOSS_SVC_DUAL;
|
||||
param.C = 1;
|
||||
param.p = 0.1;
|
||||
param.nu = 0.5;
|
||||
param.eps = INF; // see setting below
|
||||
param.nr_weight = 0;
|
||||
param.weight_label = NULL;
|
||||
param.weight = NULL;
|
||||
param.init_sol = NULL;
|
||||
param.regularize_bias = 1;
|
||||
flag_cross_validation = 0;
|
||||
col_format_flag = 0;
|
||||
flag_C_specified = 0;
|
||||
flag_p_specified = 0;
|
||||
flag_solver_specified = 0;
|
||||
flag_find_parameters = 0;
|
||||
bias = -1;
|
||||
|
||||
|
||||
if(nrhs <= 1)
|
||||
return 1;
|
||||
|
||||
if(nrhs == 4)
|
||||
{
|
||||
mxGetString(prhs[3], cmd, mxGetN(prhs[3])+1);
|
||||
if(strcmp(cmd, "col") == 0)
|
||||
col_format_flag = 1;
|
||||
}
|
||||
|
||||
// put options in argv[]
|
||||
if(nrhs > 2)
|
||||
{
|
||||
mxGetString(prhs[2], cmd, mxGetN(prhs[2]) + 1);
|
||||
if((argv[argc] = strtok(cmd, " ")) != NULL)
|
||||
while((argv[++argc] = strtok(NULL, " ")) != NULL)
|
||||
;
|
||||
}
|
||||
|
||||
// parse options
|
||||
for(i=1;i<argc;i++)
|
||||
{
|
||||
if(argv[i][0] != '-') break;
|
||||
++i;
|
||||
if(i>=argc && argv[i-1][1] != 'q' && argv[i-1][1] != 'C'
|
||||
&& argv[i-1][1] != 'R') // since options -q, -C, and -R have no parameter
|
||||
return 1;
|
||||
switch(argv[i-1][1])
|
||||
{
|
||||
case 's':
|
||||
param.solver_type = atoi(argv[i]);
|
||||
flag_solver_specified = 1;
|
||||
break;
|
||||
case 'c':
|
||||
param.C = atof(argv[i]);
|
||||
flag_C_specified = 1;
|
||||
break;
|
||||
case 'p':
|
||||
param.p = atof(argv[i]);
|
||||
flag_p_specified = 1;
|
||||
break;
|
||||
case 'n':
|
||||
param.nu = atof(argv[i]);
|
||||
break;
|
||||
case 'e':
|
||||
param.eps = atof(argv[i]);
|
||||
break;
|
||||
case 'B':
|
||||
bias = atof(argv[i]);
|
||||
break;
|
||||
case 'v':
|
||||
flag_cross_validation = 1;
|
||||
nr_fold = atoi(argv[i]);
|
||||
if(nr_fold < 2)
|
||||
{
|
||||
mexPrintf("n-fold cross validation: n must >= 2\n");
|
||||
return 1;
|
||||
}
|
||||
break;
|
||||
case 'w':
|
||||
++param.nr_weight;
|
||||
param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight);
|
||||
param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight);
|
||||
param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
|
||||
param.weight[param.nr_weight-1] = atof(argv[i]);
|
||||
break;
|
||||
case 'q':
|
||||
print_func = &print_null;
|
||||
i--;
|
||||
break;
|
||||
case 'C':
|
||||
flag_find_parameters = 1;
|
||||
i--;
|
||||
break;
|
||||
case 'R':
|
||||
param.regularize_bias = 0;
|
||||
i--;
|
||||
break;
|
||||
default:
|
||||
mexPrintf("unknown option\n");
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
set_print_string_function(print_func);
|
||||
|
||||
// default solver for parameter selection is L2R_L2LOSS_SVC
|
||||
if(flag_find_parameters)
|
||||
{
|
||||
if(!flag_cross_validation)
|
||||
nr_fold = 5;
|
||||
if(!flag_solver_specified)
|
||||
{
|
||||
mexPrintf("Solver not specified. Using -s 2\n");
|
||||
param.solver_type = L2R_L2LOSS_SVC;
|
||||
}
|
||||
else if(param.solver_type != L2R_LR && param.solver_type != L2R_L2LOSS_SVC && param.solver_type != L2R_L2LOSS_SVR)
|
||||
{
|
||||
mexPrintf("Warm-start parameter search only available for -s 0, -s 2 and -s 11\n");
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if(param.eps == INF)
|
||||
{
|
||||
switch(param.solver_type)
|
||||
{
|
||||
case L2R_LR:
|
||||
case L2R_L2LOSS_SVC:
|
||||
param.eps = 0.01;
|
||||
break;
|
||||
case L2R_L2LOSS_SVR:
|
||||
param.eps = 0.0001;
|
||||
break;
|
||||
case L2R_L2LOSS_SVC_DUAL:
|
||||
case L2R_L1LOSS_SVC_DUAL:
|
||||
case MCSVM_CS:
|
||||
case L2R_LR_DUAL:
|
||||
param.eps = 0.1;
|
||||
break;
|
||||
case L1R_L2LOSS_SVC:
|
||||
case L1R_LR:
|
||||
param.eps = 0.01;
|
||||
break;
|
||||
case L2R_L1LOSS_SVR_DUAL:
|
||||
case L2R_L2LOSS_SVR_DUAL:
|
||||
param.eps = 0.1;
|
||||
break;
|
||||
case ONECLASS_SVM:
|
||||
param.eps = 0.01;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fake_answer(int nlhs, mxArray *plhs[])
|
||||
{
|
||||
int i;
|
||||
for(i=0;i<nlhs;i++)
|
||||
plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
|
||||
}
|
||||
|
||||
int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
|
||||
{
|
||||
mwIndex *ir, *jc, low, high, k;
|
||||
// using size_t due to the output type of matlab functions
|
||||
size_t i, j, l, elements, max_index, label_vector_row_num;
|
||||
mwSize num_samples;
|
||||
double *samples, *labels;
|
||||
mxArray *instance_mat_col; // instance sparse matrix in column format
|
||||
|
||||
prob.x = NULL;
|
||||
prob.y = NULL;
|
||||
x_space = NULL;
|
||||
|
||||
if(col_format_flag)
|
||||
instance_mat_col = (mxArray *)instance_mat;
|
||||
else
|
||||
{
|
||||
// transpose instance matrix
|
||||
mxArray *prhs[1], *plhs[1];
|
||||
prhs[0] = mxDuplicateArray(instance_mat);
|
||||
if(mexCallMATLAB(1, plhs, 1, prhs, "transpose"))
|
||||
{
|
||||
mexPrintf("Error: cannot transpose training instance matrix\n");
|
||||
return -1;
|
||||
}
|
||||
instance_mat_col = plhs[0];
|
||||
mxDestroyArray(prhs[0]);
|
||||
}
|
||||
|
||||
// the number of instances
|
||||
l = mxGetN(instance_mat_col);
|
||||
label_vector_row_num = mxGetM(label_vec);
|
||||
prob.l = (int) l;
|
||||
|
||||
if(label_vector_row_num!=l)
|
||||
{
|
||||
mexPrintf("Length of label vector does not match # of instances.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// each column is one instance
|
||||
labels = mxGetPr(label_vec);
|
||||
samples = mxGetPr(instance_mat_col);
|
||||
ir = mxGetIr(instance_mat_col);
|
||||
jc = mxGetJc(instance_mat_col);
|
||||
|
||||
num_samples = mxGetNzmax(instance_mat_col);
|
||||
|
||||
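// reserve, per instance, one extra node for the optional bias term and one for the index = -1 terminator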
elements = num_samples + l*2;
|
||||
max_index = mxGetM(instance_mat_col);
|
||||
|
||||
prob.y = Malloc(double, l);
|
||||
prob.x = Malloc(struct feature_node*, l);
|
||||
x_space = Malloc(struct feature_node, elements);
|
||||
|
||||
prob.bias=bias;
|
||||
|
||||
j = 0;
|
||||
for(i=0;i<l;i++)
|
||||
{
|
||||
prob.x[i] = &x_space[j];
|
||||
prob.y[i] = labels[i];
|
||||
low = jc[i], high = jc[i+1];
|
||||
for(k=low;k<high;k++)
|
||||
{
|
||||
x_space[j].index = (int) ir[k]+1;
|
||||
x_space[j].value = samples[k];
|
||||
j++;
|
||||
}
|
||||
if(prob.bias>=0)
|
||||
{
|
||||
x_space[j].index = (int) max_index+1;
|
||||
x_space[j].value = prob.bias;
|
||||
j++;
|
||||
}
|
||||
x_space[j++].index = -1;
|
||||
}
|
||||
|
||||
if(prob.bias>=0)
|
||||
prob.n = (int) max_index+1;
|
||||
else
|
||||
prob.n = (int) max_index;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Interface function of matlab
|
||||
// now assume prhs[0]: label, prhs[1]: features
|
||||
void mexFunction( int nlhs, mxArray *plhs[],
|
||||
int nrhs, const mxArray *prhs[] )
|
||||
{
|
||||
const char *error_msg;
|
||||
// fix random seed to have same results for each run
|
||||
// (for cross validation)
|
||||
srand(1);
|
||||
|
||||
if(nlhs > 1)
|
||||
{
|
||||
exit_with_help();
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
// Transform the input Matrix to libsvm format
|
||||
if(nrhs > 1 && nrhs < 5)
|
||||
{
|
||||
int err=0;
|
||||
|
||||
if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1]))
|
||||
{
|
||||
mexPrintf("Error: label vector and instance matrix must be double\n");
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
if(mxIsSparse(prhs[0]))
|
||||
{
|
||||
mexPrintf("Error: label vector should not be in sparse format");
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
if(parse_command_line(nrhs, prhs, NULL))
|
||||
{
|
||||
exit_with_help();
|
||||
destroy_param(¶m);
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
if(mxIsSparse(prhs[1]))
|
||||
err = read_problem_sparse(prhs[0], prhs[1]);
|
||||
else
|
||||
{
|
||||
mexPrintf("Training_instance_matrix must be sparse; "
|
||||
"use sparse(Training_instance_matrix) first\n");
|
||||
destroy_param(¶m);
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
// train's original code
|
||||
error_msg = check_parameter(&prob, ¶m);
|
||||
|
||||
if(err || error_msg)
|
||||
{
|
||||
if (error_msg != NULL)
|
||||
mexPrintf("Error: %s\n", error_msg);
|
||||
destroy_param(¶m);
|
||||
free(prob.y);
|
||||
free(prob.x);
|
||||
free(x_space);
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
|
||||
if (flag_find_parameters)
|
||||
{
|
||||
double best_C, best_p, best_score, *ptr;
|
||||
|
||||
do_find_parameters(&best_C, &best_p, &best_score);
|
||||
|
||||
plhs[0] = mxCreateDoubleMatrix(3, 1, mxREAL);
|
||||
ptr = mxGetPr(plhs[0]);
|
||||
ptr[0] = best_C;
|
||||
ptr[1] = best_p;
|
||||
ptr[2] = best_score;
|
||||
}
|
||||
else if(flag_cross_validation)
|
||||
{
|
||||
double *ptr;
|
||||
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
|
||||
ptr = mxGetPr(plhs[0]);
|
||||
ptr[0] = do_cross_validation();
|
||||
}
|
||||
else
|
||||
{
|
||||
const char *error_msg;
|
||||
|
||||
model_ = train(&prob, ¶m);
|
||||
error_msg = model_to_matlab_structure(plhs, model_);
|
||||
if(error_msg)
|
||||
mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
|
||||
free_and_destroy_model(&model_);
|
||||
}
|
||||
destroy_param(¶m);
|
||||
free(prob.y);
|
||||
free(prob.x);
|
||||
free(x_space);
|
||||
}
|
||||
else
|
||||
{
|
||||
exit_with_help();
|
||||
fake_answer(nlhs, plhs);
|
||||
return;
|
||||
}
|
||||
}
|
251
liblinear-2.49/newton.cpp
Normal file
@@ -0,0 +1,251 @@
|
||||
#include <math.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <stdarg.h>
|
||||
#include "newton.h"
|
||||
|
||||
#ifndef min
|
||||
template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
|
||||
#endif
|
||||
|
||||
#ifndef max
|
||||
template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
extern double dnrm2_(int *, double *, int *);
|
||||
extern double ddot_(int *, double *, int *, double *, int *);
|
||||
extern int daxpy_(int *, double *, double *, int *, double *, int *);
|
||||
extern int dscal_(int *, double *, double *, int *);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
static void default_print(const char *buf)
|
||||
{
|
||||
fputs(buf,stdout);
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
// On entry *f must be the function value of w
|
||||
// On exit w is updated and *f is the new function value
|
||||
double function::linesearch_and_update(double *w, double *s, double *f, double *g, double alpha)
|
||||
{
|
||||
double gTs = 0;
|
||||
double eta = 0.01;
|
||||
int n = get_nr_variable();
|
||||
int max_num_linesearch = 20;
|
||||
double *w_new = new double[n];
|
||||
double fold = *f;
|
||||
|
||||
for (int i=0;i<n;i++)
|
||||
gTs += s[i] * g[i];
|
||||
|
||||
int num_linesearch = 0;
|
||||
for(num_linesearch=0; num_linesearch < max_num_linesearch; num_linesearch++)
|
||||
{
|
||||
for (int i=0;i<n;i++)
|
||||
w_new[i] = w[i] + alpha*s[i];
|
||||
*f = fun(w_new);
|
||||
if (*f - fold <= eta * alpha * gTs)
|
||||
break;
|
||||
else
|
||||
alpha *= 0.5;
|
||||
}
|
||||
|
||||
if (num_linesearch >= max_num_linesearch)
|
||||
{
|
||||
*f = fold;
|
||||
return 0;
|
||||
}
|
||||
else
|
||||
memcpy(w, w_new, sizeof(double)*n);
|
||||
|
||||
delete [] w_new;
|
||||
return alpha;
|
||||
}
|
||||
|
||||
void NEWTON::info(const char *fmt,...)
|
||||
{
|
||||
char buf[BUFSIZ];
|
||||
va_list ap;
|
||||
va_start(ap,fmt);
|
||||
vsprintf(buf,fmt,ap);
|
||||
va_end(ap);
|
||||
(*newton_print_string)(buf);
|
||||
}
|
||||
|
||||
NEWTON::NEWTON(const function *fun_obj, double eps, double eps_cg, int max_iter)
|
||||
{
|
||||
this->fun_obj=const_cast<function *>(fun_obj);
|
||||
this->eps=eps;
|
||||
this->eps_cg=eps_cg;
|
||||
this->max_iter=max_iter;
|
||||
newton_print_string = default_print;
|
||||
}
|
||||
|
||||
NEWTON::~NEWTON()
|
||||
{
|
||||
}
|
||||
|
||||
void NEWTON::newton(double *w)
|
||||
{
|
||||
int n = fun_obj->get_nr_variable();
|
||||
int i, cg_iter;
|
||||
double step_size;
|
||||
double f, fold, actred;
|
||||
double init_step_size = 1;
|
||||
int search = 1, iter = 1, inc = 1;
|
||||
double *s = new double[n];
|
||||
double *r = new double[n];
|
||||
double *g = new double[n];
|
||||
|
||||
const double alpha_pcg = 0.01;
|
||||
double *M = new double[n];
|
||||
|
||||
// calculate gradient norm at w=0 for stopping condition.
|
||||
double *w0 = new double[n];
|
||||
for (i=0; i<n; i++)
|
||||
w0[i] = 0;
|
||||
fun_obj->fun(w0);
|
||||
fun_obj->grad(w0, g);
|
||||
double gnorm0 = dnrm2_(&n, g, &inc);
|
||||
delete [] w0;
|
||||
|
||||
f = fun_obj->fun(w);
|
||||
fun_obj->grad(w, g);
|
||||
double gnorm = dnrm2_(&n, g, &inc);
|
||||
info("init f %5.3e |g| %5.3e\n", f, gnorm);
|
||||
|
||||
if (gnorm <= eps*gnorm0)
|
||||
search = 0;
|
||||
|
||||
while (iter <= max_iter && search)
|
||||
{
|
||||
fun_obj->get_diag_preconditioner(M);
|
||||
for(i=0; i<n; i++)
|
||||
M[i] = (1-alpha_pcg) + alpha_pcg*M[i];
|
||||
cg_iter = pcg(g, M, s, r);
|
||||
|
||||
fold = f;
|
||||
step_size = fun_obj->linesearch_and_update(w, s, &f, g, init_step_size);
|
||||
|
||||
if (step_size == 0)
|
||||
{
|
||||
info("WARNING: line search fails\n");
|
||||
break;
|
||||
}
|
||||
|
||||
fun_obj->grad(w, g);
|
||||
gnorm = dnrm2_(&n, g, &inc);
|
||||
|
||||
info("iter %2d f %5.3e |g| %5.3e CG %3d step_size %4.2e \n", iter, f, gnorm, cg_iter, step_size);
|
||||
|
||||
if (gnorm <= eps*gnorm0)
|
||||
break;
|
||||
if (f < -1.0e+32)
|
||||
{
|
||||
info("WARNING: f < -1.0e+32\n");
|
||||
break;
|
||||
}
|
||||
actred = fold - f;
|
||||
if (fabs(actred) <= 1.0e-12*fabs(f))
|
||||
{
|
||||
info("WARNING: actred too small\n");
|
||||
break;
|
||||
}
|
||||
|
||||
iter++;
|
||||
}
|
||||
|
||||
if(iter >= max_iter)
|
||||
info("\nWARNING: reaching max number of Newton iterations\n");
|
||||
|
||||
delete[] g;
|
||||
delete[] r;
|
||||
delete[] s;
|
||||
delete[] M;
|
||||
}
|
||||
|
||||
int NEWTON::pcg(double *g, double *M, double *s, double *r)
|
||||
{
|
||||
int i, inc = 1;
|
||||
int n = fun_obj->get_nr_variable();
|
||||
double one = 1;
|
||||
double *d = new double[n];
|
||||
double *Hd = new double[n];
|
||||
double zTr, znewTrnew, alpha, beta, cgtol, dHd;
|
||||
double *z = new double[n];
|
||||
double Q = 0, newQ, Qdiff;
|
||||
|
||||
for (i=0; i<n; i++)
|
||||
{
|
||||
s[i] = 0;
|
||||
r[i] = -g[i];
|
||||
z[i] = r[i] / M[i];
|
||||
d[i] = z[i];
|
||||
}
|
||||
|
||||
zTr = ddot_(&n, z, &inc, r, &inc);
|
||||
double gMinv_norm = sqrt(zTr);
|
||||
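// inexact Newton: the CG tolerance adapts to the preconditioned gradient norm (never looser than eps_cg)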
cgtol = min(eps_cg, sqrt(gMinv_norm));
|
||||
int cg_iter = 0;
|
||||
int max_cg_iter = max(n, 5);
|
||||
|
||||
while (cg_iter < max_cg_iter)
|
||||
{
|
||||
cg_iter++;
|
||||
|
||||
fun_obj->Hv(d, Hd);
|
||||
dHd = ddot_(&n, d, &inc, Hd, &inc);
|
||||
// avoid 0/0 in getting alpha
|
||||
if (dHd <= 1.0e-16)
|
||||
break;
|
||||
|
||||
alpha = zTr/dHd;
|
||||
daxpy_(&n, &alpha, d, &inc, s, &inc);
|
||||
alpha = -alpha;
|
||||
daxpy_(&n, &alpha, Hd, &inc, r, &inc);
|
||||
|
||||
// Using quadratic approximation as CG stopping criterion
|
||||
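// here r = -g - H*s, so -0.5*(s'r - s'g) = g's + 0.5*s'Hs, the quadratic model value at s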
newQ = -0.5*(ddot_(&n, s, &inc, r, &inc) - ddot_(&n, s, &inc, g, &inc));
|
||||
Qdiff = newQ - Q;
|
||||
if (newQ <= 0 && Qdiff <= 0)
|
||||
{
|
||||
if (cg_iter * Qdiff >= cgtol * newQ)
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
info("WARNING: quadratic approximation > 0 or increasing in CG\n");
|
||||
break;
|
||||
}
|
||||
Q = newQ;
|
||||
|
||||
for (i=0; i<n; i++)
|
||||
z[i] = r[i] / M[i];
|
||||
znewTrnew = ddot_(&n, z, &inc, r, &inc);
|
||||
beta = znewTrnew/zTr;
|
||||
dscal_(&n, &beta, d, &inc);
|
||||
daxpy_(&n, &one, z, &inc, d, &inc);
|
||||
zTr = znewTrnew;
|
||||
}
|
||||
|
||||
if (cg_iter == max_cg_iter)
|
||||
info("WARNING: reaching maximal number of CG steps\n");
|
||||
|
||||
delete[] d;
|
||||
delete[] Hd;
|
||||
delete[] z;
|
||||
|
||||
return cg_iter;
|
||||
}
|
||||
|
||||
void NEWTON::set_print_string(void (*print_string) (const char *buf))
|
||||
{
|
||||
newton_print_string = print_string;
|
||||
}
|
37
liblinear-2.49/newton.h
Normal file
@@ -0,0 +1,37 @@
|
||||
#ifndef _NEWTON_H
|
||||
#define _NEWTON_H
|
||||
|
||||
class function
|
||||
{
|
||||
public:
|
||||
virtual double fun(double *w) = 0 ;
|
||||
virtual void grad(double *w, double *g) = 0 ;
|
||||
virtual void Hv(double *s, double *Hs) = 0 ;
|
||||
virtual int get_nr_variable(void) = 0 ;
|
||||
virtual void get_diag_preconditioner(double *M) = 0 ;
|
||||
virtual ~function(void){}
|
||||
|
||||
// base implementation in newton.cpp
|
||||
virtual double linesearch_and_update(double *w, double *s, double *f, double *g, double alpha);
|
||||
};
|
||||
|
||||
class NEWTON
|
||||
{
|
||||
public:
|
||||
NEWTON(const function *fun_obj, double eps = 0.1, double eps_cg = 0.5, int max_iter = 1000);
|
||||
~NEWTON();
|
||||
|
||||
void newton(double *w);
|
||||
void set_print_string(void (*i_print) (const char *buf));
|
||||
|
||||
private:
|
||||
int pcg(double *g, double *M, double *s, double *r);
|
||||
|
||||
double eps;
|
||||
double eps_cg;
|
||||
int max_iter;
|
||||
function *fun_obj;
|
||||
void info(const char *fmt,...);
|
||||
void (*newton_print_string)(const char *buf);
|
||||
};
|
||||
#endif
|
243
liblinear-2.49/predict.c
Normal file
@@ -0,0 +1,243 @@
|
||||
#include <stdio.h>
|
||||
#include <ctype.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include "linear.h"
|
||||
|
||||
int print_null(const char *s,...) {return 0;}
|
||||
|
||||
static int (*info)(const char *fmt,...) = &printf;
|
||||
|
||||
struct feature_node *x;
|
||||
int max_nr_attr = 64;
|
||||
|
||||
struct model* model_;
|
||||
int flag_predict_probability=0;
|
||||
|
||||
void exit_input_error(int line_num)
|
||||
{
|
||||
fprintf(stderr,"Wrong input format at line %d\n", line_num);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
static char *line = NULL;
|
||||
static int max_line_len;
|
||||
|
||||
static char* readline(FILE *input)
|
||||
{
|
||||
int len;
|
||||
|
||||
if(fgets(line,max_line_len,input) == NULL)
|
||||
return NULL;
|
||||
|
||||
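// no '\n' read yet: the line did not fit, so double the buffer and keep reading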
while(strrchr(line,'\n') == NULL)
|
||||
{
|
||||
max_line_len *= 2;
|
||||
line = (char *) realloc(line,max_line_len);
|
||||
len = (int) strlen(line);
|
||||
if(fgets(line+len,max_line_len-len,input) == NULL)
|
||||
break;
|
||||
}
|
||||
return line;
|
||||
}
|
||||
|
||||
void do_predict(FILE *input, FILE *output)
|
||||
{
|
||||
int correct = 0;
|
||||
int total = 0;
|
||||
double error = 0;
|
||||
double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0;
|
||||
|
||||
int nr_class=get_nr_class(model_);
|
||||
double *prob_estimates=NULL;
|
||||
int j, n;
|
||||
int nr_feature=get_nr_feature(model_);
|
||||
if(model_->bias>=0)
|
||||
n=nr_feature+1;
|
||||
else
|
||||
n=nr_feature;
|
||||
|
||||
if(flag_predict_probability)
|
||||
{
|
||||
int *labels;
|
||||
|
||||
if(!check_probability_model(model_))
|
||||
{
|
||||
fprintf(stderr, "probability output is only supported for logistic regression\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
labels=(int *) malloc(nr_class*sizeof(int));
|
||||
get_labels(model_,labels);
|
||||
prob_estimates = (double *) malloc(nr_class*sizeof(double));
|
||||
fprintf(output,"labels");
|
||||
for(j=0;j<nr_class;j++)
|
||||
fprintf(output," %d",labels[j]);
|
||||
fprintf(output,"\n");
|
||||
free(labels);
|
||||
}
|
||||
|
||||
max_line_len = 1024;
|
||||
line = (char *)malloc(max_line_len*sizeof(char));
|
||||
while(readline(input) != NULL)
|
||||
{
|
||||
int i = 0;
|
||||
double target_label, predict_label;
|
||||
char *idx, *val, *label, *endptr;
|
||||
int inst_max_index = 0; // strtol gives 0 if wrong format
|
||||
|
||||
label = strtok(line," \t\n");
|
||||
if(label == NULL) // empty line
|
||||
exit_input_error(total+1);
|
||||
|
||||
target_label = strtod(label,&endptr);
|
||||
if(endptr == label || *endptr != '\0')
|
||||
exit_input_error(total+1);
|
||||
|
||||
while(1)
|
||||
{
|
||||
if(i>=max_nr_attr-2) // need one more for index = -1
|
||||
{
|
||||
max_nr_attr *= 2;
|
||||
x = (struct feature_node *) realloc(x,max_nr_attr*sizeof(struct feature_node));
|
||||
}
|
||||
|
||||
idx = strtok(NULL,":");
|
||||
val = strtok(NULL," \t");
|
||||
|
||||
if(val == NULL)
|
||||
break;
|
||||
errno = 0;
|
||||
x[i].index = (int) strtol(idx,&endptr,10);
|
||||
if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index)
|
||||
exit_input_error(total+1);
|
||||
else
|
||||
inst_max_index = x[i].index;
|
||||
|
||||
errno = 0;
|
||||
x[i].value = strtod(val,&endptr);
|
||||
if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
|
||||
exit_input_error(total+1);
|
||||
|
||||
// feature indices larger than those in training are not used
|
||||
if(x[i].index <= nr_feature)
|
||||
++i;
|
||||
}
|
||||
|
||||
if(model_->bias>=0)
|
||||
{
|
||||
x[i].index = n;
|
||||
x[i].value = model_->bias;
|
||||
i++;
|
||||
}
|
||||
x[i].index = -1;
|
||||
|
||||
if(flag_predict_probability)
|
||||
{
|
||||
int j;
|
||||
predict_label = predict_probability(model_,x,prob_estimates);
|
||||
fprintf(output,"%g",predict_label);
|
||||
for(j=0;j<model_->nr_class;j++)
|
||||
fprintf(output," %g",prob_estimates[j]);
|
||||
fprintf(output,"\n");
|
||||
}
|
||||
else
|
||||
{
|
||||
predict_label = predict(model_,x);
|
||||
fprintf(output,"%.17g\n",predict_label);
|
||||
}
|
||||
|
||||
if(predict_label == target_label)
|
||||
++correct;
|
||||
error += (predict_label-target_label)*(predict_label-target_label);
|
||||
sump += predict_label;
|
||||
sumt += target_label;
|
||||
sumpp += predict_label*predict_label;
|
||||
sumtt += target_label*target_label;
|
||||
sumpt += predict_label*target_label;
|
||||
++total;
|
||||
}
|
||||
if(check_regression_model(model_))
|
||||
{
|
||||
info("Mean squared error = %g (regression)\n",error/total);
|
||||
info("Squared correlation coefficient = %g (regression)\n",
|
||||
((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
|
||||
((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt))
|
||||
);
|
||||
}
|
||||
else
|
||||
info("Accuracy = %g%% (%d/%d)\n",(double) correct/total*100,correct,total);
|
||||
if(flag_predict_probability)
|
||||
free(prob_estimates);
|
||||
}
|
||||
|
||||
void exit_with_help()
|
||||
{
|
||||
printf(
|
||||
"Usage: predict [options] test_file model_file output_file\n"
|
||||
"options:\n"
|
||||
"-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only\n"
|
||||
"-q : quiet mode (no outputs)\n"
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
FILE *input, *output;
|
||||
int i;
|
||||
|
||||
// parse options
|
||||
for(i=1;i<argc;i++)
|
||||
{
|
||||
if(argv[i][0] != '-') break;
|
||||
++i;
|
||||
switch(argv[i-1][1])
|
||||
{
|
||||
case 'b':
|
||||
flag_predict_probability = atoi(argv[i]);
|
||||
break;
|
||||
case 'q':
|
||||
info = &print_null;
|
||||
i--;
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
|
||||
exit_with_help();
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(i>=argc)
|
||||
exit_with_help();
|
||||
|
||||
input = fopen(argv[i],"r");
|
||||
if(input == NULL)
|
||||
{
|
||||
fprintf(stderr,"can't open input file %s\n",argv[i]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
output = fopen(argv[i+2],"w");
|
||||
if(output == NULL)
|
||||
{
|
||||
fprintf(stderr,"can't open output file %s\n",argv[i+2]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if((model_=load_model(argv[i+1]))==0)
|
||||
{
|
||||
fprintf(stderr,"can't open model file %s\n",argv[i+1]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
x = (struct feature_node *) malloc(max_nr_attr*sizeof(struct feature_node));
|
||||
do_predict(input, output);
|
||||
free_and_destroy_model(&model_);
|
||||
free(line);
|
||||
free(x);
|
||||
fclose(input);
|
||||
fclose(output);
|
||||
return 0;
|
||||
}
|
||||
|
2
liblinear-2.49/python/MANIFEST.in
Normal file
@@ -0,0 +1,2 @@
|
||||
include cpp-source/*
|
||||
include cpp-source/*/*
|
4
liblinear-2.49/python/Makefile
Normal file
@@ -0,0 +1,4 @@
|
||||
all = lib
|
||||
|
||||
lib:
|
||||
make -C .. lib
|
549
liblinear-2.49/python/README
Executable file
@@ -0,0 +1,549 @@
|
||||
-------------------------------------
|
||||
--- Python interface of LIBLINEAR ---
|
||||
-------------------------------------
|
||||
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
- Introduction
|
||||
- Installation via PyPI
|
||||
- Installation via Sources
|
||||
- Quick Start
|
||||
- Quick Start with Scipy
|
||||
- Design Description
|
||||
- Data Structures
|
||||
- Utility Functions
|
||||
- Additional Information
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
Python (http://www.python.org/) is a programming language suitable for rapid
|
||||
development. This tool provides a simple Python interface to LIBLINEAR, a library
|
||||
for large-scale linear classification (http://www.csie.ntu.edu.tw/~cjlin/liblinear). The
|
||||
interface is very easy to use as the usage is the same as that of LIBLINEAR. The
|
||||
interface is developed with the built-in Python library "ctypes."
|
||||
|
||||
Installation via PyPI
|
||||
=====================
|
||||
|
||||
To install the interface from PyPI, execute the following command:
|
||||
|
||||
> pip install -U liblinear-official
|
||||
|
||||
Installation via Sources
|
||||
========================
|
||||
|
||||
Alternatively, you may install the interface from sources by
|
||||
generating the LIBLINEAR shared library.
|
||||
|
||||
Depending on your use cases, you can choose between local-directory
|
||||
and system-wide installation.
|
||||
|
||||
- Local-directory installation:
|
||||
|
||||
On Unix systems, type
|
||||
|
||||
> make
|
||||
|
||||
This generates a .so file in the LIBLINEAR main directory and you
|
||||
can run the interface in the current python directory.
|
||||
|
||||
For Windows, starting from version 2.48, we no longer provide the
|
||||
pre-built shared library liblinear.dll. To run the interface in the
|
||||
current python directory, please follow the instructions for building
|
||||
Windows binaries in LIBLINEAR README. You can copy liblinear.dll to
|
||||
the system directory (e.g., `C:\WINDOWS\system32\') to make it
|
||||
available system-wide.
|
||||
|
||||
- System-wide installation:
|
||||
|
||||
Type
|
||||
|
||||
> pip install -e .
|
||||
|
||||
or
|
||||
|
||||
> pip install --user -e .
|
||||
|
||||
The option --user would install the package in the home directory
|
||||
instead of the system directory, and thus does not require the
|
||||
root privilege.
|
||||
|
||||
Please note that you must keep the sources after the installation.
|
||||
|
||||
For Windows, to run the above command, Microsoft Visual C++ and
|
||||
other tools are needed.
|
||||
|
||||
In addition, DON'T use the following FAILED commands
|
||||
|
||||
> python setup.py install (fails when run in the python directory)
|
||||
> pip install .
|
||||
|
||||
Quick Start
|
||||
===========
|
||||
|
||||
"Quick Start with Scipy" is in the next section.
|
||||
|
||||
There are two levels of usage. The high-level one uses utility
|
||||
functions in liblinearutil.py and commonutil.py (shared with LIBSVM
|
||||
and imported by svmutil.py). The usage is the same as the LIBLINEAR
|
||||
MATLAB interface.
|
||||
|
||||
>>> from liblinear.liblinearutil import *
|
||||
# Read data in LIBSVM format
|
||||
>>> y, x = svm_read_problem('../heart_scale')
|
||||
>>> m = train(y[:200], x[:200], '-c 4')
|
||||
>>> p_label, p_acc, p_val = predict(y[200:], x[200:], m)
|
||||
|
||||
# Construct problem in python format
|
||||
# Dense data
|
||||
>>> y, x = [1,-1], [[1,0,1], [-1,0,-1]]
|
||||
# Sparse data
|
||||
>>> y, x = [1,-1], [{1:1, 3:1}, {1:-1,3:-1}]
|
||||
>>> prob = problem(y, x)
|
||||
>>> param = parameter('-s 0 -c 4 -B 1')
|
||||
>>> m = train(prob, param)
|
||||
|
||||
# Other utility functions
|
||||
>>> save_model('heart_scale.model', m)
|
||||
>>> m = load_model('heart_scale.model')
|
||||
>>> p_label, p_acc, p_val = predict(y, x, m, '-b 1')
|
||||
>>> ACC, MSE, SCC = evaluations(y, p_label)
|
||||
|
||||
# Getting online help
|
||||
>>> help(train)
|
||||
|
||||
The low-level usage directly calls C interfaces imported by liblinear.py. Note that
|
||||
all arguments and return values are in ctypes format. You need to handle them
|
||||
carefully.
|
||||
|
||||
>>> from liblinear.liblinear import *
|
||||
>>> prob = problem([1,-1], [{1:1, 3:1}, {1:-1,3:-1}])
|
||||
>>> param = parameter('-c 4')
|
||||
>>> m = liblinear.train(prob, param) # m is a ctype pointer to a model
|
||||
# Convert a Python-format instance to feature_nodearray, a ctypes structure
|
||||
>>> x0, max_idx = gen_feature_nodearray({1:1, 3:1})
|
||||
>>> label = liblinear.predict(m, x0)
|
||||
|
||||
Quick Start with Scipy
|
||||
======================
|
||||
|
||||
Make sure you have Scipy installed to proceed in this section.
|
||||
If numba (http://numba.pydata.org) is installed, some operations will be much faster.
|
||||
|
||||
There are two levels of usage. The high-level one uses utility functions
|
||||
in liblinearutil.py and the usage is the same as the LIBLINEAR MATLAB interface.
|
||||
|
||||
>>> import numpy as np
|
||||
>>> import scipy
|
||||
>>> from liblinear.liblinearutil import *
|
||||
# Read data in LIBSVM format
|
||||
>>> y, x = svm_read_problem('../heart_scale', return_scipy = True) # y: ndarray, x: csr_matrix
|
||||
>>> m = train(y[:200], x[:200, :], '-c 4')
|
||||
>>> p_label, p_acc, p_val = predict(y[200:], x[200:, :], m)
|
||||
|
||||
# Construct problem in Scipy format
|
||||
# Dense data: numpy ndarray
|
||||
>>> y, x = np.asarray([1,-1]), np.asarray([[1,0,1], [-1,0,-1]])
|
||||
# Sparse data: scipy csr_matrix((data, (row_ind, col_ind)))
|
||||
>>> y, x = np.asarray([1,-1]), scipy.sparse.csr_matrix(([1, 1, -1, -1], ([0, 0, 1, 1], [0, 2, 0, 2])))
|
||||
>>> prob = problem(y, x)
|
||||
>>> param = parameter('-s 0 -c 4 -B 1')
|
||||
>>> m = train(prob, param)
|
||||
|
||||
# Apply data scaling in Scipy format
|
||||
>>> y, x = svm_read_problem('../heart_scale', return_scipy=True)
|
||||
>>> scale_param = csr_find_scale_param(x, lower=0)
|
||||
>>> scaled_x = csr_scale(x, scale_param)
|
||||
|
||||
# Other utility functions
|
||||
>>> save_model('heart_scale.model', m)
|
||||
>>> m = load_model('heart_scale.model')
|
||||
>>> p_label, p_acc, p_val = predict(y, x, m, '-b 1')
|
||||
>>> ACC, MSE, SCC = evaluations(y, p_label)
|
||||
|
||||
# Getting online help
|
||||
>>> help(train)
|
||||
|
||||
The low-level usage directly calls C interfaces imported by liblinear.py. Note that
|
||||
all arguments and return values are in ctypes format. You need to handle them
|
||||
carefully.
|
||||
|
||||
>>> from liblinear.liblinear import *
|
||||
>>> prob = problem(np.asarray([1,-1]), scipy.sparse.csr_matrix(([1, 1, -1, -1], ([0, 0, 1, 1], [0, 2, 0, 2]))))
|
||||
>>> param = parameter('-s 1 -c 4')
|
||||
# One may also directly assign the options after creating the parameter instance
|
||||
>>> param = parameter()
|
||||
>>> param.solver_type = 1
|
||||
>>> param.C = 4
|
||||
>>> m = liblinear.train(prob, param) # m is a ctype pointer to a model
|
||||
# Convert a tuple of ndarray (index, data) to feature_nodearray, a ctypes structure
|
||||
# Note that the index starts from 0; the following example will be converted to 1:1, 3:1 internally
|
||||
>>> x0, max_idx = gen_feature_nodearray((np.asarray([0,2]), np.asarray([1,1])))
|
||||
>>> label = liblinear.predict(m, x0)
|
||||
|
||||
Design Description
==================

There are two files, liblinear.py and liblinearutil.py, which respectively correspond to
the low-level and high-level use of the interface.

In liblinear.py, we adopt the Python built-in library "ctypes," so that
Python can directly access C structures and interface functions defined
in linear.h.
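
To illustrate the idea, here is a minimal, self-contained sketch of the
ctypes pattern used in liblinear.py (my_node is a hypothetical stand-in
for the feature_node structure introduced below):

>>> from ctypes import Structure, c_int, c_double
>>> class my_node(Structure): # mirrors a C struct with an int and a double
...     _fields_ = [("index", c_int), ("value", c_double)]
>>> n = my_node(1, 0.5)
>>> print(n.index, n.value) # the fields behave like the C struct members
1 0.5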
While advanced users can use structures/functions in liblinear.py, to
avoid handling ctypes structures, in liblinearutil.py we provide some easy-to-use
functions. The usage is similar to that of the LIBLINEAR MATLAB interface.

Data Structures
===============

Three data structures derived from linear.h are node, problem, and
parameter. They all contain fields with the same names as in
linear.h. Access these fields carefully because you are directly using a C structure
instead of a Python object. The following description introduces additional
fields and methods.

Before using the data structures, execute the following command to load the
LIBLINEAR shared library:

>>> from liblinear.liblinear import *
- class feature_node:

    Construct a feature_node.

    >>> node = feature_node(idx, val)

    idx: an integer indicating the feature index.

    val: a float indicating the feature value.

    Show the index and the value of a node.

    >>> print(node)
- Function: gen_feature_nodearray(xi [,feature_max=None])

    Generate a feature vector from a Python list/tuple/dictionary, numpy ndarray or tuple of (index, data):

    >>> xi_ctype, max_idx = gen_feature_nodearray({1:1, 3:1, 5:-2})

    xi_ctype: the returned feature_nodearray (a ctypes structure)

    max_idx: the maximal feature index of xi

    feature_max: if feature_max is assigned, features with indices larger than
                 feature_max are removed.
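
    For example, with feature_max assigned, indices larger than feature_max
    are dropped (a small illustration; the values are arbitrary):

    >>> xi_ctype, max_idx = gen_feature_nodearray({1:1, 3:1, 5:-2}, feature_max=4)
    >>> print(xi_ctype[0], xi_ctype[1]) # 1:1 3:1; the node 5:-2 was removed
    >>> print(max_idx) # 3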
- class problem:

    Construct a problem instance

    >>> prob = problem(y, x [,bias=-1])

    y: a Python list/tuple/ndarray of l labels (type must be int/double).

    x: 1. a list/tuple of l training instances. Feature vector of
          each training instance is a list/tuple or dictionary.

       2. an l * n numpy ndarray or scipy spmatrix (n: number of features).

    bias: if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term is
          added (default -1)

    You can also modify the bias value by

    >>> prob.set_bias(1)

    Note that if your x contains sparse data (i.e., dictionary), the internal
    ctypes data format is still sparse.
    Copy a problem instance.
    DON'T use this unless you know what it does.

    >>> prob_copy = prob.copy()

    The reason we need to copy a problem instance is that, for example,
    in multi-label tasks using the OVR setting, we need to train a binary
    classification problem for each label on data with/without that label.
    Since each training uses the same x but a different y, simply looping
    over labels and creating a new problem instance for each training would
    introduce overhead, either from repeatedly transforming x into feature_node
    or from allocating additional memory space for x during parallel training.

    With problem copying, suppose x represents the data, y1 represents the label
    for class 1 and y2 represents the label for class 2.
    We can do:

    >>> class1_prob = problem(y1, x)
    >>> class2_prob = class1_prob.copy()
    >>> class2_prob.y = (ctypes.c_double * class1_prob.l)(*y2)

    Note that although the copied problem is a new instance, attributes such as
    y (POINTER(c_double)), x (POINTER(POINTER(feature_node))), and x_space (list/np.ndarray)
    are copied by reference. That is, class1_prob and class2_prob share the same
    y, x and x_space after the copy.
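
    For example, a one-vs-rest style loop could be sketched as follows
    (illustrative only; y_list is a hypothetical list of per-label +1/-1
    label arrays for the same x):

    >>> import ctypes
    >>> base_prob = problem(y_list[0], x)
    >>> models = []
    >>> for yk in y_list:
    ...     prob_k = base_prob.copy() # shares x and x_space with base_prob
    ...     prob_k.y = (ctypes.c_double * base_prob.l)(*yk) # replace labels only
    ...     models.append(train(prob_k, parameter('-s 2 -c 1')))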
- class parameter:

    Construct a parameter instance

    >>> param = parameter('training_options')

    If 'training_options' is empty, LIBLINEAR default values are applied.

    Set param to LIBLINEAR default values.

    >>> param.set_to_default_values()

    Parse a string of options.

    >>> param.parse_options('training_options')

    Show values of parameters.

    >>> print(param)
- class model:

    There are two ways to obtain an instance of model:

    >>> model_ = train(y, x)
    >>> model_ = load_model('model_file_name')

    Note that the returned structure of the interface functions
    liblinear.train and liblinear.load_model is a ctypes pointer to a
    model, which is different from the model object returned
    by train and load_model in liblinearutil.py. We provide a
    function toPyModel for the conversion:

    >>> model_ptr = liblinear.train(prob, param)
    >>> model_ = toPyModel(model_ptr)

    If you obtain a model in a way other than the above approaches,
    handle it carefully to avoid memory leaks or segmentation faults.

    Some interface functions to access LIBLINEAR models are wrapped as
    members of the class model:

    >>> nr_feature = model_.get_nr_feature()
    >>> nr_class = model_.get_nr_class()
    >>> class_labels = model_.get_labels()
    >>> is_prob_model = model_.is_probability_model()
    >>> is_regression_model = model_.is_regression_model()
    The decision function is W*x + b, where
    W is an nr_class-by-nr_feature matrix, and
    b is a vector of size nr_class.
    To access W_kj (i.e., the coefficient for the k-th class and the j-th feature)
    and b_k (i.e., the bias for the k-th class), use the following functions.

    >>> W_kj = model_.get_decfun_coef(feat_idx=j, label_idx=k)
    >>> b_k = model_.get_decfun_bias(label_idx=k)

    We also provide a function to extract w_k (i.e., the k-th row of W) and
    b_k directly as follows.

    >>> [w_k, b_k] = model_.get_decfun(label_idx=k)

    Note that w_k is a Python list of length nr_feature, which means that
    w_k[0] = W_k1.
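
    As a sanity check, a decision value can be reproduced from these
    accessors (a sketch, assuming a trained classification model model_ and
    a dense instance xi given as a Python list of length nr_feature):

    >>> w_k, b_k = model_.get_decfun(label_idx=k)
    >>> dec_k = sum(w_k[j]*xi[j] for j in range(len(w_k))) + b_k # k-th decision value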
    For regression models, W is just a vector of length nr_feature. Either
    set label_idx=0 or omit the label_idx parameter to access the coefficients.

    >>> W_j = model_.get_decfun_coef(feat_idx=j)
    >>> b = model_.get_decfun_bias()
    >>> [W, b] = model_.get_decfun()

    For one-class SVM models, label_idx is ignored and b=-rho is
    returned from get_decfun(). That is, the decision function is
    w*x+b = w*x-rho.

    >>> rho = model_.get_decfun_rho()
    >>> [W, b] = model_.get_decfun()

    Note that in get_decfun_coef, get_decfun_bias, and get_decfun, feat_idx
    starts from 1, while label_idx starts from 0. If label_idx is not in the
    valid range (0 to nr_class-1), then NaN will be returned; and if feat_idx
    is not in the valid range (1 to nr_feature), then a zero value will be
    returned. For regression models, label_idx is ignored.
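
    A quick illustration of this boundary behavior (return values shown as
    comments; the in-range coefficients depend on the model):

    >>> model_.get_decfun_coef(feat_idx=0) # feat_idx out of range -> 0.0
    >>> model_.get_decfun_bias(label_idx=model_.get_nr_class()) # label_idx out of range -> nan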
Utility Functions
=================

To use utility functions, type

>>> from liblinear.liblinearutil import *

The above command loads
    train()                : train a linear model
    predict()              : predict testing data
    svm_read_problem()     : read the data from a LIBSVM-format file or object.
    load_model()           : load a LIBLINEAR model.
    save_model()           : save model to a file.
    evaluations()          : evaluate prediction results.
    csr_find_scale_param() : find scaling parameter for data in csr format.
    csr_scale()            : apply data scaling to data in csr format.
- Function: train

    There are three ways to call train()

    >>> model = train(y, x [, 'training_options'])
    >>> model = train(prob [, 'training_options'])
    >>> model = train(prob, param)

    y: a list/tuple/ndarray of l training labels (type must be int/double).

    x: 1. a list/tuple of l training instances. Feature vector of
          each training instance is a list/tuple or dictionary.

       2. an l * n numpy ndarray or scipy spmatrix (n: number of features).

    training_options: a string in the same form as that for LIBLINEAR command
                      mode.

    prob: a problem instance generated by calling
          problem(y, x).

    param: a parameter instance generated by calling
           parameter('training_options')

    model: the returned model instance. See linear.h for details of this
           structure. If '-v' is specified, cross validation is
           conducted and the returned model is just a scalar: cross-validation
           accuracy for classification and mean squared error for regression.

           If the '-C' option is specified, the best parameters are found
           by cross validation. The parameter selection utility is supported
           only by -s 0, -s 2 (for finding C) and -s 11 (for finding C, p).
           The returned structure is a triple with the best C, the best p,
           and the corresponding cross-validation accuracy or mean squared
           error. The returned best p for -s 0 and -s 2 is set to -1 because
           the p parameter is not used by classification models.

    To train the same data many times with different
    parameters, the second and the third ways should be faster.

    Examples:

    >>> y, x = svm_read_problem('../heart_scale')
    >>> prob = problem(y, x)
    >>> param = parameter('-s 3 -c 5 -q')
    >>> m = train(y, x, '-c 5')
    >>> m = train(prob, '-w1 5 -c 5')
    >>> m = train(prob, param)
    >>> CV_ACC = train(y, x, '-v 3')
    >>> best_C, best_p, best_rate = train(y, x, '-C -s 0') # best_p is only for -s 11
    >>> m = train(y, x, '-c {0} -s 0'.format(best_C)) # use the same solver: -s 0
- Function: predict

    To predict testing data with a model, use

    >>> p_labs, p_acc, p_vals = predict(y, x, model [,'predicting_options'])

    y: a list/tuple/ndarray of l true labels (type must be int/double).
       It is used for calculating the accuracy. Use [] if true labels are
       unavailable.

    x: 1. a list/tuple of l testing instances. Feature vector of
          each testing instance is a list/tuple or dictionary.

       2. an l * n numpy ndarray or scipy spmatrix (n: number of features).

    predicting_options: a string of predicting options in the same format as
                        that of LIBLINEAR.

    model: a model instance.

    p_labels: a list of predicted labels

    p_acc: a tuple including accuracy (for classification), mean
           squared error, and squared correlation coefficient (for
           regression).

    p_vals: a list of decision values or probability estimates (if '-b 1'
            is specified). If k is the number of classes, for decision values,
            each element includes results of predicting k binary-class
            SVMs. If k = 2 and the solver is not MCSVM_CS, only one decision value
            is returned. For probabilities, each element contains k values
            indicating the probability that the testing instance is in each class.
            Note that the order of classes here is the same as the 'model.label'
            field in the model structure.

    Example:

    >>> m = train(y, x, '-c 5')
    >>> p_labels, p_acc, p_vals = predict(y, x, m)
- Functions: svm_read_problem/load_model/save_model

    See the usage by examples:

    >>> y, x = svm_read_problem('data.txt')
    >>> with open('data.txt') as f:
    ...     y, x = svm_read_problem(f)
    >>> m = load_model('model_file')
    >>> save_model('model_file', m)
- Function: evaluations

    Calculate some evaluations using the true values (ty) and the predicted
    values (pv):

    >>> (ACC, MSE, SCC) = evaluations(ty, pv, useScipy)

    ty: a list/tuple/ndarray of true values.

    pv: a list/tuple/ndarray of predicted values.

    useScipy: convert ty, pv to ndarray, and use scipy functions to do the evaluation

    ACC: accuracy.

    MSE: mean squared error.

    SCC: squared correlation coefficient.
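
    For instance (a small illustration; the numbers are arbitrary):

    >>> ACC, MSE, SCC = evaluations([1, 2, 3, 4], [1, 2, 3, 5])
    >>> # ACC = 75.0 (3 of 4 values match), MSE = 0.25, SCC ~= 0.966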
- Functions: csr_find_scale_param/csr_scale

    Scale data in csr format.

    >>> param = csr_find_scale_param(x [, lower=l, upper=u])
    >>> x = csr_scale(x, param)

    x: a csr_matrix of data.

    l: x scaling lower limit; default -1.

    u: x scaling upper limit; default 1.

    The scaling process is: x * diag(coef) + ones(l, 1) * offset'

    param: a dictionary of scaling parameters, where param['coef'] = coef and param['offset'] = offset.

    coef: a scipy array of scaling coefficients.

    offset: a scipy array of scaling offsets.
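
    As a quick check of the formula, the same mapping can be done densely
    with numpy (a sketch; a 2 x 2 toy matrix is used for readability):

    >>> import numpy as np
    >>> X = np.array([[0., 2.], [4., 6.]])
    >>> fmin, fmax = X.min(axis=0), X.max(axis=0)
    >>> coef = (1 - (-1)) / (fmax - fmin) # per-feature slope for [-1, 1]
    >>> offset = -fmin * coef + (-1) # per-feature shift
    >>> X * coef + offset # each column now spans [-1, 1]
    array([[-1., -1.],
           [ 1.,  1.]])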
Additional Information
======================

This interface was originally written by Hsiang-Fu Yu from the Department of Computer
Science, National Taiwan University. If you find this tool useful, please
cite LIBLINEAR as follows

R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
LIBLINEAR: A Library for Large Linear Classification, Journal of
Machine Learning Research 9(2008), 1871-1874. Software available at
http://www.csie.ntu.edu.tw/~cjlin/liblinear

For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>,
or check the FAQ page:

http://www.csie.ntu.edu.tw/~cjlin/liblinear/faq.html
0
liblinear-2.49/python/liblinear/__init__.py
Normal file
189
liblinear-2.49/python/liblinear/commonutil.py
Normal file
@@ -0,0 +1,189 @@
from __future__ import print_function
from array import array
import sys

try:
    import numpy as np
    import scipy
    from scipy import sparse
except:
    scipy = None


__all__ = ['svm_read_problem', 'evaluations', 'csr_find_scale_param', 'csr_scale']

def svm_read_problem(data_source, return_scipy=False):
    """
    svm_read_problem(data_source, return_scipy=False) -> [y, x], y: list, x: list of dictionary
    svm_read_problem(data_source, return_scipy=True) -> [y, x], y: ndarray, x: csr_matrix

    Read LIBSVM-format data from data_source and return labels y
    and data instances x.
    """
    if scipy != None and return_scipy:
        prob_y = array('d')
        prob_x = array('d')
        row_ptr = array('l', [0])
        col_idx = array('l')
    else:
        prob_y = []
        prob_x = []
        row_ptr = [0]
        col_idx = []
    indx_start = 1

    if hasattr(data_source, "read"):
        file = data_source
    else:
        file = open(data_source)
    try:
        for line in file:
            line = line.split(None, 1)
            # In case an instance with all zero features
            if len(line) == 1: line += ['']
            label, features = line
            prob_y.append(float(label))
            if scipy != None and return_scipy:
                nz = 0
                for e in features.split():
                    ind, val = e.split(":")
                    if ind == '0':
                        indx_start = 0
                    val = float(val)
                    if val != 0:
                        col_idx.append(int(ind)-indx_start)
                        prob_x.append(val)
                        nz += 1
                row_ptr.append(row_ptr[-1]+nz)
            else:
                xi = {}
                for e in features.split():
                    ind, val = e.split(":")
                    xi[int(ind)] = float(val)
                prob_x += [xi]
    except Exception as err_msg:
        raise err_msg
    finally:
        if not hasattr(data_source, "read"):
            # close file only if it was created by us
            file.close()

    if scipy != None and return_scipy:
        prob_y = np.frombuffer(prob_y, dtype='d')
        prob_x = np.frombuffer(prob_x, dtype='d')
        col_idx = np.frombuffer(col_idx, dtype='l')
        row_ptr = np.frombuffer(row_ptr, dtype='l')
        prob_x = sparse.csr_matrix((prob_x, col_idx, row_ptr))
    return (prob_y, prob_x)

def evaluations_scipy(ty, pv):
    """
    evaluations_scipy(ty, pv) -> (ACC, MSE, SCC)
    ty, pv: ndarray

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).
    """
    if not (scipy != None and isinstance(ty, np.ndarray) and isinstance(pv, np.ndarray)):
        raise TypeError("type of ty and pv must be ndarray")
    if len(ty) != len(pv):
        raise ValueError("len(ty) must be equal to len(pv)")
    ACC = 100.0*(ty == pv).mean()
    MSE = ((ty - pv)**2).mean()
    l = len(ty)
    sumv = pv.sum()
    sumy = ty.sum()
    sumvy = (pv*ty).sum()
    sumvv = (pv*pv).sum()
    sumyy = (ty*ty).sum()
    with np.errstate(all='raise'):
        try:
            SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
        except:
            SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))

def evaluations(ty, pv, useScipy = True):
    """
    evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC)
    ty, pv: list, tuple or ndarray
    useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation

    Calculate accuracy, mean squared error and squared correlation coefficient
    using the true values (ty) and predicted values (pv).
    """
    if scipy != None and useScipy:
        return evaluations_scipy(np.asarray(ty), np.asarray(pv))
    if len(ty) != len(pv):
        raise ValueError("len(ty) must be equal to len(pv)")
    total_correct = total_error = 0
    sumv = sumy = sumvv = sumyy = sumvy = 0
    for v, y in zip(pv, ty):
        if y == v:
            total_correct += 1
        total_error += (v-y)*(v-y)
        sumv += v
        sumy += y
        sumvv += v*v
        sumyy += y*y
        sumvy += v*y
    l = len(ty)
    ACC = 100.0*total_correct/l
    MSE = total_error/l
    try:
        SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
    except:
        SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))

def csr_find_scale_param(x, lower=-1, upper=1):
    """Return {'coef', 'offset'} mapping each feature column of csr_matrix x into [lower, upper]."""
    assert isinstance(x, sparse.csr_matrix)
    assert lower < upper
    l, n = x.shape
    feat_min = x.min(axis=0).toarray().flatten()
    feat_max = x.max(axis=0).toarray().flatten()
    coef = (feat_max - feat_min) / (upper - lower)
    coef[coef != 0] = 1.0 / coef[coef != 0]

    # (x - ones(l,1) * feat_min') * diag(coef) + lower
    # = x * diag(coef) - ones(l, 1) * (feat_min' * diag(coef)) + lower
    # = x * diag(coef) + ones(l, 1) * (-feat_min' * diag(coef) + lower)
    # = x * diag(coef) + ones(l, 1) * offset'
    offset = -feat_min * coef + lower
    offset[coef == 0] = 0

    if sum(offset != 0) * l > 3 * x.getnnz():
        print(
            "WARNING: The #nonzeros of the scaled data is at least 2 times larger than the original one.\n"
            "If feature values are non-negative and sparse, set lower=0 rather than the default lower=-1.",
            file=sys.stderr)

    return {'coef':coef, 'offset':offset}

def csr_scale(x, scale_param):
    """Apply scaled_x = x * diag(coef) + ones(l, 1) * offset' to csr_matrix x."""
    assert isinstance(x, sparse.csr_matrix)

    offset = scale_param['offset']
    coef = scale_param['coef']
    assert len(coef) == len(offset)

    l, n = x.shape

    if not n == len(coef):
        print("WARNING: The dimension of scaling parameters and feature number do not match.", file=sys.stderr)
        coef = coef.resize(n) # zeros padded if n > len(coef)
        offset = offset.resize(n)

    # scaled_x = x * diag(coef) + ones(l, 1) * offset'
    offset = sparse.csr_matrix(offset.reshape(1, n))
    offset = sparse.vstack([offset] * l, format='csr', dtype=x.dtype)
    scaled_x = x.dot(sparse.diags(coef, 0, shape=(n, n))) + offset

    if scaled_x.getnnz() > x.getnnz():
        print(
            "WARNING: original #nonzeros %d\n" % x.getnnz() +
            " > new #nonzeros %d\n" % scaled_x.getnnz() +
            "If feature values are non-negative and sparse, get scale_param by setting lower=0 rather than the default lower=-1.",
            file=sys.stderr)

    return scaled_x
479
liblinear-2.49/python/liblinear/liblinear.py
Normal file
@@ -0,0 +1,479 @@
from ctypes import *
from ctypes.util import find_library
from os import path
from glob import glob
import sys
from enum import IntEnum
try:
    import numpy as np
    import scipy
    from scipy import sparse
except:
    scipy = None

if sys.version_info[0] < 3:
    range = xrange
    from itertools import izip as zip

__all__ = ['liblinear', 'feature_node', 'gen_feature_nodearray', 'problem',
           'parameter', 'model', 'toPyModel', 'solver_names',
           'print_null']

try:
    dirname = path.dirname(path.abspath(__file__))
    dynamic_lib_name = 'clib.cp*'
    path_to_so = glob(path.join(dirname, dynamic_lib_name))[0]
    liblinear = CDLL(path_to_so)
except:
    try:
        if sys.platform == 'win32':
            liblinear = CDLL(path.join(dirname, r'..\..\windows\liblinear.dll'))
        else:
            liblinear = CDLL(path.join(dirname, '../../liblinear.so.6'))
    except:
        # For unix the prefix 'lib' is not considered.
        if find_library('linear'):
            liblinear = CDLL(find_library('linear'))
        elif find_library('liblinear'):
            liblinear = CDLL(find_library('liblinear'))
        else:
            raise Exception('LIBLINEAR library not found.')

class solver_names(IntEnum):
    L2R_LR = 0
    L2R_L2LOSS_SVC_DUAL = 1
    L2R_L2LOSS_SVC = 2
    L2R_L1LOSS_SVC_DUAL = 3
    MCSVM_CS = 4
    L1R_L2LOSS_SVC = 5
    L1R_LR = 6
    L2R_LR_DUAL = 7
    L2R_L2LOSS_SVR = 11
    L2R_L2LOSS_SVR_DUAL = 12
    L2R_L1LOSS_SVR_DUAL = 13
    ONECLASS_SVM = 21

PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
    return

# In multi-threading, all threads share the same memory space of
# the dynamic library (liblinear). Thus, we use a module-level
# variable to keep a reference to ctypes print_null, preventing
# python from garbage collecting it in thread B while thread A
# still needs it. Check the usage of svm_set_print_string_function()
# in LIBLINEAR README for details.
ctypes_print_null = PRINT_STRING_FUN(print_null)

def genFields(names, types):
    return list(zip(names, types))

def fillprototype(f, restype, argtypes):
    f.restype = restype
    f.argtypes = argtypes

class feature_node(Structure):
    _names = ["index", "value"]
    _types = [c_int, c_double]
    _fields_ = genFields(_names, _types)

    def __str__(self):
        return '%d:%g' % (self.index, self.value)

def gen_feature_nodearray(xi, feature_max=None):
    if feature_max:
        assert(isinstance(feature_max, int))

    xi_shift = 0 # ensure correct indices of xi
    if scipy and isinstance(xi, tuple) and len(xi) == 2\
            and isinstance(xi[0], np.ndarray) and isinstance(xi[1], np.ndarray): # for a sparse vector
        index_range = xi[0] + 1 # index starts from 1
        if feature_max:
            index_range = index_range[np.where(index_range <= feature_max)]
    elif scipy and isinstance(xi, np.ndarray):
        xi_shift = 1
        index_range = xi.nonzero()[0] + 1 # index starts from 1
        if feature_max:
            index_range = index_range[np.where(index_range <= feature_max)]
    elif isinstance(xi, (dict, list, tuple)):
        if isinstance(xi, dict):
            index_range = sorted(xi.keys())
        elif isinstance(xi, (list, tuple)):
            xi_shift = 1
            index_range = range(1, len(xi) + 1)
        index_range = list(filter(lambda j: xi[j-xi_shift] != 0, index_range))

        if feature_max:
            index_range = list(filter(lambda j: j <= feature_max, index_range))
    else:
        raise TypeError('xi should be a dictionary, list, tuple, 1-d numpy array, or tuple of (index, data)')

    ret = (feature_node*(len(index_range)+2))()
    ret[-1].index = -1 # for bias term
    ret[-2].index = -1

    if scipy and isinstance(xi, tuple) and len(xi) == 2\
            and isinstance(xi[0], np.ndarray) and isinstance(xi[1], np.ndarray): # for a sparse vector
        # since xi=(indices, values), we must sort them simultaneously.
        for idx, arg in enumerate(np.argsort(index_range)):
            ret[idx].index = index_range[arg]
            ret[idx].value = (xi[1])[arg]
    else:
        for idx, j in enumerate(index_range):
            ret[idx].index = j
            ret[idx].value = xi[j - xi_shift]

    max_idx = 0
    if len(index_range) > 0:
        max_idx = index_range[-1]
    return ret, max_idx

try:
    from numba import jit
    jit_enabled = True
except:
    # We need to support two cases: when jit is called with no arguments, and when jit is called with
    # a keyword argument.
    def jit(func=None, *args, **kwargs):
        if func is None:
            # This handles the case where jit is used with parentheses: @jit(nopython=True)
            return lambda x: x
        else:
            # This handles the case where jit is used without parentheses: @jit
            return func
    jit_enabled = False

@jit(nopython=True)
def csr_to_problem_jit(l, x_val, x_ind, x_rowptr, prob_val, prob_ind, prob_rowptr):
    for i in range(l):
        b1,e1 = x_rowptr[i], x_rowptr[i+1]
        b2,e2 = prob_rowptr[i], prob_rowptr[i+1]-2
        for j in range(b1,e1):
            prob_ind[j-b1+b2] = x_ind[j]+1
            prob_val[j-b1+b2] = x_val[j]
def csr_to_problem_nojit(l, x_val, x_ind, x_rowptr, prob_val, prob_ind, prob_rowptr):
    for i in range(l):
        x_slice = slice(x_rowptr[i], x_rowptr[i+1])
        prob_slice = slice(prob_rowptr[i], prob_rowptr[i+1]-2)
        prob_ind[prob_slice] = x_ind[x_slice]+1
        prob_val[prob_slice] = x_val[x_slice]

def csr_to_problem(x, prob):
    if not x.has_sorted_indices:
        x.sort_indices()

    # Extra space for termination node and (possibly) bias term
    x_space = prob.x_space = np.empty((x.nnz+x.shape[0]*2), dtype=feature_node)
    # rowptr has to be a 64bit integer because it will later be used for pointer arithmetic,
    # which overflows when the added pointer points to an address that is numerically high.
    prob.rowptr = x.indptr.astype(np.int64, copy=True)
    prob.rowptr[1:] += 2*np.arange(1,x.shape[0]+1)
    prob_ind = x_space["index"]
    prob_val = x_space["value"]
    prob_ind[:] = -1
    if jit_enabled:
        csr_to_problem_jit(x.shape[0], x.data, x.indices, x.indptr, prob_val, prob_ind, prob.rowptr)
    else:
        csr_to_problem_nojit(x.shape[0], x.data, x.indices, x.indptr, prob_val, prob_ind, prob.rowptr)

class problem(Structure):
    _names = ["l", "n", "y", "x", "bias"]
    _types = [c_int, c_int, POINTER(c_double), POINTER(POINTER(feature_node)), c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self, y, x, bias = -1):
        if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, np.ndarray))):
            raise TypeError("type of y: {0} is not supported!".format(type(y)))

        if isinstance(x, (list, tuple)):
            if len(y) != len(x):
                raise ValueError("len(y) != len(x)")
        elif scipy != None and isinstance(x, (np.ndarray, sparse.spmatrix)):
            if len(y) != x.shape[0]:
                raise ValueError("len(y) != len(x)")
            if isinstance(x, np.ndarray):
                x = np.ascontiguousarray(x) # enforce row-major
            if isinstance(x, sparse.spmatrix):
                x = x.tocsr()
                pass
        else:
            raise TypeError("type of x: {0} is not supported!".format(type(x)))
        self.l = l = len(y)
        self.bias = -1

        max_idx = 0
        x_space = self.x_space = []
        if scipy != None and isinstance(x, sparse.csr_matrix):
            csr_to_problem(x, self)
            max_idx = x.shape[1]
        else:
            for i, xi in enumerate(x):
                tmp_xi, tmp_idx = gen_feature_nodearray(xi)
                x_space += [tmp_xi]
                max_idx = max(max_idx, tmp_idx)
        self.n = max_idx

        self.y = (c_double * l)()
        if scipy != None and isinstance(y, np.ndarray):
            np.ctypeslib.as_array(self.y, (self.l,))[:] = y
        else:
            for i, yi in enumerate(y): self.y[i] = yi

        self.x = (POINTER(feature_node) * l)()
        if scipy != None and isinstance(x, sparse.csr_matrix):
            base = addressof(self.x_space.ctypes.data_as(POINTER(feature_node))[0])
            x_ptr = cast(self.x, POINTER(c_uint64))
            x_ptr = np.ctypeslib.as_array(x_ptr,(self.l,))
            x_ptr[:] = self.rowptr[:-1]*sizeof(feature_node)+base
        else:
            for i, xi in enumerate(self.x_space): self.x[i] = xi

        self.set_bias(bias)

    def set_bias(self, bias):
        if self.bias == bias:
            return
        if bias >= 0 and self.bias < 0:
            self.n += 1
            node = feature_node(self.n, bias)
        if bias < 0 and self.bias >= 0:
            self.n -= 1
            node = feature_node(-1, bias)

        if isinstance(self.x_space, list):
            for xi in self.x_space:
                xi[-2] = node
        else:
            self.x_space["index"][self.rowptr[1:]-2] = node.index
            self.x_space["value"][self.rowptr[1:]-2] = node.value

        self.bias = bias

    def copy(self):
        prob_copy = problem.__new__(problem)
        for key in problem._names + list(vars(self)):
            setattr(prob_copy, key, getattr(self, key))
        return prob_copy


class parameter(Structure):
    _names = ["solver_type", "eps", "C", "nr_weight", "weight_label",
              "weight", "p", "nu", "init_sol", "regularize_bias",
              "w_recalc"]
    _types = [c_int, c_double, c_double, c_int, POINTER(c_int),
              POINTER(c_double), c_double, c_double, POINTER(c_double), c_int, c_bool]
    _fields_ = genFields(_names, _types)

    def __init__(self, options = None):
        if options == None:
            options = ''
        self.parse_options(options)

    def __str__(self):
        s = ''
        attrs = parameter._names + list(self.__dict__.keys())
        values = map(lambda attr: getattr(self, attr), attrs)
        for attr, val in zip(attrs, values):
            s += (' %s: %s\n' % (attr, val))
        s = s.strip()

        return s

    def set_to_default_values(self):
        self.solver_type = solver_names.L2R_L2LOSS_SVC_DUAL
        self.eps = float('inf')
        self.C = 1
        self.p = 0.1
        self.nu = 0.5
        self.nr_weight = 0
        self.weight_label = None
        self.weight = None
        self.init_sol = None
        self.bias = -1
        self.regularize_bias = 1
        self.w_recalc = False
        self.flag_cross_validation = False
        self.flag_C_specified = False
        self.flag_p_specified = False
        self.flag_solver_specified = False
        self.flag_find_parameters = False
        self.nr_fold = 0
        self.print_func = cast(None, PRINT_STRING_FUN)

    def parse_options(self, options):
        if isinstance(options, list):
            argv = options
        elif isinstance(options, str):
            argv = options.split()
        else:
            raise TypeError("arg 1 should be a list or a str.")
        self.set_to_default_values()
        self.print_func = cast(None, PRINT_STRING_FUN)
        weight_label = []
        weight = []

        i = 0
        while i < len(argv):
            if argv[i] == "-s":
                i = i + 1
                self.solver_type = solver_names(int(argv[i]))
                self.flag_solver_specified = True
            elif argv[i] == "-c":
                i = i + 1
                self.C = float(argv[i])
                self.flag_C_specified = True
            elif argv[i] == "-p":
                i = i + 1
                self.p = float(argv[i])
                self.flag_p_specified = True
            elif argv[i] == "-n":
                i = i + 1
                self.nu = float(argv[i])
            elif argv[i] == "-e":
                i = i + 1
                self.eps = float(argv[i])
            elif argv[i] == "-B":
                i = i + 1
                self.bias = float(argv[i])
            elif argv[i] == "-v":
                i = i + 1
                self.flag_cross_validation = 1
                self.nr_fold = int(argv[i])
                if self.nr_fold < 2:
                    raise ValueError("n-fold cross validation: n must >= 2")
            elif argv[i].startswith("-w"):
                i = i + 1
                self.nr_weight += 1
                weight_label += [int(argv[i-1][2:])]
                weight += [float(argv[i])]
            elif argv[i] == "-q":
                self.print_func = ctypes_print_null
            elif argv[i] == "-C":
                self.flag_find_parameters = True
            elif argv[i] == "-R":
                self.regularize_bias = 0
            else:
                raise ValueError("Wrong options")
            i += 1

        liblinear.set_print_string_function(self.print_func)
        self.weight_label = (c_int*self.nr_weight)()
        self.weight = (c_double*self.nr_weight)()
        for i in range(self.nr_weight):
            self.weight[i] = weight[i]
            self.weight_label[i] = weight_label[i]

        # default solver for parameter selection is L2R_L2LOSS_SVC
        if self.flag_find_parameters:
            if not self.flag_cross_validation:
                self.nr_fold = 5
            if not self.flag_solver_specified:
                self.solver_type = solver_names.L2R_L2LOSS_SVC
                self.flag_solver_specified = True
            elif self.solver_type not in [solver_names.L2R_LR, solver_names.L2R_L2LOSS_SVC, solver_names.L2R_L2LOSS_SVR]:
                raise ValueError("Warm-start parameter search only available for -s 0, -s 2 and -s 11")

        if self.eps == float('inf'):
            if self.solver_type in [solver_names.L2R_LR, solver_names.L2R_L2LOSS_SVC]:
                self.eps = 0.01
            elif self.solver_type in [solver_names.L2R_L2LOSS_SVR]:
                self.eps = 0.0001
            elif self.solver_type in [solver_names.L2R_L2LOSS_SVC_DUAL, solver_names.L2R_L1LOSS_SVC_DUAL, solver_names.MCSVM_CS, solver_names.L2R_LR_DUAL]:
                self.eps = 0.1
            elif self.solver_type in [solver_names.L1R_L2LOSS_SVC, solver_names.L1R_LR]:
                self.eps = 0.01
            elif self.solver_type in [solver_names.L2R_L2LOSS_SVR_DUAL, solver_names.L2R_L1LOSS_SVR_DUAL]:
                self.eps = 0.1
            elif self.solver_type in [solver_names.ONECLASS_SVM]:
                self.eps = 0.01

class model(Structure):
    _names = ["param", "nr_class", "nr_feature", "w", "label", "bias", "rho"]
    _types = [parameter, c_int, c_int, POINTER(c_double), POINTER(c_int), c_double, c_double]
    _fields_ = genFields(_names, _types)

    def __init__(self):
        self.__createfrom__ = 'python'

    def __del__(self):
        # free memory created by C to avoid memory leak
        if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
            liblinear.free_and_destroy_model(pointer(self))

    def get_nr_feature(self):
        return liblinear.get_nr_feature(self)

    def get_nr_class(self):
        return liblinear.get_nr_class(self)

    def get_labels(self):
        nr_class = self.get_nr_class()
        labels = (c_int * nr_class)()
        liblinear.get_labels(self, labels)
        return labels[:nr_class]

    def get_decfun_coef(self, feat_idx, label_idx=0):
        return liblinear.get_decfun_coef(self, feat_idx, label_idx)

    def get_decfun_bias(self, label_idx=0):
        return liblinear.get_decfun_bias(self, label_idx)

    def get_decfun_rho(self):
        return liblinear.get_decfun_rho(self)

    def get_decfun(self, label_idx=0):
        w = [liblinear.get_decfun_coef(self, feat_idx, label_idx) for feat_idx in range(1, self.nr_feature+1)]
        if self.is_oneclass_model():
            rho = self.get_decfun_rho()
            return (w, -rho)
        else:
            b = liblinear.get_decfun_bias(self, label_idx)
            return (w, b)

    def is_probability_model(self):
        return (liblinear.check_probability_model(self) == 1)

    def is_regression_model(self):
        return (liblinear.check_regression_model(self) == 1)

    def is_oneclass_model(self):
        return (liblinear.check_oneclass_model(self) == 1)

def toPyModel(model_ptr):
    """
    toPyModel(model_ptr) -> model

    Convert a ctypes POINTER(model) to a Python model
    """
    if bool(model_ptr) == False:
        raise ValueError("Null pointer")
    m = model_ptr.contents
    m.__createfrom__ = 'C'
    return m

fillprototype(liblinear.train, POINTER(model), [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.find_parameters, None, [POINTER(problem), POINTER(parameter), c_int, c_double, c_double, POINTER(c_double), POINTER(c_double), POINTER(c_double)])
fillprototype(liblinear.cross_validation, None, [POINTER(problem), POINTER(parameter), c_int, POINTER(c_double)])

fillprototype(liblinear.predict_values, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])
fillprototype(liblinear.predict, c_double, [POINTER(model), POINTER(feature_node)])
fillprototype(liblinear.predict_probability, c_double, [POINTER(model), POINTER(feature_node), POINTER(c_double)])

fillprototype(liblinear.save_model, c_int, [c_char_p, POINTER(model)])
fillprototype(liblinear.load_model, POINTER(model), [c_char_p])

fillprototype(liblinear.get_nr_feature, c_int, [POINTER(model)])
fillprototype(liblinear.get_nr_class, c_int, [POINTER(model)])
fillprototype(liblinear.get_labels, None, [POINTER(model), POINTER(c_int)])
fillprototype(liblinear.get_decfun_coef, c_double, [POINTER(model), c_int, c_int])
fillprototype(liblinear.get_decfun_bias, c_double, [POINTER(model), c_int])
fillprototype(liblinear.get_decfun_rho, c_double, [POINTER(model)])

fillprototype(liblinear.free_model_content, None, [POINTER(model)])
fillprototype(liblinear.free_and_destroy_model, None, [POINTER(POINTER(model))])
fillprototype(liblinear.destroy_param, None, [POINTER(parameter)])
fillprototype(liblinear.check_parameter, c_char_p, [POINTER(problem), POINTER(parameter)])
fillprototype(liblinear.check_probability_model, c_int, [POINTER(model)])
fillprototype(liblinear.check_regression_model, c_int, [POINTER(model)])
fillprototype(liblinear.check_oneclass_model, c_int, [POINTER(model)])
fillprototype(liblinear.set_print_string_function, None, [CFUNCTYPE(None, c_char_p)])
285
liblinear-2.49/python/liblinear/liblinearutil.py
Normal file
@@ -0,0 +1,285 @@
import os, sys
from .liblinear import *
from .liblinear import __all__ as liblinear_all
from .commonutil import *
from .commonutil import __all__ as common_all
from ctypes import c_double

try:
    import numpy as np
    import scipy
    from scipy import sparse
except:
    scipy = None

if sys.version_info[0] < 3:
    range = xrange
    from itertools import izip as zip
    _cstr = lambda s: s.encode("utf-8") if isinstance(s,unicode) else str(s)
else:
    _cstr = lambda s: bytes(s, "utf-8")

__all__ = ['load_model', 'save_model', 'train', 'predict'] + liblinear_all + common_all


def load_model(model_file_name):
    """
    load_model(model_file_name) -> model

    Load a LIBLINEAR model from model_file_name and return.
    """
    model = liblinear.load_model(_cstr(model_file_name))
    if not model:
        print("can't open model file %s" % model_file_name)
        return None
    model = toPyModel(model)
    return model

def save_model(model_file_name, model):
    """
    save_model(model_file_name, model) -> None

    Save a LIBLINEAR model to the file model_file_name.
    """
    liblinear.save_model(_cstr(model_file_name), model)

def train(arg1, arg2=None, arg3=None):
    """
    train(y, x [, options]) -> model | ACC

    y: a list/tuple/ndarray of l true labels (type must be int/double).

    x: 1. a list/tuple of l training instances. Feature vector of
          each training instance is a list/tuple or dictionary.

       2. an l * n numpy ndarray or scipy spmatrix (n: number of features).

    train(prob [, options]) -> model | ACC
    train(prob, param) -> model | ACC

    Train a model from data (y, x) or a problem prob using
    'options' or a parameter param.

    If '-v' is specified in 'options' (i.e., cross validation)
    either accuracy (ACC) or mean-squared error (MSE) is returned.

    options:
        -s type : set type of solver (default 1)
          for multi-class classification
             0 -- L2-regularized logistic regression (primal)
             1 -- L2-regularized L2-loss support vector classification (dual)
             2 -- L2-regularized L2-loss support vector classification (primal)
             3 -- L2-regularized L1-loss support vector classification (dual)
             4 -- support vector classification by Crammer and Singer
             5 -- L1-regularized L2-loss support vector classification
             6 -- L1-regularized logistic regression
             7 -- L2-regularized logistic regression (dual)
          for regression
            11 -- L2-regularized L2-loss support vector regression (primal)
            12 -- L2-regularized L2-loss support vector regression (dual)
            13 -- L2-regularized L1-loss support vector regression (dual)
          for outlier detection
            21 -- one-class support vector machine (dual)
        -c cost : set the parameter C (default 1)
        -p epsilon : set the epsilon in loss function of SVR (default 0.1)
        -e epsilon : set tolerance of termination criterion
            -s 0 and 2
                |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
                where f is the primal function, (default 0.01)
            -s 11
                |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.0001)
            -s 1, 3, 4, 7, and 21
                Dual maximal violation <= eps; similar to libsvm (default 0.1 except 0.01 for -s 21)
            -s 5 and 6
                |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
                where f is the primal function (default 0.01)
            -s 12 and 13
                |f'(alpha)|_1 <= eps |f'(alpha0)|,
                where f is the dual function (default 0.1)
        -B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
        -R : not regularize the bias; must with -B 1 to have the bias; DON'T use this unless you know what it is
            (for -s 0, 2, 5, 6, 11)
        -wi weight: weights adjust the parameter C of different classes (see README for details)
        -v n: n-fold cross validation mode
        -C : find parameters (C for -s 0, 2 and C, p for -s 11)
        -q : quiet mode (no outputs)
    """
    prob, param = None, None
    if isinstance(arg1, (list, tuple)) or (scipy and isinstance(arg1, np.ndarray)):
        assert isinstance(arg2, (list, tuple)) or (scipy and isinstance(arg2, (np.ndarray, sparse.spmatrix)))
        y, x, options = arg1, arg2, arg3
        prob = problem(y, x)
        param = parameter(options)
    elif isinstance(arg1, problem):
        prob = arg1
        if isinstance(arg2, parameter):
            param = arg2
        else:
            param = parameter(arg2)
    if prob == None or param == None:
        raise TypeError("Wrong types for the arguments")

    prob.set_bias(param.bias)
    liblinear.set_print_string_function(param.print_func)
    err_msg = liblinear.check_parameter(prob, param)
    if err_msg:
        raise ValueError('Error: %s' % err_msg)

    if param.flag_find_parameters:
        nr_fold = param.nr_fold
        best_C = c_double()
        best_p = c_double()
        best_score = c_double()
        if param.flag_C_specified:
            start_C = param.C
        else:
            start_C = -1.0
        if param.flag_p_specified:
            start_p = param.p
        else:
            start_p = -1.0
        liblinear.find_parameters(prob, param, nr_fold, start_C, start_p, best_C, best_p, best_score)
        if param.solver_type in [solver_names.L2R_LR, solver_names.L2R_L2LOSS_SVC]:
            print("Best C = %g CV accuracy = %g%%\n"% (best_C.value, 100.0*best_score.value))
        elif param.solver_type in [solver_names.L2R_L2LOSS_SVR]:
            print("Best C = %g Best p = %g CV MSE = %g\n"% (best_C.value, best_p.value, best_score.value))
        return best_C.value,best_p.value,best_score.value

    elif param.flag_cross_validation:
        l, nr_fold = prob.l, param.nr_fold
        target = (c_double * l)()
        liblinear.cross_validation(prob, param, nr_fold, target)
        ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
        if param.solver_type in [solver_names.L2R_L2LOSS_SVR, solver_names.L2R_L2LOSS_SVR_DUAL, solver_names.L2R_L1LOSS_SVR_DUAL]:
            print("Cross Validation Mean squared error = %g" % MSE)
            print("Cross Validation Squared correlation coefficient = %g" % SCC)
            return MSE
        else:
            print("Cross Validation Accuracy = %g%%" % ACC)
            return ACC
    else:
        m = liblinear.train(prob, param)
        m = toPyModel(m)

        return m

def predict(y, x, m, options=""):
    """
    predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)

    y: a list/tuple/ndarray of l true labels (type must be int/double).
       It is used for calculating the accuracy. Use [] if true labels are
       unavailable.

    x: 1. a list/tuple of l training instances. Feature vector of
          each training instance is a list/tuple or dictionary.

       2. an l * n numpy ndarray or scipy spmatrix (n: number of features).

    Predict data (y, x) with the SVM model m.
    options:
        -b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only
        -q quiet mode (no outputs)

    The return tuple contains
    p_labels: a list of predicted labels
    p_acc: a tuple including accuracy (for classification), mean-squared
           error, and squared correlation coefficient (for regression).
    p_vals: a list of decision values or probability estimates (if '-b 1'
            is specified). If k is the number of classes, for decision values,
            each element includes results of predicting k binary-class
            SVMs. if k = 2 and solver is not MCSVM_CS, only one decision value
            is returned. For probabilities, each element contains k values
            indicating the probability that the testing instance is in each class.
            Note that the order of classes here is the same as 'model.label'
            field in the model structure.
    """

    def info(s):
        print(s)

    if scipy and isinstance(x, np.ndarray):
        x = np.ascontiguousarray(x) # enforce row-major
    elif scipy and isinstance(x, sparse.spmatrix):
        x = x.tocsr()
    elif not isinstance(x, (list, tuple)):
        raise TypeError("type of x: {0} is not supported!".format(type(x)))

    if (not isinstance(y, (list, tuple))) and (not (scipy and isinstance(y, np.ndarray))):
        raise TypeError("type of y: {0} is not supported!".format(type(y)))

    predict_probability = 0
    argv = options.split()
    i = 0
    while i < len(argv):
        if argv[i] == '-b':
            i += 1
            predict_probability = int(argv[i])
        elif argv[i] == '-q':
            info = print_null
        else:
            raise ValueError("Wrong options")
        i+=1

    solver_type = m.param.solver_type
    nr_class = m.get_nr_class()
    nr_feature = m.get_nr_feature()
    is_prob_model = m.is_probability_model()
    bias = m.bias
    if bias >= 0:
        biasterm = feature_node(nr_feature+1, bias)
    else:
        biasterm = feature_node(-1, bias)
    pred_labels = []
    pred_values = []

    if scipy and isinstance(x, sparse.spmatrix):
        nr_instance = x.shape[0]
    else:
        nr_instance = len(x)

    if predict_probability:
        if not is_prob_model:
            raise TypeError('probability output is only supported for logistic regression')
        prob_estimates = (c_double * nr_class)()
        for i in range(nr_instance):
            if scipy and isinstance(x, sparse.spmatrix):
                indslice = slice(x.indptr[i], x.indptr[i+1])
                xi, idx = gen_feature_nodearray((x.indices[indslice], x.data[indslice]), feature_max=nr_feature)
            else:
                xi, idx = gen_feature_nodearray(x[i], feature_max=nr_feature)
            xi[-2] = biasterm
            label = liblinear.predict_probability(m, xi, prob_estimates)
            values = prob_estimates[:nr_class]
            pred_labels += [label]
            pred_values += [values]
    else:
        if nr_class <= 2:
            nr_classifier = 1
        else:
            nr_classifier = nr_class
        dec_values = (c_double * nr_classifier)()
        for i in range(nr_instance):
            if scipy and isinstance(x, sparse.spmatrix):
                indslice = slice(x.indptr[i], x.indptr[i+1])
                xi, idx = gen_feature_nodearray((x.indices[indslice], x.data[indslice]), feature_max=nr_feature)
            else:
                xi, idx = gen_feature_nodearray(x[i], feature_max=nr_feature)
            xi[-2] = biasterm
            label = liblinear.predict_values(m, xi, dec_values)
            values = dec_values[:nr_classifier]
            pred_labels += [label]
            pred_values += [values]

    if len(y) == 0:
        y = [0] * nr_instance
    ACC, MSE, SCC = evaluations(y, pred_labels)

    if m.is_regression_model():
        info("Mean squared error = %g (regression)" % MSE)
        info("Squared correlation coefficient = %g (regression)" % SCC)
    else:
        info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(round(nr_instance*ACC/100)), nr_instance))

    return pred_labels, (ACC, MSE, SCC), pred_values
123
liblinear-2.49/python/setup.py
Normal file
@@ -0,0 +1,123 @@
#!/usr/bin/env python

import sys, os
from os import path
from shutil import copyfile, rmtree
from glob import glob

from setuptools import setup, Extension
from distutils.command.clean import clean as clean_cmd

# a technique to build a shared library on windows
from distutils.command.build_ext import build_ext

build_ext.get_export_symbols = lambda x, y: []


PACKAGE_DIR = "liblinear"
PACKAGE_NAME = "liblinear-official"
VERSION = "2.49.0"
cpp_dir = "cpp-source"
# should be consistent with dynamic_lib_name in liblinear/liblinear.py
dynamic_lib_name = "clib"

# sources to be included to build the shared library
source_codes = [
    path.join("blas", "daxpy.c"),
    path.join("blas", "ddot.c"),
    path.join("blas", "dnrm2.c"),
    path.join("blas", "dscal.c"),
    "linear.cpp",
    "newton.cpp",
]
headers = [
    path.join("blas", "blas.h"),
    path.join("blas", "blasp.h"),
    "newton.h",
    "linear.h",
    "linear.def",
]

# license parameters
license_source = path.join("..", "COPYRIGHT")
license_file = "LICENSE"
license_name = "BSD-3-Clause"

kwargs_for_extension = {
    "sources": [path.join(cpp_dir, f) for f in source_codes],
    "depends": [path.join(cpp_dir, f) for f in headers],
    "include_dirs": [cpp_dir],
    "language": "c++",
}

# see ../Makefile.win
if sys.platform == "win32":
    kwargs_for_extension.update(
        {
            "define_macros": [("_WIN64", ""), ("_CRT_SECURE_NO_DEPRECATE", "")],
            # raw string so the backslash in the .def path is not treated as an escape
            "extra_link_args": [r"-DEF:{}\linear.def".format(cpp_dir)],
        }
    )


def create_cpp_source():
    for f in source_codes + headers:
        src_file = path.join("..", f)
        tgt_file = path.join(cpp_dir, f)
        # ensure blas directory is created
        os.makedirs(path.dirname(tgt_file), exist_ok=True)
        copyfile(src_file, tgt_file)


class CleanCommand(clean_cmd):
    def run(self):
        clean_cmd.run(self)
        to_be_removed = ["build/", "dist/", "MANIFEST", cpp_dir, "{}.egg-info".format(PACKAGE_NAME), license_file]
        to_be_removed += glob("./{}/{}.*".format(PACKAGE_DIR, dynamic_lib_name))
        for root, dirs, files in os.walk(os.curdir, topdown=False):
            if "__pycache__" in dirs:
                to_be_removed.append(path.join(root, "__pycache__"))
            to_be_removed += [f for f in files if f.endswith(".pyc")]

        for f in to_be_removed:
            print("remove {}".format(f))
            if f == ".":
                continue
            elif path.isfile(f):
                os.remove(f)
            elif path.isdir(f):
                rmtree(f)

def main():
    if not path.exists(cpp_dir):
        create_cpp_source()

    if not path.exists(license_file):
        copyfile(license_source, license_file)

    with open("README") as f:
        long_description = f.read()

    setup(
        name=PACKAGE_NAME,
        packages=[PACKAGE_DIR],
        version=VERSION,
        description="Python binding of LIBLINEAR",
        long_description=long_description,
        long_description_content_type="text/plain",
        author="ML group @ National Taiwan University",
        author_email="cjlin@csie.ntu.edu.tw",
        url="https://www.csie.ntu.edu.tw/~cjlin/liblinear",
        license=license_name,
        install_requires=["scipy"],
        ext_modules=[
            Extension(
                "{}.{}".format(PACKAGE_DIR, dynamic_lib_name), **kwargs_for_extension
            )
        ],
        cmdclass={"clean": CleanCommand},
    )


main()
405
liblinear-2.49/svm-scale.c
Normal file
@@ -0,0 +1,405 @@
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>

void exit_with_help()
{
	printf(
	"Usage: svm-scale [options] data_filename\n"
	"options:\n"
	"-l lower : x scaling lower limit (default -1)\n"
	"-u upper : x scaling upper limit (default +1)\n"
	"-y y_lower y_upper : y scaling limits (default: no y scaling)\n"
	"-s save_filename : save scaling parameters to save_filename\n"
	"-r restore_filename : restore scaling parameters from restore_filename\n"
	);
	exit(1);
}

char *line = NULL;
int max_line_len = 1024;
double lower=-1.0,upper=1.0,y_lower,y_upper;
int y_scaling = 0;
double *feature_max;
double *feature_min;
double y_max = -DBL_MAX;
double y_min = DBL_MAX;
int max_index;
int min_index;
long int num_nonzeros = 0;
long int new_num_nonzeros = 0;

#define max(x,y) (((x)>(y))?(x):(y))
#define min(x,y) (((x)<(y))?(x):(y))

void output_target(double value);
void output(int index, double value);
char* readline(FILE *input);
int clean_up(FILE *fp_restore, FILE *fp, const char *msg);

int main(int argc,char **argv)
{
	int i,index;
	FILE *fp, *fp_restore = NULL;
	char *save_filename = NULL;
	char *restore_filename = NULL;

	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;
		switch(argv[i-1][1])
		{
			case 'l': lower = atof(argv[i]); break;
			case 'u': upper = atof(argv[i]); break;
			case 'y':
				y_lower = atof(argv[i]);
				++i;
				y_upper = atof(argv[i]);
				y_scaling = 1;
				break;
			case 's': save_filename = argv[i]; break;
			case 'r': restore_filename = argv[i]; break;
			default:
				fprintf(stderr,"unknown option\n");
				exit_with_help();
		}
	}

	if(!(upper > lower) || (y_scaling && !(y_upper > y_lower)))
	{
		fprintf(stderr,"inconsistent lower/upper specification\n");
		exit(1);
	}

	if(restore_filename && save_filename)
	{
		fprintf(stderr,"cannot use -r and -s simultaneously\n");
		exit(1);
	}

	if(argc != i+1)
		exit_with_help();

	fp=fopen(argv[i],"r");

	if(fp==NULL)
	{
		fprintf(stderr,"can't open file %s\n", argv[i]);
		exit(1);
	}

	line = (char *) malloc(max_line_len*sizeof(char));

#define SKIP_TARGET\
	while(isspace(*p)) ++p;\
	while(!isspace(*p)) ++p;

#define SKIP_ELEMENT\
	while(*p!=':') ++p;\
	++p;\
	while(isspace(*p)) ++p;\
	while(*p && !isspace(*p)) ++p;

	/* assumption: min index of attributes is 1 */
	/* pass 1: find out max index of attributes */
	max_index = 0;
	min_index = 1;

	if(restore_filename)
	{
		int idx, c;

		fp_restore = fopen(restore_filename,"r");
		if(fp_restore==NULL)
		{
			fprintf(stderr,"can't open file %s\n", restore_filename);
			exit(1);
		}

		c = fgetc(fp_restore);
		if(c == 'y')
		{
			readline(fp_restore);
			readline(fp_restore);
			readline(fp_restore);
		}
		readline(fp_restore);
		readline(fp_restore);

		while(fscanf(fp_restore,"%d %*f %*f\n",&idx) == 1)
			max_index = max(idx,max_index);
		rewind(fp_restore);
	}

	while(readline(fp)!=NULL)
	{
		char *p=line;

		SKIP_TARGET

		while(sscanf(p,"%d:%*f",&index)==1)
		{
			max_index = max(max_index, index);
			min_index = min(min_index, index);
			SKIP_ELEMENT
			num_nonzeros++;
		}
	}

	if(min_index < 1)
		fprintf(stderr,
			"WARNING: minimal feature index is %d, but indices should start from 1\n", min_index);

	rewind(fp);

	feature_max = (double *)malloc((max_index+1)* sizeof(double));
	feature_min = (double *)malloc((max_index+1)* sizeof(double));

	if(feature_max == NULL || feature_min == NULL)
	{
		fprintf(stderr,"can't allocate enough memory\n");
		exit(1);
	}

	for(i=0;i<=max_index;i++)
	{
		feature_max[i]=-DBL_MAX;
		feature_min[i]=DBL_MAX;
	}

	/* pass 2: find out min/max value */
	while(readline(fp)!=NULL)
	{
		char *p=line;
		int next_index=1;
		double target;
		double value;

		if (sscanf(p,"%lf",&target) != 1)
			return clean_up(fp_restore, fp, "ERROR: failed to read labels\n");
		y_max = max(y_max,target);
		y_min = min(y_min,target);

		SKIP_TARGET

		while(sscanf(p,"%d:%lf",&index,&value)==2)
		{
			for(i=next_index;i<index;i++)
			{
				feature_max[i]=max(feature_max[i],0);
				feature_min[i]=min(feature_min[i],0);
			}

			feature_max[index]=max(feature_max[index],value);
			feature_min[index]=min(feature_min[index],value);

			SKIP_ELEMENT
			next_index=index+1;
		}

		for(i=next_index;i<=max_index;i++)
		{
			feature_max[i]=max(feature_max[i],0);
			feature_min[i]=min(feature_min[i],0);
		}
	}

	rewind(fp);

	/* pass 2.5: save/restore feature_min/feature_max */

	if(restore_filename)
	{
		/* fp_restore rewinded in finding max_index */
		int idx, c;
		double fmin, fmax;
		int next_index = 1;

		if((c = fgetc(fp_restore)) == 'y')
		{
			if(fscanf(fp_restore, "%lf %lf\n", &y_lower, &y_upper) != 2 ||
			   fscanf(fp_restore, "%lf %lf\n", &y_min, &y_max) != 2)
				return clean_up(fp_restore, fp, "ERROR: failed to read scaling parameters\n");
			y_scaling = 1;
		}
		else
			ungetc(c, fp_restore);

		if (fgetc(fp_restore) == 'x')
		{
			if(fscanf(fp_restore, "%lf %lf\n", &lower, &upper) != 2)
				return clean_up(fp_restore, fp, "ERROR: failed to read scaling parameters\n");
			while(fscanf(fp_restore,"%d %lf %lf\n",&idx,&fmin,&fmax)==3)
			{
				for(i = next_index;i<idx;i++)
					if(feature_min[i] != feature_max[i])
					{
						fprintf(stderr,
							"WARNING: feature index %d appeared in file %s was not seen in the scaling factor file %s. The feature is scaled to 0.\n",
							i, argv[argc-1], restore_filename);
						feature_min[i] = 0;
						feature_max[i] = 0;
					}

				feature_min[idx] = fmin;
				feature_max[idx] = fmax;

				next_index = idx + 1;
			}

			for(i=next_index;i<=max_index;i++)
				if(feature_min[i] != feature_max[i])
				{
					fprintf(stderr,
						"WARNING: feature index %d appeared in file %s was not seen in the scaling factor file %s. The feature is scaled to 0.\n",
						i, argv[argc-1], restore_filename);
					feature_min[i] = 0;
					feature_max[i] = 0;
				}
		}
		fclose(fp_restore);
	}

	if(save_filename)
	{
		FILE *fp_save = fopen(save_filename,"w");
		if(fp_save==NULL)
		{
			fprintf(stderr,"can't open file %s\n", save_filename);
			exit(1);
		}
		if(y_scaling)
		{
			fprintf(fp_save, "y\n");
			fprintf(fp_save, "%.17g %.17g\n", y_lower, y_upper);
			fprintf(fp_save, "%.17g %.17g\n", y_min, y_max);
		}
		fprintf(fp_save, "x\n");
		fprintf(fp_save, "%.17g %.17g\n", lower, upper);
		for(i=1;i<=max_index;i++)
		{
			if(feature_min[i]!=feature_max[i])
				fprintf(fp_save,"%d %.17g %.17g\n",i,feature_min[i],feature_max[i]);
		}

		if(min_index < 1)
			fprintf(stderr,
				"WARNING: scaling factors with indices smaller than 1 are not stored to the file %s.\n", save_filename);

		fclose(fp_save);
	}

	/* pass 3: scale */
	while(readline(fp)!=NULL)
	{
		char *p=line;
		int next_index=1;
		double target;
		double value;

		if (sscanf(p,"%lf",&target) != 1)
			return clean_up(NULL, fp, "ERROR: failed to read labels\n");
		output_target(target);

		SKIP_TARGET

		while(sscanf(p,"%d:%lf",&index,&value)==2)
		{
			for(i=next_index;i<index;i++)
				output(i,0);

			output(index,value);

			SKIP_ELEMENT
			next_index=index+1;
		}

		for(i=next_index;i<=max_index;i++)
			output(i,0);

		printf("\n");
	}

	if (new_num_nonzeros > num_nonzeros)
		fprintf(stderr,
			"WARNING: original #nonzeros %ld\n"
			" > new #nonzeros %ld\n"
			"If feature values are non-negative and sparse, use -l 0 rather than the default -l -1\n",
			num_nonzeros, new_num_nonzeros);

	free(line);
	free(feature_max);
	free(feature_min);
	fclose(fp);
	return 0;
}

char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	while(strrchr(line,'\n') == NULL)
	{
		max_line_len *= 2;
		line = (char *) realloc(line, max_line_len);
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;
	}
	return line;
}

void output_target(double value)
{
	if(y_scaling)
	{
		if(value == y_min)
			value = y_lower;
		else if(value == y_max)
			value = y_upper;
		else value = y_lower + (y_upper-y_lower) *
			(value - y_min)/(y_max-y_min);
	}
	printf("%.17g ",value);
}

void output(int index, double value)
{
	/* skip single-valued attribute */
	if(feature_max[index] == feature_min[index])
		return;

	if(value == feature_min[index])
		value = lower;
	else if(value == feature_max[index])
		value = upper;
	else
		value = lower + (upper-lower) *
			(value-feature_min[index])/
			(feature_max[index]-feature_min[index]);

	if(value != 0)
	{
		printf("%d:%g ",index, value);
		new_num_nonzeros++;
	}
}

int clean_up(FILE *fp_restore, FILE *fp, const char* msg)
{
	fprintf(stderr, "%s", msg);
	free(line);
	free(feature_max);
	free(feature_min);
	fclose(fp);
	if (fp_restore)
		fclose(fp_restore);
	return -1;
}
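The output() routine above applies a per-feature min/max linear rescaling: values at the observed minimum and maximum map exactly onto the -l/-u limits, everything in between is interpolated, and single-valued features are dropped. A minimal Python sketch of the same mapping (the function name and arguments are illustrative, not part of the C sources):

def scale_value(value, fmin, fmax, lower=-1.0, upper=1.0):
    # Single-valued attribute: svm-scale skips it entirely.
    if fmax == fmin:
        return None
    # Map the observed endpoints exactly onto the limits.
    if value == fmin:
        return lower
    if value == fmax:
        return upper
    # Linear interpolation for everything in between.
    return lower + (upper - lower) * (value - fmin) / (fmax - fmin)

# For example, scale_value(5.0, 0.0, 10.0) gives 0.0 with the default -1/+1 limits.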
479
liblinear-2.49/train.c
Normal file
@@ -0,0 +1,479 @@
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include "linear.h"
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
#define INF HUGE_VAL

void print_null(const char *s) {}

void exit_with_help()
{
	printf(
	"Usage: train [options] training_set_file [model_file]\n"
	"options:\n"
	"-s type : set type of solver (default 1)\n"
	"  for multi-class classification\n"
	"	 0 -- L2-regularized logistic regression (primal)\n"
	"	 1 -- L2-regularized L2-loss support vector classification (dual)\n"
	"	 2 -- L2-regularized L2-loss support vector classification (primal)\n"
	"	 3 -- L2-regularized L1-loss support vector classification (dual)\n"
	"	 4 -- support vector classification by Crammer and Singer\n"
	"	 5 -- L1-regularized L2-loss support vector classification\n"
	"	 6 -- L1-regularized logistic regression\n"
	"	 7 -- L2-regularized logistic regression (dual)\n"
	"  for regression\n"
	"	11 -- L2-regularized L2-loss support vector regression (primal)\n"
	"	12 -- L2-regularized L2-loss support vector regression (dual)\n"
	"	13 -- L2-regularized L1-loss support vector regression (dual)\n"
	"  for outlier detection\n"
	"	21 -- one-class support vector machine (dual)\n"
	"-c cost : set the parameter C (default 1)\n"
	"-p epsilon : set the epsilon in loss function of SVR (default 0.1)\n"
	"-n nu : set the parameter nu of one-class SVM (default 0.5)\n"
	"-e epsilon : set tolerance of termination criterion\n"
	"	-s 0 and 2\n"
	"		|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n"
	"		where f is the primal function and pos/neg are # of\n"
	"		positive/negative data (default 0.01)\n"
	"	-s 11\n"
	"		|f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.0001)\n"
	"	-s 1, 3, 4, 7, and 21\n"
	"		Dual maximal violation <= eps; similar to libsvm (default 0.1 except 0.01 for -s 21)\n"
	"	-s 5 and 6\n"
	"		|f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,\n"
	"		where f is the primal function (default 0.01)\n"
	"	-s 12 and 13\n"
	"		|f'(alpha)|_1 <= eps |f'(alpha0)|,\n"
	"		where f is the dual function (default 0.1)\n"
	"-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n"
	"-R : not regularize the bias; must with -B 1 to have the bias; DON'T use this unless you know what it is\n"
	"	(for -s 0, 2, 5, 6, 11)\n"
	"-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
	"-v n: n-fold cross validation mode\n"
	"-C : find parameters (C for -s 0, 2 and C, p for -s 11)\n"
	"-q : quiet mode (no outputs)\n"
	);
	exit(1);
}

void exit_input_error(int line_num)
{
	fprintf(stderr,"Wrong input format at line %d\n", line_num);
	exit(1);
}

static char *line = NULL;
static int max_line_len;

static char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	while(strrchr(line,'\n') == NULL)
	{
		max_line_len *= 2;
		line = (char *) realloc(line,max_line_len);
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;
	}
	return line;
}

void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
void read_problem(const char *filename);
void do_cross_validation();
void do_find_parameters();

struct feature_node *x_space;
struct parameter param;
struct problem prob;
struct model* model_;
int flag_cross_validation;
int flag_find_parameters;
int flag_C_specified;
int flag_p_specified;
int flag_solver_specified;
int nr_fold;
double bias;

int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"ERROR: %s\n",error_msg);
		exit(1);
	}

	if (flag_find_parameters)
	{
		do_find_parameters();
	}
	else if(flag_cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model_=train(&prob, &param);
		if(save_model(model_file_name, model_))
		{
			fprintf(stderr,"can't save model to file %s\n",model_file_name);
			exit(1);
		}
		free_and_destroy_model(&model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}

void do_find_parameters()
{
	double start_C, start_p, best_C, best_p, best_score;
	if (flag_C_specified)
		start_C = param.C;
	else
		start_C = -1.0;
	if (flag_p_specified)
		start_p = param.p;
	else
		start_p = -1.0;

	printf("Doing parameter search with %d-fold cross validation.\n", nr_fold);
	find_parameters(&prob, &param, nr_fold, start_C, start_p, &best_C, &best_p, &best_score);
	if(param.solver_type == L2R_LR || param.solver_type == L2R_L2LOSS_SVC)
		printf("Best C = %g  CV accuracy = %g%%\n", best_C, 100.0*best_score);
	else if(param.solver_type == L2R_L2LOSS_SVR)
		printf("Best C = %g Best p = %g  CV MSE = %g\n", best_C, best_p, best_score);
}

void do_cross_validation()
{
	int i;
	int total_correct = 0;
	double total_error = 0;
	double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
	double *target = Malloc(double, prob.l);

	cross_validation(&prob,&param,nr_fold,target);
	if(param.solver_type == L2R_L2LOSS_SVR ||
	   param.solver_type == L2R_L1LOSS_SVR_DUAL ||
	   param.solver_type == L2R_L2LOSS_SVR_DUAL)
	{
		for(i=0;i<prob.l;i++)
		{
			double y = prob.y[i];
			double v = target[i];
			total_error += (v-y)*(v-y);
			sumv += v;
			sumy += y;
			sumvv += v*v;
			sumyy += y*y;
			sumvy += v*y;
		}
		printf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
		printf("Cross Validation Squared correlation coefficient = %g\n",
			((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
			((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
			);
	}
	else
	{
		for(i=0;i<prob.l;i++)
			if(target[i] == prob.y[i])
				++total_correct;
		printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
	}

	free(target);
}

void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
{
	int i;
	void (*print_func)(const char*) = NULL;	// default printing to stdout

	// default values
	param.solver_type = L2R_L2LOSS_SVC_DUAL;
	param.C = 1;
	param.p = 0.1;
	param.nu = 0.5;
	param.eps = INF; // see setting below
	param.nr_weight = 0;
	param.regularize_bias = 1;
	param.weight_label = NULL;
	param.weight = NULL;
	param.init_sol = NULL;
	param.w_recalc = false;
	flag_cross_validation = 0;
	flag_C_specified = 0;
	flag_p_specified = 0;
	flag_solver_specified = 0;
	flag_find_parameters = 0;
	bias = -1;

	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		if(++i>=argc)
			exit_with_help();
		switch(argv[i-1][1])
		{
			case 's':
				param.solver_type = atoi(argv[i]);
				flag_solver_specified = 1;
				break;

			case 'c':
				param.C = atof(argv[i]);
				flag_C_specified = 1;
				break;

			case 'p':
				flag_p_specified = 1;
				param.p = atof(argv[i]);
				break;

			case 'n':
				param.nu = atof(argv[i]);
				break;

			case 'e':
				param.eps = atof(argv[i]);
				break;

			case 'B':
				bias = atof(argv[i]);
				break;

			case 'w':
				++param.nr_weight;
				param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight);
				param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight);
				param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
				param.weight[param.nr_weight-1] = atof(argv[i]);
				break;

			case 'v':
				flag_cross_validation = 1;
				nr_fold = atoi(argv[i]);
				if(nr_fold < 2)
				{
					fprintf(stderr,"n-fold cross validation: n must >= 2\n");
					exit_with_help();
				}
				break;

			case 'q':
				print_func = &print_null;
				i--;
				break;

			case 'C':
				flag_find_parameters = 1;
				i--;
				break;

			case 'R':
				param.regularize_bias = 0;
				i--;
				break;

			default:
				fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
				exit_with_help();
				break;
		}
	}

	set_print_string_function(print_func);

	// determine filenames
	if(i>=argc)
		exit_with_help();

	strcpy(input_file_name, argv[i]);

	if(i<argc-1)
		strcpy(model_file_name,argv[i+1]);
	else
	{
		char *p = strrchr(argv[i],'/');
		if(p==NULL)
			p = argv[i];
		else
			++p;
		sprintf(model_file_name,"%s.model",p);
	}

	// default solver for parameter selection is L2R_L2LOSS_SVC
	if(flag_find_parameters)
	{
		if(!flag_cross_validation)
			nr_fold = 5;
		if(!flag_solver_specified)
		{
			fprintf(stderr, "Solver not specified. Using -s 2\n");
			param.solver_type = L2R_L2LOSS_SVC;
		}
		else if(param.solver_type != L2R_LR && param.solver_type != L2R_L2LOSS_SVC && param.solver_type != L2R_L2LOSS_SVR)
		{
			fprintf(stderr, "Warm-start parameter search only available for -s 0, -s 2 and -s 11\n");
			exit_with_help();
		}
	}

	if(param.eps == INF)
	{
		switch(param.solver_type)
		{
			case L2R_LR:
			case L2R_L2LOSS_SVC:
				param.eps = 0.01;
				break;
			case L2R_L2LOSS_SVR:
				param.eps = 0.0001;
				break;
			case L2R_L2LOSS_SVC_DUAL:
			case L2R_L1LOSS_SVC_DUAL:
			case MCSVM_CS:
			case L2R_LR_DUAL:
				param.eps = 0.1;
				break;
			case L1R_L2LOSS_SVC:
			case L1R_LR:
				param.eps = 0.01;
				break;
			case L2R_L1LOSS_SVR_DUAL:
			case L2R_L2LOSS_SVR_DUAL:
				param.eps = 0.1;
				break;
			case ONECLASS_SVM:
				param.eps = 0.01;
				break;
		}
	}
}

// read in a problem (in libsvm format)
void read_problem(const char *filename)
{
	int max_index, inst_max_index, i;
	size_t elements, j;
	FILE *fp = fopen(filename,"r");
	char *endptr;
	char *idx, *val, *label;

	if(fp == NULL)
	{
		fprintf(stderr,"can't open input file %s\n",filename);
		exit(1);
	}

	prob.l = 0;
	elements = 0;
	max_line_len = 1024;
	line = Malloc(char,max_line_len);
	while(readline(fp)!=NULL)
	{
		char *p = strtok(line," \t"); // label

		// features
		while(1)
		{
			p = strtok(NULL," \t");
			if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
				break;
			elements++;
		}
		elements++; // for bias term
		prob.l++;
	}
	rewind(fp);

	prob.bias=bias;

	prob.y = Malloc(double,prob.l);
	prob.x = Malloc(struct feature_node *,prob.l);
	x_space = Malloc(struct feature_node,elements+prob.l);

	max_index = 0;
	j=0;
	for(i=0;i<prob.l;i++)
	{
		inst_max_index = 0; // strtol gives 0 if wrong format
		readline(fp);
		prob.x[i] = &x_space[j];
		label = strtok(line," \t\n");
		if(label == NULL) // empty line
			exit_input_error(i+1);

		prob.y[i] = strtod(label,&endptr);
		if(endptr == label || *endptr != '\0')
			exit_input_error(i+1);

		while(1)
		{
			idx = strtok(NULL,":");
			val = strtok(NULL," \t");

			if(val == NULL)
				break;

			errno = 0;
			x_space[j].index = (int) strtol(idx,&endptr,10);
			if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
				exit_input_error(i+1);
			else
				inst_max_index = x_space[j].index;

			errno = 0;
			x_space[j].value = strtod(val,&endptr);
			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
				exit_input_error(i+1);

			++j;
		}

		if(inst_max_index > max_index)
			max_index = inst_max_index;

		if(prob.bias >= 0)
			x_space[j++].value = prob.bias;

		x_space[j++].index = -1;
	}

	if(prob.bias >= 0)
	{
		prob.n=max_index+1;
		for(i=1;i<prob.l;i++)
			(prob.x[i]-2)->index = prob.n;
		x_space[j-2].index = prob.n;
	}
	else
		prob.n=max_index;

	fclose(fp);
}
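For the regression solvers, do_cross_validation() above reports the mean squared error and the squared correlation coefficient of the cross-validation predictions. A short Python sketch of the same two statistics (names are illustrative, not part of the C sources):

def regression_cv_stats(y, v):
    # y: true targets, v: cross-validation predictions (same length, non-constant).
    l = len(y)
    mse = sum((vi - yi) ** 2 for vi, yi in zip(v, y)) / l
    sumv, sumy = sum(v), sum(y)
    sumvv = sum(vi * vi for vi in v)
    sumyy = sum(yi * yi for yi in y)
    sumvy = sum(vi * yi for vi, yi in zip(v, y))
    # Squared Pearson correlation, written exactly as in do_cross_validation().
    scc = ((l * sumvy - sumv * sumy) ** 2) / ((l * sumvv - sumv ** 2) * (l * sumyy - sumy ** 2))
    return mse, scc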
9
liblinear-2.49/windows/README
Executable file
@@ -0,0 +1,9 @@
-------------------------------------
--- Windows binaries of LIBLINEAR ---
-------------------------------------

Starting from version 2.48, we no longer provide pre-built Windows binaries.
If you would like to build them, please follow the instructions for building
Windows binaries in the LIBLINEAR README.

For any questions, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>.