From: biconnect
Date: Thu, 18 Jun 2009 11:58:09 +0000 (+0000)
Subject: - move everything to trunk/
X-Git-Tag: v134~14
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=b504f25068483022a258cae315e8d6da7c2fd4e0;p=liblinear

- move everything to trunk/
---

diff --git a/COPYRIGHT b/COPYRIGHT
new file mode 100644
index 0000000..58e56fa
--- /dev/null
+++ b/COPYRIGHT
@@ -0,0 +1,31 @@
+
+Copyright (c) 2007-2009 The LIBLINEAR Project.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..42a484f
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,26 @@
+CXX ?= g++
+CC ?= gcc
+CFLAGS ?= -Wall -Wconversion -O3 -fPIC
+LIBS ?= blas/blas.a
+#LIBS ?= -lblas
+
+all: train predict
+
+train: tron.o linear.o train.c blas/blas.a
+	$(CXX) $(CFLAGS) -o train train.c tron.o linear.o $(LIBS)
+
+predict: tron.o linear.o predict.c blas/blas.a
+	$(CXX) $(CFLAGS) -o predict predict.c tron.o linear.o $(LIBS)
+
+tron.o: tron.cpp tron.h
+	$(CXX) $(CFLAGS) -c -o tron.o tron.cpp
+
+linear.o: linear.cpp linear.h
+	$(CXX) $(CFLAGS) -c -o linear.o linear.cpp
+
+blas/blas.a:
+	cd blas; make OPTFLAGS='$(CFLAGS)' CC='$(CC)';
+
+clean:
+	cd blas; make clean
+	rm -f *~ tron.o linear.o train predict

diff --git a/Makefile.win b/Makefile.win
new file mode 100644
index 0000000..fdd3ba7
--- /dev/null
+++ b/Makefile.win
@@ -0,0 +1,27 @@
+#You must ensure nmake.exe, cl.exe, link.exe are in system path.
+#VCVARS32.bat
+#Under dosbox prompt
+#nmake -f Makefile.win
+
+##########################################
+CXX = cl.exe
+CFLAGS = -nologo -O2 -EHsc -I. -D __WIN32__ -D _CRT_SECURE_NO_DEPRECATE
+TARGET = windows
+
+all: $(TARGET)\train.exe $(TARGET)\predict.exe
+
+$(TARGET)\train.exe: tron.obj linear.obj train.c blas\*.c
+	$(CXX) $(CFLAGS) -Fe$(TARGET)\train.exe tron.obj linear.obj train.c blas\*.c
+
+$(TARGET)\predict.exe: tron.obj linear.obj predict.c blas\*.c
+	$(CXX) $(CFLAGS) -Fe$(TARGET)\predict.exe tron.obj linear.obj predict.c blas\*.c
+
+linear.obj: linear.cpp linear.h
+	$(CXX) $(CFLAGS) -c linear.cpp
+
+tron.obj: tron.cpp tron.h
+	$(CXX) $(CFLAGS) -c tron.cpp
+
+clean:
+	-erase /Q *.obj $(TARGET)\.
+

diff --git a/README b/README
new file mode 100644
index 0000000..2440161
--- /dev/null
+++ b/README
@@ -0,0 +1,411 @@
+LIBLINEAR is a simple package for solving large-scale regularized
+linear classification. It currently supports L2-regularized logistic
+regression, L2-loss support vector machines, and L1-loss support
+vector machines. This document explains the usage of LIBLINEAR.
+
+To get started, please read the ``Quick Start'' section first.
+For developers, please check the ``Library Usage'' section to learn
+how to integrate LIBLINEAR into your software.
+
+Table of Contents
+=================
+
+- When to use LIBLINEAR but not LIBSVM
+- Quick Start
+- Installation
+- `train' Usage
+- `predict' Usage
+- Examples
+- Library Usage
+- Building Windows Binaries
+- Additional Information
+- MATLAB/OCTAVE Interface
+
+When to use LIBLINEAR but not LIBSVM
+====================================
+
+For some large data sets, training with and without a nonlinear
+mapping gives similar performance. Without using kernels, one can
+efficiently train a much larger set via a linear classifier. Such
+data usually have a large number of features. Document classification
+is an example.
+
+Warning: While LIBLINEAR is generally very fast, its default solver
+may be slow in certain situations (e.g., the data are not scaled or C
+is large). See Appendix B of our SVM guide for how to handle such
+cases.
+http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
+
+Warning: If you are a beginner and your data sets are not large, you
+should consider LIBSVM first.
+
+LIBSVM page:
+http://www.csie.ntu.edu.tw/~cjlin/libsvm
+
+
+Quick Start
+===========
+
+See the section ``Installation'' for installing LIBLINEAR.
+
+After installation, there are programs `train' and `predict' for
+training and testing, respectively.
+
+For the data format, please check the README file of LIBSVM. Note
+that feature indices must start from 1 (not 0).
+
+A sample classification data set, `heart_scale', is included in this
+package.
+
+Type `train heart_scale', and the program will read the training
+data and output the model file `heart_scale.model'. If you have a test
+set called heart_scale.t, then type `predict heart_scale.t
+heart_scale.model output' to see the prediction accuracy. The `output'
+file contains the predicted class labels.
+
+For more information about `train' and `predict', see the sections
+`train' Usage and `predict' Usage.
+
+To obtain good performance, sometimes one needs to scale the
+data. Please check the program `svm-scale' of LIBSVM. For large and
+sparse data, use `-l 0' to keep the sparsity.
+
+Installation
+============
+
+On Unix systems, type `make' to build the `train' and `predict'
+programs. Run them without arguments to see the usage messages.
+
+On other systems, consult `Makefile' to build them (e.g., see
+'Building Windows binaries' in this file) or use the pre-built
+binaries (Windows binaries are in the directory `windows').
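+
+As a quick sanity check of the build, a first session on Unix might
+look like the following (a suggested session, not part of the package;
+it trains and predicts on the same included file, so the reported
+accuracy only verifies that the programs run):
+
+> make
+> train heart_scale
+> predict heart_scale heart_scale.model output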
+
+This software uses some level-1 BLAS subroutines. The needed functions
+are included in this package. If a BLAS library is available on your
+machine, you may use it by modifying the Makefile: uncomment the line
+
+	#LIBS ?= -lblas
+
+and comment out the line
+
+	LIBS ?= blas/blas.a
+
+`train' Usage
+=============
+
+Usage: train [options] training_set_file [model_file]
+options:
+-s type : set type of solver (default 1)
+	0 -- L2-regularized logistic regression
+	1 -- L2-loss support vector machines (dual)
+	2 -- L2-loss support vector machines (primal)
+	3 -- L1-loss support vector machines (dual)
+	4 -- multi-class support vector machines by Crammer and Singer
+-c cost : set the parameter C (default 1)
+-e epsilon : set tolerance of termination criterion
+	-s 0 and 2
+		|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
+		where f is the primal function (default 0.01)
+	-s 1, 3, and 4
+		Dual maximal violation <= eps; similar to libsvm (default 0.1)
+-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default 1)
+-wi weight: weights adjust the parameter C of different classes (see README for details)
+-v n: n-fold cross validation mode
+
+Option -v randomly splits the data into n parts and calculates cross
+validation accuracy on them.
+
+Formulations:
+
+For L2-regularized logistic regression (-s 0), we solve
+
+min_w w^Tw/2 + C \sum log(1 + exp(-y_i w^Tx_i))
+
+For L2-loss SVM dual (-s 1), we solve
+
+min_alpha  0.5(alpha^T (Q + I/2/C) alpha) - e^T alpha
+    s.t.   0 <= alpha_i,
+
+For L2-loss SVM (-s 2), we solve
+
+min_w w^Tw/2 + C \sum max(0, 1- y_i w^Tx_i)^2
+
+For L1-loss SVM dual (-s 3), we solve
+
+min_alpha  0.5(alpha^T Q alpha) - e^T alpha
+    s.t.   0 <= alpha_i <= C,
+
+where
+
+Q is a matrix with Q_ij = y_i y_j x_i^T x_j.
+
+If bias >= 0, w becomes [w; w_{n+1}] and x becomes [x; bias].
+
+The primal-dual relationship implies that -s 1 and -s 2 give the same
+model.
+
+We implement the one-vs-the-rest multi-class strategy. In training
+class i vs. non-i, their C parameters are (weight from -wi)*C and C,
+respectively. If there are only two classes, we train only one
+model; thus weight1*C vs. weight2*C is used. See the examples below.
+
+We also implement multi-class SVM by Crammer and Singer (-s 4):
+
+min_{w_m, \xi_i}  0.5 \sum_m ||w_m||^2 + C \sum_i \xi_i
+    s.t.  w^T_{y_i} x_i - w^T_m x_i >= e^m_i - \xi_i \forall m,i
+
+where e^m_i = 0 if y_i = m,
+      e^m_i = 1 if y_i != m,
+
+Here we solve the dual problem:
+
+min_{\alpha}  0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
+    s.t.  \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
+
+where w_m(\alpha) = \sum_i \alpha^m_i x_i,
+and C^m_i = C if m = y_i,
+    C^m_i = 0 if m != y_i.
+
+`predict' Usage
+===============
+
+Usage: predict [options] test_file model_file output_file
+options:
+-b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0)
+
+Examples
+========
+
+> train data_file
+
+Train a linear SVM with the L2-loss function.
+
+> train -s 0 data_file
+
+Train a logistic regression model.
+
+> train -v 5 -e 0.001 data_file
+
+Do five-fold cross-validation using the L2-loss SVM.
+Use a smaller stopping tolerance 0.001 than the default
+0.1 if you want more accurate solutions.
+
+> train -c 10 -w1 2 -w2 5 -w3 2 four_class_data_file
+
+Train four classifiers:
+positive    negative        Cp      Cn
+class 1     class 2,3,4.    20      10
+class 2     class 1,3,4.    50      10
+class 3     class 1,2,4.    20      10
+class 4     class 1,2,3.    10      10
+
+> train -c 10 -w3 1 -w2 5 two_class_data_file
+
+If there are only two classes, we train ONE model.
+The C values for the two classes are 10 and 50.
+
+> predict -b 1 test_file data_file.model output_file
+
+Output probability estimates (for logistic regression only).
+
+Library Usage
+=============
+
+- Function: model* train(const struct problem *prob,
+                         const struct parameter *param);
+
+    This function constructs and returns a linear classification model
+    according to the given training data and parameters.
+
+    struct problem describes the problem:
+
+	struct problem
+	{
+		int l, n;
+		int *y;
+		struct feature_node **x;
+		double bias;
+	};
+
+    where `l' is the number of training data. If bias >= 0, we assume
+    that one additional feature is added to the end of each data
+    instance. `n' is the number of features (including the bias
+    feature if bias >= 0). `y' is an array containing the target
+    values, and `x' is an array of pointers, each of which points to a
+    sparse representation (an array of feature_node) of one training
+    vector.
+
+    For example, if we have the following training data:
+
+    LABEL   ATTR1   ATTR2   ATTR3   ATTR4   ATTR5
+    -----   -----   -----   -----   -----   -----
+    1       0       0.1     0.2     0       0
+    2       0       0.1     0.3     -1.2    0
+    1       0.4     0       0       0       0
+    2       0       0.1     0       1.4     0.5
+    3       -0.1    -0.2    0.1     1.1     0.1
+
+    and bias = 1, then the components of problem are:
+
+    l = 5
+    n = 6
+
+    y -> 1 2 1 2 3
+
+    x -> [ ] -> (2,0.1) (3,0.2) (6,1) (-1,?)
+         [ ] -> (2,0.1) (3,0.3) (4,-1.2) (6,1) (-1,?)
+         [ ] -> (1,0.4) (6,1) (-1,?)
+         [ ] -> (2,0.1) (4,1.4) (5,0.5) (6,1) (-1,?)
+         [ ] -> (1,-0.1) (2,-0.2) (3,0.1) (4,1.1) (5,0.1) (6,1) (-1,?)
+
+    struct parameter describes the parameters of a linear classification model:
+
+	struct parameter
+	{
+		int solver_type;
+
+		/* these are for training only */
+		double eps;	/* stopping criteria */
+		double C;
+		int nr_weight;
+		int *weight_label;
+		double* weight;
+	};
+
+    solver_type can be one of L2_LR, L2LOSS_SVM_DUAL, L2LOSS_SVM, L1LOSS_SVM_DUAL, MCSVM_CS.
+
+    L2_LR             L2-regularized logistic regression
+    L2LOSS_SVM_DUAL   L2-loss support vector machines (dual)
+    L2LOSS_SVM        L2-loss support vector machines (primal)
+    L1LOSS_SVM_DUAL   L1-loss support vector machines (dual)
+    MCSVM_CS          multi-class support vector machines by Crammer and Singer
+
+    C is the cost of constraint violation.
+    eps is the stopping criterion.
+
+    nr_weight, weight_label, and weight are used to change the penalty
+    for some classes (if the weight for a class is not changed, it is
+    set to 1). This is useful for training a classifier on unbalanced
+    input data or with asymmetric misclassification costs.
+
+    nr_weight is the number of elements in the arrays weight_label and
+    weight. Each weight[i] corresponds to weight_label[i], meaning that
+    the penalty of class weight_label[i] is scaled by a factor of weight[i].
+
+    If you do not want to change the penalty for any of the classes,
+    just set nr_weight to 0.
+
+    *NOTE* To avoid wrong parameters, check_parameter() should be
+    called before train().
+
+- Function: void cross_validation(const problem *prob, const parameter *param, int nr_fold, int *target);
+
+    This function conducts cross validation. Data are separated into
+    nr_fold folds. Under the given parameters, each fold is
+    sequentially validated using the model trained on the remaining
+    folds. Predicted labels in the validation process are stored in
+    the array called target.
+
+    The format of prob is the same as that for train().
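+
+    Putting the structures above together, here is a minimal sketch of
+    a complete program (not part of the package; the file name
+    example.c and the tiny two-instance training set are made up for
+    illustration):
+
+	#include <stdio.h>
+	#include "linear.h"
+
+	int main()
+	{
+		/* two training instances with two features each;
+		   index -1 terminates each sparse vector */
+		struct feature_node x0[] = { {1, 1.0}, {2, 1.0}, {-1, 0.0} };
+		struct feature_node x1[] = { {1, -1.0}, {2, -1.0}, {-1, 0.0} };
+		struct feature_node *x[] = { x0, x1 };
+		int y[] = { 1, 2 };
+
+		/* l, n, y, x, bias (bias < 0: no bias term) */
+		struct problem prob = { 2, 2, y, x, -1 };
+
+		struct parameter param;
+		param.solver_type = L2LOSS_SVM_DUAL;  /* the -s 1 default */
+		param.C = 1;
+		param.eps = 0.1;                      /* default for -s 1 */
+		param.nr_weight = 0;                  /* keep all class penalties at C */
+		param.weight_label = NULL;
+		param.weight = NULL;
+
+		const char *err = check_parameter(&prob, &param);
+		if (err) { fprintf(stderr, "error: %s\n", err); return 1; }
+
+		struct model *m = train(&prob, &param);
+		printf("predicted label of x0: %d\n", predict(m, x0));
+
+		destroy_model(m);
+		destroy_param(&param);
+		return 0;
+	}
+
+    After running `make', such a file can be compiled the same way
+    train.c is, for example
+
+	g++ -Wall -O3 -o example example.c tron.o linear.o blas/blas.a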
+
+- Function: int predict(const model *model_, const feature_node *x);
+
+    This function classifies a test vector using the given
+    model. The predicted label is returned.
+
+- Function: int predict_values(const struct model *model_,
+                               const struct feature_node *x, double* dec_values);
+
+    This function gives nr_w decision values in the array
+    dec_values. nr_w is 1 if there are two classes (except for the
+    multi-class SVM by Crammer and Singer, -s 4); otherwise nr_w is
+    the number of classes.
+
+    For multi-class SVM, we implement the one-vs-the-rest strategy
+    (-s 0,1,2,3) and the multi-class SVM by Crammer and Singer (-s 4).
+    The class with the highest decision value is returned.
+
+- Function: int predict_probability(const struct model *model_,
+                                    const struct feature_node *x, double* prob_estimates);
+
+    This function gives nr_class probability estimates in the array
+    prob_estimates. nr_class can be obtained from the function
+    get_nr_class. The class with the highest probability is
+    returned. Currently, we support only the probability outputs of
+    logistic regression.
+
+- Function: int get_nr_feature(const model *model_);
+
+    The function gives the number of attributes of the model.
+
+- Function: int get_nr_class(const model *model_);
+
+    The function gives the number of classes of the model.
+
+- Function: void get_labels(const model *model_, int* label);
+
+    This function outputs the names of labels into an array called label.
+
+- Function: const char *check_parameter(const struct problem *prob,
+                                        const struct parameter *param);
+
+    This function checks whether the parameters are within the feasible
+    range of the problem. It should be called before calling
+    train() and cross_validation(). It returns NULL if the
+    parameters are feasible; otherwise an error message is returned.
+
+- Function: int save_model(const char *model_file_name,
+                           const struct model *model_);
+
+    This function saves a model to a file; it returns 0 on success,
+    or -1 if an error occurs.
+
+- Function: struct model *load_model(const char *model_file_name);
+
+    This function returns a pointer to the model read from the file,
+    or a null pointer if the model could not be loaded.
+
+- Function: void destroy_model(struct model *model_);
+
+    This function frees the memory used by a model.
+
+- Function: void destroy_param(struct parameter *param);
+
+    This function frees the memory used by a parameter set.
+
+Building Windows Binaries
+=========================
+
+Windows binaries are in the directory `windows'. To build them via
+Visual C++, use the following steps:
+
+1. Open a DOS command box and change to the liblinear directory. If
+the environment variables of VC++ have not been set, type
+
+"C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat"
+
+You may have to modify the above according to which version of VC++
+you have and where it is installed.
+
+2. Type
+
+nmake -f Makefile.win clean all
+
+
+MATLAB/OCTAVE Interface
+=======================
+
+Please check the file README in the directory `matlab'.
+
+Additional Information
+======================
+
+If you find LIBLINEAR helpful, please cite it as
+
+R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
+LIBLINEAR: A Library for Large Linear Classification, Journal of
+Machine Learning Research 9(2008), 1871-1874.
Software available at +http://www.csie.ntu.edu.tw/~cjlin/liblinear + +For any questions and comments, please send your email to +cjlin@csie.ntu.edu.tw + + diff --git a/blas/Makefile b/blas/Makefile new file mode 100644 index 0000000..2be0186 --- /dev/null +++ b/blas/Makefile @@ -0,0 +1,22 @@ +AR = ar rcv +RANLIB = ranlib + +HEADERS = blas.h blas.h blasp.h +FILES = dnrm2.o daxpy.o ddot.o dscal.o + +CFLAGS = $(OPTFLAGS) +FFLAGS = $(OPTFLAGS) + +blas: $(FILES) $(HEADERS) + $(AR) blas.a $(FILES) + $(RANLIB) blas.a + +clean: + - rm -f *.o + - rm -f *.a + - rm -f *~ + +.c.o: + $(CC) $(CFLAGS) -c $*.c + + diff --git a/blas/blas.h b/blas/blas.h new file mode 100644 index 0000000..558893a --- /dev/null +++ b/blas/blas.h @@ -0,0 +1,25 @@ +/* blas.h -- C header file for BLAS Ver 1.0 */ +/* Jesse Bennett March 23, 2000 */ + +/** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." + + - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ + +#ifndef BLAS_INCLUDE +#define BLAS_INCLUDE + +/* Data types specific to BLAS implementation */ +typedef struct { float r, i; } fcomplex; +typedef struct { double r, i; } dcomplex; +typedef int blasbool; + +#include "blasp.h" /* Prototypes for all BLAS functions */ + +#define FALSE 0 +#define TRUE 1 + +/* Macro functions */ +#define MIN(a,b) ((a) <= (b) ? (a) : (b)) +#define MAX(a,b) ((a) >= (b) ? (a) : (b)) + +#endif diff --git a/blas/blasp.h b/blas/blasp.h new file mode 100644 index 0000000..745836d --- /dev/null +++ b/blas/blasp.h @@ -0,0 +1,430 @@ +/* blasp.h -- C prototypes for BLAS Ver 1.0 */ +/* Jesse Bennett March 23, 2000 */ + +/* Functions listed in alphabetical order */ + +#ifdef F2C_COMPAT + +void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx, + fcomplex *cy, int *incy); + +void cdotu_(fcomplex *dotval, int *n, fcomplex *cx, int *incx, + fcomplex *cy, int *incy); + +double sasum_(int *n, float *sx, int *incx); + +double scasum_(int *n, fcomplex *cx, int *incx); + +double scnrm2_(int *n, fcomplex *x, int *incx); + +double sdot_(int *n, float *sx, int *incx, float *sy, int *incy); + +double snrm2_(int *n, float *x, int *incx); + +void zdotc_(dcomplex *dotval, int *n, dcomplex *cx, int *incx, + dcomplex *cy, int *incy); + +void zdotu_(dcomplex *dotval, int *n, dcomplex *cx, int *incx, + dcomplex *cy, int *incy); + +#else + +fcomplex cdotc_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); + +fcomplex cdotu_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); + +float sasum_(int *n, float *sx, int *incx); + +float scasum_(int *n, fcomplex *cx, int *incx); + +float scnrm2_(int *n, fcomplex *x, int *incx); + +float sdot_(int *n, float *sx, int *incx, float *sy, int *incy); + +float snrm2_(int *n, float *x, int *incx); + +dcomplex zdotc_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); + +dcomplex zdotu_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); + +#endif + +/* Remaining functions listed in alphabetical order */ + +int caxpy_(int *n, fcomplex *ca, fcomplex *cx, int *incx, fcomplex *cy, + int *incy); + +int ccopy_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); + +int cgbmv_(char *trans, int *m, int *n, int *kl, int *ku, + fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx, + fcomplex *beta, fcomplex *y, int *incy); + +int cgemm_(char *transa, char *transb, int *m, int *n, int *k, + fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, + fcomplex *beta, fcomplex *c, int *ldc); + +int cgemv_(char *trans, int *m, int *n, fcomplex *alpha, fcomplex *a, 
+ int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, + int *incy); + +int cgerc_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx, + fcomplex *y, int *incy, fcomplex *a, int *lda); + +int cgeru_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx, + fcomplex *y, int *incy, fcomplex *a, int *lda); + +int chbmv_(char *uplo, int *n, int *k, fcomplex *alpha, fcomplex *a, + int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, + int *incy); + +int chemm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha, + fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, + fcomplex *c, int *ldc); + +int chemv_(char *uplo, int *n, fcomplex *alpha, fcomplex *a, int *lda, + fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy); + +int cher_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx, + fcomplex *a, int *lda); + +int cher2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx, + fcomplex *y, int *incy, fcomplex *a, int *lda); + +int cher2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, + fcomplex *a, int *lda, fcomplex *b, int *ldb, float *beta, + fcomplex *c, int *ldc); + +int cherk_(char *uplo, char *trans, int *n, int *k, float *alpha, + fcomplex *a, int *lda, float *beta, fcomplex *c, int *ldc); + +int chpmv_(char *uplo, int *n, fcomplex *alpha, fcomplex *ap, fcomplex *x, + int *incx, fcomplex *beta, fcomplex *y, int *incy); + +int chpr_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx, + fcomplex *ap); + +int chpr2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx, + fcomplex *y, int *incy, fcomplex *ap); + +int crotg_(fcomplex *ca, fcomplex *cb, float *c, fcomplex *s); + +int cscal_(int *n, fcomplex *ca, fcomplex *cx, int *incx); + +int csscal_(int *n, float *sa, fcomplex *cx, int *incx); + +int cswap_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); + +int csymm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha, + fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, + fcomplex *c, int *ldc); + +int csyr2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, + fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, + fcomplex *c, int *ldc); + +int csyrk_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, + fcomplex *a, int *lda, fcomplex *beta, fcomplex *c, int *ldc); + +int ctbmv_(char *uplo, char *trans, char *diag, int *n, int *k, + fcomplex *a, int *lda, fcomplex *x, int *incx); + +int ctbsv_(char *uplo, char *trans, char *diag, int *n, int *k, + fcomplex *a, int *lda, fcomplex *x, int *incx); + +int ctpmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap, + fcomplex *x, int *incx); + +int ctpsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap, + fcomplex *x, int *incx); + +int ctrmm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, + int *ldb); + +int ctrmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a, + int *lda, fcomplex *x, int *incx); + +int ctrsm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, + int *ldb); + +int ctrsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a, + int *lda, fcomplex *x, int *incx); + +int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy, + int *incy); + +int dcopy_(int *n, double *sx, int *incx, double *sy, int *incy); + +int dgbmv_(char *trans, int *m, int *n, int *kl, int *ku, + double *alpha, double *a, int *lda, double *x, 
int *incx, + double *beta, double *y, int *incy); + +int dgemm_(char *transa, char *transb, int *m, int *n, int *k, + double *alpha, double *a, int *lda, double *b, int *ldb, + double *beta, double *c, int *ldc); + +int dgemv_(char *trans, int *m, int *n, double *alpha, double *a, + int *lda, double *x, int *incx, double *beta, double *y, + int *incy); + +int dger_(int *m, int *n, double *alpha, double *x, int *incx, + double *y, int *incy, double *a, int *lda); + +int drot_(int *n, double *sx, int *incx, double *sy, int *incy, + double *c, double *s); + +int drotg_(double *sa, double *sb, double *c, double *s); + +int dsbmv_(char *uplo, int *n, int *k, double *alpha, double *a, + int *lda, double *x, int *incx, double *beta, double *y, + int *incy); + +int dscal_(int *n, double *sa, double *sx, int *incx); + +int dspmv_(char *uplo, int *n, double *alpha, double *ap, double *x, + int *incx, double *beta, double *y, int *incy); + +int dspr_(char *uplo, int *n, double *alpha, double *x, int *incx, + double *ap); + +int dspr2_(char *uplo, int *n, double *alpha, double *x, int *incx, + double *y, int *incy, double *ap); + +int dswap_(int *n, double *sx, int *incx, double *sy, int *incy); + +int dsymm_(char *side, char *uplo, int *m, int *n, double *alpha, + double *a, int *lda, double *b, int *ldb, double *beta, + double *c, int *ldc); + +int dsymv_(char *uplo, int *n, double *alpha, double *a, int *lda, + double *x, int *incx, double *beta, double *y, int *incy); + +int dsyr_(char *uplo, int *n, double *alpha, double *x, int *incx, + double *a, int *lda); + +int dsyr2_(char *uplo, int *n, double *alpha, double *x, int *incx, + double *y, int *incy, double *a, int *lda); + +int dsyr2k_(char *uplo, char *trans, int *n, int *k, double *alpha, + double *a, int *lda, double *b, int *ldb, double *beta, + double *c, int *ldc); + +int dsyrk_(char *uplo, char *trans, int *n, int *k, double *alpha, + double *a, int *lda, double *beta, double *c, int *ldc); + +int dtbmv_(char *uplo, char *trans, char *diag, int *n, int *k, + double *a, int *lda, double *x, int *incx); + +int dtbsv_(char *uplo, char *trans, char *diag, int *n, int *k, + double *a, int *lda, double *x, int *incx); + +int dtpmv_(char *uplo, char *trans, char *diag, int *n, double *ap, + double *x, int *incx); + +int dtpsv_(char *uplo, char *trans, char *diag, int *n, double *ap, + double *x, int *incx); + +int dtrmm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, double *alpha, double *a, int *lda, double *b, + int *ldb); + +int dtrmv_(char *uplo, char *trans, char *diag, int *n, double *a, + int *lda, double *x, int *incx); + +int dtrsm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, double *alpha, double *a, int *lda, double *b, + int *ldb); + +int dtrsv_(char *uplo, char *trans, char *diag, int *n, double *a, + int *lda, double *x, int *incx); + + +int saxpy_(int *n, float *sa, float *sx, int *incx, float *sy, int *incy); + +int scopy_(int *n, float *sx, int *incx, float *sy, int *incy); + +int sgbmv_(char *trans, int *m, int *n, int *kl, int *ku, + float *alpha, float *a, int *lda, float *x, int *incx, + float *beta, float *y, int *incy); + +int sgemm_(char *transa, char *transb, int *m, int *n, int *k, + float *alpha, float *a, int *lda, float *b, int *ldb, + float *beta, float *c, int *ldc); + +int sgemv_(char *trans, int *m, int *n, float *alpha, float *a, + int *lda, float *x, int *incx, float *beta, float *y, + int *incy); + +int sger_(int *m, int *n, float *alpha, float *x, int *incx, + 
float *y, int *incy, float *a, int *lda); + +int srot_(int *n, float *sx, int *incx, float *sy, int *incy, + float *c, float *s); + +int srotg_(float *sa, float *sb, float *c, float *s); + +int ssbmv_(char *uplo, int *n, int *k, float *alpha, float *a, + int *lda, float *x, int *incx, float *beta, float *y, + int *incy); + +int sscal_(int *n, float *sa, float *sx, int *incx); + +int sspmv_(char *uplo, int *n, float *alpha, float *ap, float *x, + int *incx, float *beta, float *y, int *incy); + +int sspr_(char *uplo, int *n, float *alpha, float *x, int *incx, + float *ap); + +int sspr2_(char *uplo, int *n, float *alpha, float *x, int *incx, + float *y, int *incy, float *ap); + +int sswap_(int *n, float *sx, int *incx, float *sy, int *incy); + +int ssymm_(char *side, char *uplo, int *m, int *n, float *alpha, + float *a, int *lda, float *b, int *ldb, float *beta, + float *c, int *ldc); + +int ssymv_(char *uplo, int *n, float *alpha, float *a, int *lda, + float *x, int *incx, float *beta, float *y, int *incy); + +int ssyr_(char *uplo, int *n, float *alpha, float *x, int *incx, + float *a, int *lda); + +int ssyr2_(char *uplo, int *n, float *alpha, float *x, int *incx, + float *y, int *incy, float *a, int *lda); + +int ssyr2k_(char *uplo, char *trans, int *n, int *k, float *alpha, + float *a, int *lda, float *b, int *ldb, float *beta, + float *c, int *ldc); + +int ssyrk_(char *uplo, char *trans, int *n, int *k, float *alpha, + float *a, int *lda, float *beta, float *c, int *ldc); + +int stbmv_(char *uplo, char *trans, char *diag, int *n, int *k, + float *a, int *lda, float *x, int *incx); + +int stbsv_(char *uplo, char *trans, char *diag, int *n, int *k, + float *a, int *lda, float *x, int *incx); + +int stpmv_(char *uplo, char *trans, char *diag, int *n, float *ap, + float *x, int *incx); + +int stpsv_(char *uplo, char *trans, char *diag, int *n, float *ap, + float *x, int *incx); + +int strmm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, float *alpha, float *a, int *lda, float *b, + int *ldb); + +int strmv_(char *uplo, char *trans, char *diag, int *n, float *a, + int *lda, float *x, int *incx); + +int strsm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, float *alpha, float *a, int *lda, float *b, + int *ldb); + +int strsv_(char *uplo, char *trans, char *diag, int *n, float *a, + int *lda, float *x, int *incx); + +int zaxpy_(int *n, dcomplex *ca, dcomplex *cx, int *incx, dcomplex *cy, + int *incy); + +int zcopy_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); + +int zdscal_(int *n, double *sa, dcomplex *cx, int *incx); + +int zgbmv_(char *trans, int *m, int *n, int *kl, int *ku, + dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx, + dcomplex *beta, dcomplex *y, int *incy); + +int zgemm_(char *transa, char *transb, int *m, int *n, int *k, + dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, + dcomplex *beta, dcomplex *c, int *ldc); + +int zgemv_(char *trans, int *m, int *n, dcomplex *alpha, dcomplex *a, + int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, + int *incy); + +int zgerc_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx, + dcomplex *y, int *incy, dcomplex *a, int *lda); + +int zgeru_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx, + dcomplex *y, int *incy, dcomplex *a, int *lda); + +int zhbmv_(char *uplo, int *n, int *k, dcomplex *alpha, dcomplex *a, + int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, + int *incy); + +int zhemm_(char *side, char *uplo, int *m, int 
*n, dcomplex *alpha, + dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, + dcomplex *c, int *ldc); + +int zhemv_(char *uplo, int *n, dcomplex *alpha, dcomplex *a, int *lda, + dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy); + +int zher_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx, + dcomplex *a, int *lda); + +int zher2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx, + dcomplex *y, int *incy, dcomplex *a, int *lda); + +int zher2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, + dcomplex *a, int *lda, dcomplex *b, int *ldb, double *beta, + dcomplex *c, int *ldc); + +int zherk_(char *uplo, char *trans, int *n, int *k, double *alpha, + dcomplex *a, int *lda, double *beta, dcomplex *c, int *ldc); + +int zhpmv_(char *uplo, int *n, dcomplex *alpha, dcomplex *ap, dcomplex *x, + int *incx, dcomplex *beta, dcomplex *y, int *incy); + +int zhpr_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx, + dcomplex *ap); + +int zhpr2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx, + dcomplex *y, int *incy, dcomplex *ap); + +int zrotg_(dcomplex *ca, dcomplex *cb, double *c, dcomplex *s); + +int zscal_(int *n, dcomplex *ca, dcomplex *cx, int *incx); + +int zswap_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); + +int zsymm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha, + dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, + dcomplex *c, int *ldc); + +int zsyr2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, + dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, + dcomplex *c, int *ldc); + +int zsyrk_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, + dcomplex *a, int *lda, dcomplex *beta, dcomplex *c, int *ldc); + +int ztbmv_(char *uplo, char *trans, char *diag, int *n, int *k, + dcomplex *a, int *lda, dcomplex *x, int *incx); + +int ztbsv_(char *uplo, char *trans, char *diag, int *n, int *k, + dcomplex *a, int *lda, dcomplex *x, int *incx); + +int ztpmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap, + dcomplex *x, int *incx); + +int ztpsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap, + dcomplex *x, int *incx); + +int ztrmm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, + int *ldb); + +int ztrmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a, + int *lda, dcomplex *x, int *incx); + +int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m, + int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, + int *ldb); + +int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a, + int *lda, dcomplex *x, int *incx); diff --git a/blas/daxpy.c b/blas/daxpy.c new file mode 100644 index 0000000..58f345a --- /dev/null +++ b/blas/daxpy.c @@ -0,0 +1,49 @@ +#include "blas.h" + +int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy, + int *incy) +{ + long int i, m, ix, iy, nn, iincx, iincy; + register double ssa; + + /* constant times a vector plus a vector. + uses unrolled loop for increments equal to one. + jack dongarra, linpack, 3/11/78. 
+     modified 12/3/93, array(1) declarations changed to array(*) */
+
+  /* Dereference inputs */
+  nn = *n;
+  ssa = *sa;
+  iincx = *incx;
+  iincy = *incy;
+
+  if( nn > 0 && ssa != 0.0 )
+  {
+    if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
+    {
+      m = nn-3;
+      for (i = 0; i < m; i += 4)
+      {
+        sy[i] += ssa * sx[i];
+        sy[i+1] += ssa * sx[i+1];
+        sy[i+2] += ssa * sx[i+2];
+        sy[i+3] += ssa * sx[i+3];
+      }
+      for ( ; i < nn; ++i) /* clean-up loop */
+        sy[i] += ssa * sx[i];
+    }
+    else /* code for unequal increments or equal increments not equal to 1 */
+    {
+      ix = iincx >= 0 ? 0 : (1 - nn) * iincx;
+      iy = iincy >= 0 ? 0 : (1 - nn) * iincy;
+      for (i = 0; i < nn; i++)
+      {
+        sy[iy] += ssa * sx[ix];
+        ix += iincx;
+        iy += iincy;
+      }
+    }
+  }
+
+  return 0;
+} /* daxpy_ */

diff --git a/blas/ddot.c b/blas/ddot.c
new file mode 100644
index 0000000..a64a280
--- /dev/null
+++ b/blas/ddot.c
@@ -0,0 +1,50 @@
+#include "blas.h"
+
+double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
+{
+  long int i, m, nn, iincx, iincy;
+  double stemp;
+  long int ix, iy;
+
+  /* forms the dot product of two vectors.
+     uses unrolled loops for increments equal to one.
+     jack dongarra, linpack, 3/11/78.
+     modified 12/3/93, array(1) declarations changed to array(*) */
+
+  /* Dereference inputs */
+  nn = *n;
+  iincx = *incx;
+  iincy = *incy;
+
+  stemp = 0.0;
+  if (nn > 0)
+  {
+    if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
+    {
+      m = nn-4;
+      for (i = 0; i < m; i += 5)
+        stemp += sx[i] * sy[i] + sx[i+1] * sy[i+1] + sx[i+2] * sy[i+2] +
+                 sx[i+3] * sy[i+3] + sx[i+4] * sy[i+4];
+
+      for ( ; i < nn; i++) /* clean-up loop */
+        stemp += sx[i] * sy[i];
+    }
+    else /* code for unequal increments or equal increments not equal to 1 */
+    {
+      ix = 0;
+      iy = 0;
+      if (iincx < 0)
+        ix = (1 - nn) * iincx;
+      if (iincy < 0)
+        iy = (1 - nn) * iincy;
+      for (i = 0; i < nn; i++)
+      {
+        stemp += sx[ix] * sy[iy];
+        ix += iincx;
+        iy += iincy;
+      }
+    }
+  }
+
+  return stemp;
+} /* ddot_ */

diff --git a/blas/dnrm2.c b/blas/dnrm2.c
new file mode 100644
index 0000000..e50cdf7
--- /dev/null
+++ b/blas/dnrm2.c
@@ -0,0 +1,62 @@
+#include <math.h>  /* Needed for fabs() and sqrt() */
+#include "blas.h"
+
+double dnrm2_(int *n, double *x, int *incx)
+{
+  long int ix, nn, iincx;
+  double norm, scale, absxi, ssq, temp;
+
+/* DNRM2 returns the euclidean norm of a vector via the function
+   name, so that
+
+      DNRM2 := sqrt( x'*x )
+
+   -- This version written on 25-October-1982.
+      Modified on 14-October-1993 to inline the call to SLASSQ.
+      Sven Hammarling, Nag Ltd. */
+
+  /* Dereference inputs */
+  nn = *n;
+  iincx = *incx;
+
+  if( nn > 0 && iincx > 0 )
+  {
+    if (nn == 1)
+    {
+      norm = fabs(x[0]);
+    }
+    else
+    {
+      scale = 0.0;
+      ssq = 1.0;
+
+      /* The following loop is equivalent to this call to the LAPACK
+         auxiliary routine: CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */
+
+      for (ix=(nn-1)*iincx; ix>=0; ix-=iincx)
+      {
+        if (x[ix] != 0.0)
+        {
+          absxi = fabs(x[ix]);
+          if (scale < absxi)
+          {
+            temp = scale / absxi;
+            ssq = ssq * (temp * temp) + 1.0;
+            scale = absxi;
+          }
+          else
+          {
+            temp = absxi / scale;
+            ssq += temp * temp;
+          }
+        }
+      }
+      norm = scale * sqrt(ssq);
+    }
+  }
+  else
+    norm = 0.0;
+
+  return norm;
+
+} /* dnrm2_ */

diff --git a/blas/dscal.c b/blas/dscal.c
new file mode 100644
index 0000000..a0eca0c
--- /dev/null
+++ b/blas/dscal.c
@@ -0,0 +1,44 @@
+#include "blas.h"
+
+int dscal_(int *n, double *sa, double *sx, int *incx)
+{
+  long int i, m, nincx, nn, iincx;
+  double ssa;
+
+  /* scales a vector by a constant.
+ uses unrolled loops for increment equal to 1. + jack dongarra, linpack, 3/11/78. + modified 3/93 to return if incx .le. 0. + modified 12/3/93, array(1) declarations changed to array(*) */ + + /* Dereference inputs */ + nn = *n; + iincx = *incx; + ssa = *sa; + + if (nn > 0 && iincx > 0) + { + if (iincx == 1) /* code for increment equal to 1 */ + { + m = nn-4; + for (i = 0; i < m; i += 5) + { + sx[i] = ssa * sx[i]; + sx[i+1] = ssa * sx[i+1]; + sx[i+2] = ssa * sx[i+2]; + sx[i+3] = ssa * sx[i+3]; + sx[i+4] = ssa * sx[i+4]; + } + for ( ; i < nn; ++i) /* clean-up loop */ + sx[i] = ssa * sx[i]; + } + else /* code for increment not equal to 1 */ + { + nincx = nn * iincx; + for (i = 0; i < nincx; i += iincx) + sx[i] = ssa * sx[i]; + } + } + + return 0; +} /* dscal_ */ diff --git a/heart_scale b/heart_scale new file mode 100644 index 0000000..23bac94 --- /dev/null +++ b/heart_scale @@ -0,0 +1,270 @@ ++1 1:0.708333 2:1 3:1 4:-0.320755 5:-0.105023 6:-1 7:1 8:-0.419847 9:-1 10:-0.225806 12:1 13:-1 +-1 1:0.583333 2:-1 3:0.333333 4:-0.603774 5:1 6:-1 7:1 8:0.358779 9:-1 10:-0.483871 12:-1 13:1 ++1 1:0.166667 2:1 3:-0.333333 4:-0.433962 5:-0.383562 6:-1 7:-1 8:0.0687023 9:-1 10:-0.903226 11:-1 12:-1 13:1 +-1 1:0.458333 2:1 3:1 4:-0.358491 5:-0.374429 6:-1 7:-1 8:-0.480916 9:1 10:-0.935484 12:-0.333333 13:1 +-1 1:0.875 2:-1 3:-0.333333 4:-0.509434 5:-0.347032 6:-1 7:1 8:-0.236641 9:1 10:-0.935484 11:-1 12:-0.333333 13:-1 +-1 1:0.5 2:1 3:1 4:-0.509434 5:-0.767123 6:-1 7:-1 8:0.0534351 9:-1 10:-0.870968 11:-1 12:-1 13:1 ++1 1:0.125 2:1 3:0.333333 4:-0.320755 5:-0.406393 6:1 7:1 8:0.0839695 9:1 10:-0.806452 12:-0.333333 13:0.5 ++1 1:0.25 2:1 3:1 4:-0.698113 5:-0.484018 6:-1 7:1 8:0.0839695 9:1 10:-0.612903 12:-0.333333 13:1 ++1 1:0.291667 2:1 3:1 4:-0.132075 5:-0.237443 6:-1 7:1 8:0.51145 9:-1 10:-0.612903 12:0.333333 13:1 ++1 1:0.416667 2:-1 3:1 4:0.0566038 5:0.283105 6:-1 7:1 8:0.267176 9:-1 10:0.290323 12:1 13:1 +-1 1:0.25 2:1 3:1 4:-0.226415 5:-0.506849 6:-1 7:-1 8:0.374046 9:-1 10:-0.83871 12:-1 13:1 +-1 2:1 3:1 4:-0.0943396 5:-0.543379 6:-1 7:1 8:-0.389313 9:1 10:-1 11:-1 12:-1 13:1 +-1 1:-0.375 2:1 3:0.333333 4:-0.132075 5:-0.502283 6:-1 7:1 8:0.664122 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.333333 2:1 3:-1 4:-0.245283 5:-0.506849 6:-1 7:-1 8:0.129771 9:-1 10:-0.16129 12:0.333333 13:-1 +-1 1:0.166667 2:-1 3:1 4:-0.358491 5:-0.191781 6:-1 7:1 8:0.343511 9:-1 10:-1 11:-1 12:-0.333333 13:-1 +-1 1:0.75 2:-1 3:1 4:-0.660377 5:-0.894977 6:-1 7:-1 8:-0.175573 9:-1 10:-0.483871 12:-1 13:-1 ++1 1:-0.291667 2:1 3:1 4:-0.132075 5:-0.155251 6:-1 7:-1 8:-0.251908 9:1 10:-0.419355 12:0.333333 13:1 ++1 2:1 3:1 4:-0.132075 5:-0.648402 6:1 7:1 8:0.282443 9:1 11:1 12:-1 13:1 +-1 1:0.458333 2:1 3:-1 4:-0.698113 5:-0.611872 6:-1 7:1 8:0.114504 9:1 10:-0.419355 12:-1 13:-1 +-1 1:-0.541667 2:1 3:-1 4:-0.132075 5:-0.666667 6:-1 7:-1 8:0.633588 9:1 10:-0.548387 11:-1 12:-1 13:1 ++1 1:0.583333 2:1 3:1 4:-0.509434 5:-0.52968 6:-1 7:1 8:-0.114504 9:1 10:-0.16129 12:0.333333 13:1 +-1 1:-0.208333 2:1 3:-0.333333 4:-0.320755 5:-0.456621 6:-1 7:1 8:0.664122 9:-1 10:-0.935484 12:-1 13:-1 +-1 1:-0.416667 2:1 3:1 4:-0.603774 5:-0.191781 6:-1 7:-1 8:0.679389 9:-1 10:-0.612903 12:-1 13:-1 +-1 1:-0.25 2:1 3:1 4:-0.660377 5:-0.643836 6:-1 7:-1 8:0.0992366 9:-1 10:-0.967742 11:-1 12:-1 13:-1 +-1 1:0.0416667 2:-1 3:-0.333333 4:-0.283019 5:-0.260274 6:1 7:1 8:0.343511 9:1 10:-1 11:-1 12:-0.333333 13:-1 +-1 1:-0.208333 2:-1 3:0.333333 4:-0.320755 5:-0.319635 6:-1 7:-1 8:0.0381679 9:-1 10:-0.935484 11:-1 12:-1 13:-1 +-1 1:-0.291667 2:-1 3:1 
4:-0.169811 5:-0.465753 6:-1 7:1 8:0.236641 9:1 10:-1 12:-1 13:-1 +-1 1:-0.0833333 2:-1 3:0.333333 4:-0.509434 5:-0.228311 6:-1 7:1 8:0.312977 9:-1 10:-0.806452 11:-1 12:-1 13:-1 ++1 1:0.208333 2:1 3:0.333333 4:-0.660377 5:-0.525114 6:-1 7:1 8:0.435115 9:-1 10:-0.193548 12:-0.333333 13:1 +-1 1:0.75 2:-1 3:0.333333 4:-0.698113 5:-0.365297 6:1 7:1 8:-0.0992366 9:-1 10:-1 11:-1 12:-0.333333 13:-1 ++1 1:0.166667 2:1 3:0.333333 4:-0.358491 5:-0.52968 6:-1 7:1 8:0.206107 9:-1 10:-0.870968 12:-0.333333 13:1 +-1 1:0.541667 2:1 3:1 4:0.245283 5:-0.534247 6:-1 7:1 8:0.0229008 9:-1 10:-0.258065 11:-1 12:-1 13:0.5 +-1 1:-0.666667 2:-1 3:0.333333 4:-0.509434 5:-0.593607 6:-1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.25 2:1 3:1 4:0.433962 5:-0.086758 6:-1 7:1 8:0.0534351 9:1 10:0.0967742 11:1 12:-1 13:1 ++1 1:-0.125 2:1 3:1 4:-0.0566038 5:-0.6621 6:-1 7:1 8:-0.160305 9:1 10:-0.709677 12:-1 13:1 ++1 1:-0.208333 2:1 3:1 4:-0.320755 5:-0.406393 6:1 7:1 8:0.206107 9:1 10:-1 11:-1 12:0.333333 13:1 ++1 1:0.333333 2:1 3:1 4:-0.132075 5:-0.630137 6:-1 7:1 8:0.0229008 9:1 10:-0.387097 11:-1 12:-0.333333 13:1 ++1 1:0.25 2:1 3:-1 4:0.245283 5:-0.328767 6:-1 7:1 8:-0.175573 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.458333 2:1 3:0.333333 4:-0.320755 5:-0.753425 6:-1 7:-1 8:0.206107 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.208333 2:1 3:1 4:-0.471698 5:-0.561644 6:-1 7:1 8:0.755725 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.541667 2:1 3:1 4:0.0943396 5:-0.557078 6:-1 7:-1 8:0.679389 9:-1 10:-1 11:-1 12:-1 13:1 +-1 1:0.375 2:-1 3:1 4:-0.433962 5:-0.621005 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.375 2:1 3:0.333333 4:-0.320755 5:-0.511416 6:-1 7:-1 8:0.648855 9:1 10:-0.870968 11:-1 12:-1 13:-1 +-1 1:-0.291667 2:1 3:-0.333333 4:-0.867925 5:-0.675799 6:1 7:-1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:1 ++1 1:0.25 2:1 3:0.333333 4:-0.396226 5:-0.579909 6:1 7:-1 8:-0.0381679 9:-1 10:-0.290323 12:-0.333333 13:0.5 +-1 1:0.208333 2:1 3:0.333333 4:-0.132075 5:-0.611872 6:1 7:1 8:0.435115 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.166667 2:1 3:0.333333 4:-0.54717 5:-0.894977 6:-1 7:1 8:-0.160305 9:-1 10:-0.741935 11:-1 12:1 13:-1 ++1 1:-0.375 2:1 3:1 4:-0.698113 5:-0.675799 6:-1 7:1 8:0.618321 9:-1 10:-1 11:-1 12:-0.333333 13:-1 ++1 1:0.541667 2:1 3:-0.333333 4:0.245283 5:-0.452055 6:-1 7:-1 8:-0.251908 9:1 10:-1 12:1 13:0.5 ++1 1:0.5 2:-1 3:1 4:0.0566038 5:-0.547945 6:-1 7:1 8:-0.343511 9:-1 10:-0.677419 12:1 13:1 ++1 1:-0.458333 2:1 3:1 4:-0.207547 5:-0.136986 6:-1 7:-1 8:-0.175573 9:1 10:-0.419355 12:-1 13:0.5 +-1 1:-0.0416667 2:1 3:-0.333333 4:-0.358491 5:-0.639269 6:1 7:-1 8:0.725191 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.5 2:-1 3:0.333333 4:-0.132075 5:0.328767 6:1 7:1 8:0.312977 9:-1 10:-0.741935 11:-1 12:-0.333333 13:-1 +-1 1:0.416667 2:-1 3:-0.333333 4:-0.132075 5:-0.684932 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:0.333333 13:-1 +-1 1:-0.333333 2:-1 3:-0.333333 4:-0.320755 5:-0.506849 6:-1 7:1 8:0.587786 9:-1 10:-0.806452 12:-1 13:-1 +-1 1:-0.5 2:-1 3:-0.333333 4:-0.792453 5:-0.671233 6:-1 7:-1 8:0.480916 9:-1 10:-1 11:-1 12:-0.333333 13:-1 ++1 1:0.333333 2:1 3:1 4:-0.169811 5:-0.817352 6:-1 7:1 8:-0.175573 9:1 10:0.16129 12:-0.333333 13:-1 +-1 1:0.291667 2:-1 3:0.333333 4:-0.509434 5:-0.762557 6:1 7:-1 8:-0.618321 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.25 2:-1 3:1 4:0.509434 5:-0.438356 6:-1 7:-1 8:0.0992366 9:1 10:-1 12:-1 13:-1 ++1 1:0.375 2:1 3:-0.333333 4:-0.509434 5:-0.292237 6:-1 7:1 8:-0.51145 9:-1 10:-0.548387 12:-0.333333 13:1 +-1 1:0.166667 2:1 3:0.333333 4:0.0566038 5:-1 6:1 7:-1 8:0.557252 9:-1 10:-0.935484 
11:-1 12:-0.333333 13:1 ++1 1:-0.0833333 2:-1 3:1 4:-0.320755 5:-0.182648 6:-1 7:-1 8:0.0839695 9:1 10:-0.612903 12:-1 13:1 +-1 1:-0.375 2:1 3:0.333333 4:-0.509434 5:-0.543379 6:-1 7:-1 8:0.496183 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.291667 2:-1 3:-1 4:0.0566038 5:-0.479452 6:-1 7:-1 8:0.526718 9:-1 10:-0.709677 11:-1 12:-1 13:-1 +-1 1:0.416667 2:1 3:-1 4:-0.0377358 5:-0.511416 6:1 7:1 8:0.206107 9:-1 10:-0.258065 11:1 12:-1 13:0.5 ++1 1:0.166667 2:1 3:1 4:0.0566038 5:-0.315068 6:-1 7:1 8:-0.374046 9:1 10:-0.806452 12:-0.333333 13:0.5 +-1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.383562 6:-1 7:1 8:0.755725 9:1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.208333 2:-1 3:-0.333333 4:-0.207547 5:-0.118721 6:1 7:1 8:0.236641 9:-1 10:-1 11:-1 12:0.333333 13:-1 +-1 1:-0.375 2:-1 3:0.333333 4:-0.54717 5:-0.47032 6:-1 7:-1 8:0.19084 9:-1 10:-0.903226 12:-0.333333 13:-1 ++1 1:-0.25 2:1 3:0.333333 4:-0.735849 5:-0.465753 6:-1 7:-1 8:0.236641 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.333333 2:1 3:1 4:-0.509434 5:-0.388128 6:-1 7:-1 8:0.0534351 9:1 10:0.16129 12:-0.333333 13:1 +-1 1:0.166667 2:-1 3:1 4:-0.509434 5:0.0410959 6:-1 7:-1 8:0.40458 9:1 10:-0.806452 11:-1 12:-1 13:-1 +-1 1:0.708333 2:1 3:-0.333333 4:0.169811 5:-0.456621 6:-1 7:1 8:0.0992366 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.958333 2:-1 3:0.333333 4:-0.132075 5:-0.675799 6:-1 8:-0.312977 9:-1 10:-0.645161 12:-1 13:-1 +-1 1:0.583333 2:-1 3:1 4:-0.773585 5:-0.557078 6:-1 7:-1 8:0.0839695 9:-1 10:-0.903226 11:-1 12:0.333333 13:-1 ++1 1:-0.333333 2:1 3:1 4:-0.0943396 5:-0.164384 6:-1 7:1 8:0.160305 9:1 10:-1 12:1 13:1 +-1 1:-0.333333 2:1 3:1 4:-0.811321 5:-0.625571 6:-1 7:1 8:0.175573 9:1 10:-0.0322581 12:-1 13:-1 +-1 1:-0.583333 2:-1 3:0.333333 4:-1 5:-0.666667 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.458333 2:-1 3:0.333333 4:-0.509434 5:-0.621005 6:-1 7:-1 8:0.557252 9:-1 10:-1 12:-1 13:-1 +-1 1:0.125 2:1 3:-0.333333 4:-0.509434 5:-0.497717 6:-1 7:-1 8:0.633588 9:-1 10:-0.741935 11:-1 12:-1 13:-1 ++1 1:0.208333 2:1 3:1 4:-0.0188679 5:-0.579909 6:-1 7:-1 8:-0.480916 9:-1 10:-0.354839 12:-0.333333 13:1 ++1 1:-0.75 2:1 3:1 4:-0.509434 5:-0.671233 6:-1 7:-1 8:-0.0992366 9:1 10:-0.483871 12:-1 13:1 ++1 1:0.208333 2:1 3:1 4:0.0566038 5:-0.342466 6:-1 7:1 8:-0.389313 9:1 10:-0.741935 11:-1 12:-1 13:1 +-1 1:-0.5 2:1 3:0.333333 4:-0.320755 5:-0.598174 6:-1 7:1 8:0.480916 9:-1 10:-0.354839 12:-1 13:-1 +-1 1:0.166667 2:1 3:1 4:-0.698113 5:-0.657534 6:-1 7:-1 8:-0.160305 9:1 10:-0.516129 12:-1 13:0.5 +-1 1:-0.458333 2:1 3:-1 4:0.0188679 5:-0.461187 6:-1 7:1 8:0.633588 9:-1 10:-0.741935 11:-1 12:0.333333 13:-1 +-1 1:0.375 2:1 3:-0.333333 4:-0.358491 5:-0.625571 6:1 7:1 8:0.0534351 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.25 2:1 3:-1 4:0.584906 5:-0.342466 6:-1 7:1 8:0.129771 9:-1 10:0.354839 11:1 12:-1 13:1 +-1 1:-0.5 2:-1 3:-0.333333 4:-0.396226 5:-0.178082 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.125 2:1 3:1 4:0.0566038 5:-0.465753 6:-1 7:1 8:-0.129771 9:-1 10:-0.16129 12:-1 13:1 +-1 1:0.25 2:1 3:-0.333333 4:-0.132075 5:-0.56621 6:-1 7:-1 8:0.419847 9:1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.333333 2:-1 3:1 4:-0.320755 5:-0.0684932 6:-1 7:1 8:0.496183 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.0416667 2:1 3:1 4:-0.433962 5:-0.360731 6:-1 7:1 8:-0.419847 9:1 10:-0.290323 12:-0.333333 13:1 ++1 1:0.0416667 2:1 3:1 4:-0.698113 5:-0.634703 6:-1 7:1 8:-0.435115 9:1 10:-1 12:-0.333333 13:-1 ++1 1:-0.0416667 2:1 3:1 4:-0.415094 5:-0.607306 6:-1 7:-1 8:0.480916 9:-1 10:-0.677419 11:-1 12:0.333333 13:1 ++1 1:-0.25 2:1 3:1 4:-0.698113 5:-0.319635 6:-1 7:1 8:-0.282443 
9:1 10:-0.677419 12:-0.333333 13:-1 +-1 1:0.541667 2:1 3:1 4:-0.509434 5:-0.196347 6:-1 7:1 8:0.221374 9:-1 10:-0.870968 12:-1 13:-1 ++1 1:0.208333 2:1 3:1 4:-0.886792 5:-0.506849 6:-1 7:-1 8:0.29771 9:-1 10:-0.967742 11:-1 12:-0.333333 13:1 +-1 1:0.458333 2:-1 3:0.333333 4:-0.132075 5:-0.146119 6:-1 7:-1 8:-0.0534351 9:-1 10:-0.935484 11:-1 12:-1 13:1 +-1 1:-0.125 2:-1 3:-0.333333 4:-0.509434 5:-0.461187 6:-1 7:-1 8:0.389313 9:-1 10:-0.645161 11:-1 12:-1 13:-1 +-1 1:-0.375 2:-1 3:0.333333 4:-0.735849 5:-0.931507 6:-1 7:-1 8:0.587786 9:-1 10:-0.806452 12:-1 13:-1 ++1 1:0.583333 2:1 3:1 4:-0.509434 5:-0.493151 6:-1 7:-1 8:-1 9:-1 10:-0.677419 12:-1 13:-1 +-1 1:-0.166667 2:-1 3:1 4:-0.320755 5:-0.347032 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.166667 2:1 3:1 4:0.339623 5:-0.255708 6:1 7:1 8:-0.19084 9:-1 10:-0.677419 12:1 13:1 ++1 1:0.416667 2:1 3:1 4:-0.320755 5:-0.415525 6:-1 7:1 8:0.160305 9:-1 10:-0.548387 12:-0.333333 13:1 ++1 1:-0.208333 2:1 3:1 4:-0.433962 5:-0.324201 6:-1 7:1 8:0.450382 9:-1 10:-0.83871 12:-1 13:1 +-1 1:-0.0833333 2:1 3:0.333333 4:-0.886792 5:-0.561644 6:-1 7:-1 8:0.0992366 9:1 10:-0.612903 12:-1 13:-1 ++1 1:0.291667 2:-1 3:1 4:0.0566038 5:-0.39726 6:-1 7:1 8:0.312977 9:-1 10:-0.16129 12:0.333333 13:1 ++1 1:0.25 2:1 3:1 4:-0.132075 5:-0.767123 6:-1 7:-1 8:0.389313 9:1 10:-1 11:-1 12:-0.333333 13:1 +-1 1:-0.333333 2:-1 3:-0.333333 4:-0.660377 5:-0.844749 6:-1 7:-1 8:0.0229008 9:-1 10:-1 12:-1 13:-1 ++1 1:0.0833333 2:-1 3:1 4:0.622642 5:-0.0821918 6:-1 8:-0.29771 9:1 10:0.0967742 12:-1 13:-1 +-1 1:-0.5 2:1 3:-0.333333 4:-0.698113 5:-0.502283 6:-1 7:-1 8:0.251908 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.291667 2:-1 3:1 4:0.207547 5:-0.182648 6:-1 7:1 8:0.374046 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.0416667 2:-1 3:0.333333 4:-0.226415 5:-0.187215 6:1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.458333 2:1 3:-0.333333 4:-0.509434 5:-0.228311 6:-1 7:-1 8:0.389313 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.166667 2:-1 3:-0.333333 4:-0.245283 5:-0.3379 6:-1 7:-1 8:0.389313 9:-1 10:-1 12:-1 13:-1 ++1 1:-0.291667 2:1 3:1 4:-0.509434 5:-0.438356 6:-1 7:1 8:0.114504 9:-1 10:-0.741935 11:-1 12:-1 13:1 ++1 1:0.125 2:-1 3:1 4:1 5:-0.260274 6:1 7:1 8:-0.0534351 9:1 10:0.290323 11:1 12:0.333333 13:1 +-1 1:0.541667 2:-1 3:-1 4:0.0566038 5:-0.543379 6:-1 7:-1 8:-0.343511 9:-1 10:-0.16129 11:1 12:-1 13:-1 ++1 1:0.125 2:1 3:1 4:-0.320755 5:-0.283105 6:1 7:1 8:-0.51145 9:1 10:-0.483871 11:1 12:-1 13:1 ++1 1:-0.166667 2:1 3:0.333333 4:-0.509434 5:-0.716895 6:-1 7:-1 8:0.0381679 9:-1 10:-0.354839 12:1 13:1 ++1 1:0.0416667 2:1 3:1 4:-0.471698 5:-0.269406 6:-1 7:1 8:-0.312977 9:1 10:0.0322581 12:0.333333 13:-1 ++1 1:0.166667 2:1 3:1 4:0.0943396 5:-0.324201 6:-1 7:-1 8:-0.740458 9:1 10:-0.612903 12:-0.333333 13:1 +-1 1:0.5 2:-1 3:0.333333 4:0.245283 5:0.0684932 6:-1 7:1 8:0.221374 9:-1 10:-0.741935 11:-1 12:-1 13:-1 +-1 1:0.0416667 2:1 3:0.333333 4:-0.415094 5:-0.328767 6:-1 7:1 8:0.236641 9:-1 10:-0.83871 11:1 12:-0.333333 13:-1 +-1 1:0.0416667 2:-1 3:0.333333 4:0.245283 5:-0.657534 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-0.333333 13:-1 ++1 1:0.375 2:1 3:1 4:-0.509434 5:-0.356164 6:-1 7:-1 8:-0.572519 9:1 10:-0.419355 12:0.333333 13:1 +-1 1:-0.0416667 2:-1 3:0.333333 4:-0.207547 5:-0.680365 6:-1 7:1 8:0.496183 9:-1 10:-0.967742 12:-1 13:-1 +-1 1:-0.0416667 2:1 3:-0.333333 4:-0.245283 5:-0.657534 6:-1 7:-1 8:0.328244 9:-1 10:-0.741935 11:-1 12:-0.333333 13:-1 ++1 1:0.291667 2:1 3:1 4:-0.566038 5:-0.525114 6:1 7:-1 8:0.358779 9:1 10:-0.548387 11:-1 12:0.333333 13:1 ++1 1:0.416667 
2:-1 3:1 4:-0.735849 5:-0.347032 6:-1 7:-1 8:0.496183 9:1 10:-0.419355 12:0.333333 13:-1 ++1 1:0.541667 2:1 3:1 4:-0.660377 5:-0.607306 6:-1 7:1 8:-0.0687023 9:1 10:-0.967742 11:-1 12:-0.333333 13:-1 +-1 1:-0.458333 2:1 3:1 4:-0.132075 5:-0.543379 6:-1 7:-1 8:0.633588 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.458333 2:1 3:1 4:-0.509434 5:-0.452055 6:-1 7:1 8:-0.618321 9:1 10:-0.290323 11:1 12:-0.333333 13:-1 +-1 1:0.0416667 2:1 3:0.333333 4:0.0566038 5:-0.515982 6:-1 7:1 8:0.435115 9:-1 10:-0.483871 11:-1 12:-1 13:1 +-1 1:-0.291667 2:-1 3:0.333333 4:-0.0943396 5:-0.767123 6:-1 7:1 8:0.358779 9:1 10:-0.548387 11:1 12:-1 13:-1 +-1 1:0.583333 2:-1 3:0.333333 4:0.0943396 5:-0.310502 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-0.333333 13:-1 ++1 1:0.125 2:1 3:1 4:-0.415094 5:-0.438356 6:1 7:1 8:0.114504 9:1 10:-0.612903 12:-0.333333 13:-1 +-1 1:-0.791667 2:-1 3:-0.333333 4:-0.54717 5:-0.616438 6:-1 7:-1 8:0.847328 9:-1 10:-0.774194 11:-1 12:-1 13:-1 +-1 1:0.166667 2:1 3:1 4:-0.283019 5:-0.630137 6:-1 7:-1 8:0.480916 9:1 10:-1 11:-1 12:-1 13:1 ++1 1:0.458333 2:1 3:1 4:-0.0377358 5:-0.607306 6:-1 7:1 8:-0.0687023 9:-1 10:-0.354839 12:0.333333 13:0.5 +-1 1:0.25 2:1 3:1 4:-0.169811 5:-0.3379 6:-1 7:1 8:0.694656 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.125 2:1 3:0.333333 4:-0.132075 5:-0.511416 6:-1 7:-1 8:0.40458 9:-1 10:-0.806452 12:-0.333333 13:1 +-1 1:-0.0833333 2:1 3:-1 4:-0.415094 5:-0.60274 6:-1 7:1 8:-0.175573 9:1 10:-0.548387 11:-1 12:-0.333333 13:-1 ++1 1:0.0416667 2:1 3:-0.333333 4:0.849057 5:-0.283105 6:-1 7:1 8:0.89313 9:-1 10:-1 11:-1 12:-0.333333 13:1 ++1 2:1 3:1 4:-0.45283 5:-0.287671 6:-1 7:-1 8:-0.633588 9:1 10:-0.354839 12:0.333333 13:1 ++1 1:-0.0416667 2:1 3:1 4:-0.660377 5:-0.525114 6:-1 7:-1 8:0.358779 9:-1 10:-1 11:-1 12:-0.333333 13:-1 ++1 1:-0.541667 2:1 3:1 4:-0.698113 5:-0.812785 6:-1 7:1 8:-0.343511 9:1 10:-0.354839 12:-1 13:1 ++1 1:0.208333 2:1 3:0.333333 4:-0.283019 5:-0.552511 6:-1 7:1 8:0.557252 9:-1 10:0.0322581 11:-1 12:0.333333 13:1 +-1 1:-0.5 2:-1 3:0.333333 4:-0.660377 5:-0.351598 6:-1 7:1 8:0.541985 9:1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.5 2:1 3:0.333333 4:-0.660377 5:-0.43379 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.125 2:-1 3:0.333333 4:-0.509434 5:-0.575342 6:-1 7:-1 8:0.328244 9:-1 10:-0.483871 12:-1 13:-1 +-1 1:0.0416667 2:-1 3:0.333333 4:-0.735849 5:-0.356164 6:-1 7:1 8:0.465649 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.458333 2:-1 3:1 4:-0.320755 5:-0.191781 6:-1 7:-1 8:-0.221374 9:-1 10:-0.354839 12:0.333333 13:-1 +-1 1:-0.0833333 2:-1 3:0.333333 4:-0.320755 5:-0.406393 6:-1 7:1 8:0.19084 9:-1 10:-0.83871 11:-1 12:-1 13:-1 +-1 1:-0.291667 2:-1 3:-0.333333 4:-0.792453 5:-0.643836 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.0833333 2:1 3:1 4:-0.132075 5:-0.584475 6:-1 7:-1 8:-0.389313 9:1 10:0.806452 11:1 12:-1 13:1 +-1 1:-0.333333 2:1 3:-0.333333 4:-0.358491 5:-0.16895 6:-1 7:1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:0.125 2:1 3:-1 4:-0.509434 5:-0.694064 6:-1 7:1 8:0.389313 9:-1 10:-0.387097 12:-1 13:1 ++1 1:0.541667 2:-1 3:1 4:0.584906 5:-0.534247 6:1 7:-1 8:0.435115 9:1 10:-0.677419 12:0.333333 13:1 ++1 1:-0.625 2:1 3:-1 4:-0.509434 5:-0.520548 6:-1 7:-1 8:0.694656 9:1 10:0.225806 12:-1 13:1 ++1 1:0.375 2:-1 3:1 4:0.0566038 5:-0.461187 6:-1 7:-1 8:0.267176 9:1 10:-0.548387 12:-1 13:-1 +-1 1:0.0833333 2:1 3:-0.333333 4:-0.320755 5:-0.378995 6:-1 7:-1 8:0.282443 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.208333 2:1 3:1 4:-0.358491 5:-0.392694 6:-1 7:1 8:-0.0992366 9:1 10:-0.0322581 12:0.333333 13:1 +-1 1:-0.416667 2:1 3:1 4:-0.698113 
5:-0.611872 6:-1 7:-1 8:0.374046 9:-1 10:-1 11:-1 12:-1 13:1 +-1 1:0.458333 2:-1 3:1 4:0.622642 5:-0.0913242 6:-1 7:-1 8:0.267176 9:1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.125 2:-1 3:1 4:-0.698113 5:-0.415525 6:-1 7:1 8:0.343511 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 2:1 3:0.333333 4:-0.320755 5:-0.675799 6:1 7:1 8:0.236641 9:-1 10:-0.612903 11:1 12:-1 13:-1 +-1 1:-0.333333 2:-1 3:1 4:-0.169811 5:-0.497717 6:-1 7:1 8:0.236641 9:1 10:-0.935484 12:-1 13:-1 ++1 1:0.5 2:1 3:-1 4:-0.169811 5:-0.287671 6:1 7:1 8:0.572519 9:-1 10:-0.548387 12:-0.333333 13:-1 +-1 1:0.666667 2:1 3:-1 4:0.245283 5:-0.506849 6:1 7:1 8:-0.0839695 9:-1 10:-0.967742 12:-0.333333 13:-1 ++1 1:0.666667 2:1 3:0.333333 4:-0.132075 5:-0.415525 6:-1 7:1 8:0.145038 9:-1 10:-0.354839 12:1 13:1 ++1 1:0.583333 2:1 3:1 4:-0.886792 5:-0.210046 6:-1 7:1 8:-0.175573 9:1 10:-0.709677 12:0.333333 13:-1 +-1 1:0.625 2:-1 3:0.333333 4:-0.509434 5:-0.611872 6:-1 7:1 8:-0.328244 9:-1 10:-0.516129 12:-1 13:-1 +-1 1:-0.791667 2:1 3:-1 4:-0.54717 5:-0.744292 6:-1 7:1 8:0.572519 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.375 2:-1 3:1 4:-0.169811 5:-0.232877 6:1 7:-1 8:-0.465649 9:-1 10:-0.387097 12:1 13:-1 ++1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.214612 6:-1 7:-1 8:-0.221374 9:1 10:0.354839 12:1 13:1 ++1 1:-0.291667 2:1 3:0.333333 4:0.0566038 5:-0.520548 6:-1 7:-1 8:0.160305 9:-1 10:0.16129 12:-1 13:-1 ++1 1:0.583333 2:1 3:1 4:-0.415094 5:-0.415525 6:1 7:-1 8:0.40458 9:-1 10:-0.935484 12:0.333333 13:1 +-1 1:-0.125 2:1 3:0.333333 4:-0.339623 5:-0.680365 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.458333 2:1 3:0.333333 4:-0.509434 5:-0.479452 6:1 7:-1 8:0.877863 9:-1 10:-0.741935 11:1 12:-1 13:1 ++1 1:0.125 2:-1 3:1 4:-0.245283 5:0.292237 6:-1 7:1 8:0.206107 9:1 10:-0.387097 12:0.333333 13:1 ++1 1:-0.5 2:1 3:1 4:-0.698113 5:-0.789954 6:-1 7:1 8:0.328244 9:-1 10:-1 11:-1 12:-1 13:1 +-1 1:-0.458333 2:-1 3:1 4:-0.849057 5:-0.365297 6:-1 7:1 8:-0.221374 9:-1 10:-0.806452 12:-1 13:-1 +-1 2:1 3:0.333333 4:-0.320755 5:-0.452055 6:1 7:1 8:0.557252 9:-1 10:-1 11:-1 12:1 13:-1 +-1 1:-0.416667 2:1 3:0.333333 4:-0.320755 5:-0.136986 6:-1 7:-1 8:0.389313 9:-1 10:-0.387097 11:-1 12:-0.333333 13:-1 ++1 1:0.125 2:1 3:1 4:-0.283019 5:-0.73516 6:-1 7:1 8:-0.480916 9:1 10:-0.322581 12:-0.333333 13:0.5 +-1 1:-0.0416667 2:1 3:1 4:-0.735849 5:-0.511416 6:1 7:-1 8:0.160305 9:-1 10:-0.967742 11:-1 12:1 13:1 +-1 1:0.375 2:-1 3:1 4:-0.132075 5:0.223744 6:-1 7:1 8:0.312977 9:-1 10:-0.612903 12:-1 13:-1 ++1 1:0.708333 2:1 3:0.333333 4:0.245283 5:-0.347032 6:-1 7:-1 8:-0.374046 9:1 10:-0.0645161 12:-0.333333 13:1 +-1 1:0.0416667 2:1 3:1 4:-0.132075 5:-0.484018 6:-1 7:-1 8:0.358779 9:-1 10:-0.612903 11:-1 12:-1 13:-1 ++1 1:0.708333 2:1 3:1 4:-0.0377358 5:-0.780822 6:-1 7:-1 8:-0.175573 9:1 10:-0.16129 11:1 12:-1 13:1 +-1 1:0.0416667 2:1 3:-0.333333 4:-0.735849 5:-0.164384 6:-1 7:-1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:1 ++1 1:-0.75 2:1 3:1 4:-0.396226 5:-0.287671 6:-1 7:1 8:0.29771 9:1 10:-1 11:-1 12:-1 13:1 +-1 1:-0.208333 2:1 3:0.333333 4:-0.433962 5:-0.410959 6:1 7:-1 8:0.587786 9:-1 10:-1 11:-1 12:0.333333 13:-1 +-1 1:0.0833333 2:-1 3:-0.333333 4:-0.226415 5:-0.43379 6:-1 7:1 8:0.374046 9:-1 10:-0.548387 12:-1 13:-1 +-1 1:0.208333 2:-1 3:1 4:-0.886792 5:-0.442922 6:-1 7:1 8:-0.221374 9:-1 10:-0.677419 12:-1 13:-1 +-1 1:0.0416667 2:-1 3:0.333333 4:-0.698113 5:-0.598174 6:-1 7:-1 8:0.328244 9:-1 10:-0.483871 12:-1 13:-1 +-1 1:0.666667 2:-1 3:-1 4:-0.132075 5:-0.484018 6:-1 7:-1 8:0.221374 9:-1 10:-0.419355 11:-1 12:0.333333 13:-1 ++1 1:1 2:1 3:1 4:-0.415094 5:-0.187215 6:-1 7:1 
8:0.389313 9:1 10:-1 11:-1 12:1 13:-1 +-1 1:0.625 2:1 3:0.333333 4:-0.54717 5:-0.310502 6:-1 7:-1 8:0.221374 9:-1 10:-0.677419 11:-1 12:-0.333333 13:1 ++1 1:0.208333 2:1 3:1 4:-0.415094 5:-0.205479 6:-1 7:1 8:0.526718 9:-1 10:-1 11:-1 12:0.333333 13:1 ++1 1:0.291667 2:1 3:1 4:-0.415094 5:-0.39726 6:-1 7:1 8:0.0687023 9:1 10:-0.0967742 12:-0.333333 13:1 ++1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.210046 6:-1 7:-1 8:0.557252 9:1 10:-0.483871 11:-1 12:-1 13:1 ++1 1:0.0833333 2:1 3:1 4:0.245283 5:-0.255708 6:-1 7:1 8:0.129771 9:1 10:-0.741935 12:-0.333333 13:1 +-1 1:-0.0416667 2:1 3:-1 4:0.0943396 5:-0.214612 6:1 7:-1 8:0.633588 9:-1 10:-0.612903 12:-1 13:1 +-1 1:0.291667 2:-1 3:0.333333 4:-0.849057 5:-0.123288 6:-1 7:-1 8:0.358779 9:-1 10:-1 11:-1 12:-0.333333 13:-1 +-1 1:0.208333 2:1 3:0.333333 4:-0.792453 5:-0.479452 6:-1 7:1 8:0.267176 9:1 10:-0.806452 12:-1 13:1 ++1 1:0.458333 2:1 3:0.333333 4:-0.415094 5:-0.164384 6:-1 7:-1 8:-0.0839695 9:1 10:-0.419355 12:-1 13:1 +-1 1:-0.666667 2:1 3:0.333333 4:-0.320755 5:-0.43379 6:-1 7:-1 8:0.770992 9:-1 10:0.129032 11:1 12:-1 13:-1 ++1 1:0.25 2:1 3:-1 4:0.433962 5:-0.260274 6:-1 7:1 8:0.343511 9:-1 10:-0.935484 12:-1 13:1 +-1 1:-0.0833333 2:1 3:0.333333 4:-0.415094 5:-0.456621 6:1 7:1 8:0.450382 9:-1 10:-0.225806 12:-1 13:-1 +-1 1:-0.416667 2:-1 3:0.333333 4:-0.471698 5:-0.60274 6:-1 7:-1 8:0.435115 9:-1 10:-0.935484 12:-1 13:-1 ++1 1:0.208333 2:1 3:1 4:-0.358491 5:-0.589041 6:-1 7:1 8:-0.0839695 9:1 10:-0.290323 12:1 13:1 +-1 1:-1 2:1 3:-0.333333 4:-0.320755 5:-0.643836 6:-1 7:1 8:1 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.5 2:-1 3:-0.333333 4:-0.320755 5:-0.643836 6:-1 7:1 8:0.541985 9:-1 10:-0.548387 11:-1 12:-1 13:-1 +-1 1:0.416667 2:-1 3:0.333333 4:-0.226415 5:-0.424658 6:-1 7:1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.0833333 2:1 3:0.333333 4:-1 5:-0.538813 6:-1 7:-1 8:0.267176 9:1 10:-1 11:-1 12:-0.333333 13:1 +-1 1:0.0416667 2:1 3:0.333333 4:-0.509434 5:-0.39726 6:-1 7:1 8:0.160305 9:-1 10:-0.870968 12:-1 13:1 +-1 1:-0.375 2:1 3:-0.333333 4:-0.509434 5:-0.570776 6:-1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.0416667 2:1 3:1 4:-0.698113 5:-0.484018 6:-1 7:-1 8:-0.160305 9:1 10:-0.0967742 12:-0.333333 13:1 ++1 1:0.5 2:1 3:1 4:-0.226415 5:-0.415525 6:-1 7:1 8:-0.145038 9:-1 10:-0.0967742 12:-0.333333 13:1 +-1 1:0.166667 2:1 3:0.333333 4:0.0566038 5:-0.808219 6:-1 7:-1 8:0.572519 9:-1 10:-0.483871 11:-1 12:-1 13:-1 ++1 1:0.416667 2:1 3:1 4:-0.320755 5:-0.0684932 6:1 7:1 8:-0.0687023 9:1 10:-0.419355 11:-1 12:1 13:1 +-1 1:-0.75 2:-1 3:1 4:-0.169811 5:-0.739726 6:-1 7:-1 8:0.694656 9:-1 10:-0.548387 11:-1 12:-1 13:-1 +-1 1:-0.5 2:1 3:-0.333333 4:-0.226415 5:-0.648402 6:-1 7:-1 8:-0.0687023 9:-1 10:-1 12:-1 13:0.5 ++1 1:0.375 2:-1 3:0.333333 4:-0.320755 5:-0.374429 6:-1 7:-1 8:-0.603053 9:-1 10:-0.612903 12:-0.333333 13:1 ++1 1:-0.416667 2:-1 3:1 4:-0.283019 5:-0.0182648 6:1 7:1 8:-0.00763359 9:1 10:-0.0322581 12:-1 13:1 +-1 1:0.208333 2:-1 3:-1 4:0.0566038 5:-0.283105 6:1 7:1 8:0.389313 9:-1 10:-0.677419 11:-1 12:-1 13:-1 +-1 1:-0.0416667 2:1 3:-1 4:-0.54717 5:-0.726027 6:-1 7:1 8:0.816794 9:-1 10:-1 12:-1 13:0.5 ++1 1:0.333333 2:-1 3:1 4:-0.0377358 5:-0.173516 6:-1 7:1 8:0.145038 9:1 10:-0.677419 12:-1 13:1 ++1 1:-0.583333 2:1 3:1 4:-0.54717 5:-0.575342 6:-1 7:-1 8:0.0534351 9:-1 10:-0.612903 12:-1 13:1 +-1 1:-0.333333 2:1 3:1 4:-0.603774 5:-0.388128 6:-1 7:1 8:0.740458 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.0416667 2:1 3:1 4:-0.358491 5:-0.410959 6:-1 7:-1 8:0.374046 9:1 10:-1 11:-1 12:-0.333333 13:1 +-1 1:0.375 2:1 3:0.333333 
4:-0.320755 5:-0.520548 6:-1 7:-1 8:0.145038 9:-1 10:-0.419355 12:1 13:1 ++1 1:0.375 2:-1 3:1 4:0.245283 5:-0.826484 6:-1 7:1 8:0.129771 9:-1 10:1 11:1 12:1 13:1 +-1 2:-1 3:1 4:-0.169811 5:-0.506849 6:-1 7:1 8:0.358779 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.416667 2:1 3:1 4:-0.509434 5:-0.767123 6:-1 7:1 8:-0.251908 9:1 10:-0.193548 12:-1 13:1 +-1 1:-0.25 2:1 3:0.333333 4:-0.169811 5:-0.401826 6:-1 7:1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.0416667 2:1 3:-0.333333 4:-0.509434 5:-0.0913242 6:-1 7:-1 8:0.541985 9:-1 10:-0.935484 11:-1 12:-1 13:-1 ++1 1:0.625 2:1 3:0.333333 4:0.622642 5:-0.324201 6:1 7:1 8:0.206107 9:1 10:-0.483871 12:-1 13:1 +-1 1:-0.583333 2:1 3:0.333333 4:-0.132075 5:-0.109589 6:-1 7:1 8:0.694656 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 2:-1 3:1 4:-0.320755 5:-0.369863 6:-1 7:1 8:0.0992366 9:-1 10:-0.870968 12:-1 13:-1 ++1 1:0.375 2:-1 3:1 4:-0.132075 5:-0.351598 6:-1 7:1 8:0.358779 9:-1 10:0.16129 11:1 12:0.333333 13:-1 +-1 1:-0.0833333 2:-1 3:0.333333 4:-0.132075 5:-0.16895 6:-1 7:1 8:0.0839695 9:-1 10:-0.516129 11:-1 12:-0.333333 13:-1 ++1 1:0.291667 2:1 3:1 4:-0.320755 5:-0.420091 6:-1 7:-1 8:0.114504 9:1 10:-0.548387 11:-1 12:-0.333333 13:1 ++1 1:0.5 2:1 3:1 4:-0.698113 5:-0.442922 6:-1 7:1 8:0.328244 9:-1 10:-0.806452 11:-1 12:0.333333 13:0.5 +-1 1:0.5 2:-1 3:0.333333 4:0.150943 5:-0.347032 6:-1 7:-1 8:0.175573 9:-1 10:-0.741935 11:-1 12:-1 13:-1 ++1 1:0.291667 2:1 3:0.333333 4:-0.132075 5:-0.730594 6:-1 7:1 8:0.282443 9:-1 10:-0.0322581 12:-1 13:-1 ++1 1:0.291667 2:1 3:1 4:-0.0377358 5:-0.287671 6:-1 7:1 8:0.0839695 9:1 10:-0.0967742 12:0.333333 13:1 ++1 1:0.0416667 2:1 3:1 4:-0.509434 5:-0.716895 6:-1 7:-1 8:-0.358779 9:-1 10:-0.548387 12:-0.333333 13:1 +-1 1:-0.375 2:1 3:-0.333333 4:-0.320755 5:-0.575342 6:-1 7:1 8:0.78626 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:-0.375 2:1 3:1 4:-0.660377 5:-0.251142 6:-1 7:1 8:0.251908 9:-1 10:-1 11:-1 12:-0.333333 13:-1 +-1 1:-0.0833333 2:1 3:0.333333 4:-0.698113 5:-0.776256 6:-1 7:-1 8:-0.206107 9:-1 10:-0.806452 11:-1 12:-1 13:-1 +-1 1:0.25 2:1 3:0.333333 4:0.0566038 5:-0.607306 6:1 7:-1 8:0.312977 9:-1 10:-0.483871 11:-1 12:-1 13:-1 +-1 1:0.75 2:-1 3:-0.333333 4:0.245283 5:-0.196347 6:-1 7:-1 8:0.389313 9:-1 10:-0.870968 11:-1 12:0.333333 13:-1 +-1 1:0.333333 2:1 3:0.333333 4:0.0566038 5:-0.465753 6:1 7:-1 8:0.00763359 9:1 10:-0.677419 12:-1 13:-1 ++1 1:0.0833333 2:1 3:1 4:-0.283019 5:0.0365297 6:-1 7:-1 8:-0.0687023 9:1 10:-0.612903 12:-0.333333 13:1 ++1 1:0.458333 2:1 3:0.333333 4:-0.132075 5:-0.0456621 6:-1 7:-1 8:0.328244 9:-1 10:-1 11:-1 12:-1 13:-1 +-1 1:-0.416667 2:1 3:1 4:0.0566038 5:-0.447489 6:-1 7:-1 8:0.526718 9:-1 10:-0.516129 11:-1 12:-1 13:-1 +-1 1:0.208333 2:-1 3:0.333333 4:-0.509434 5:-0.0228311 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1 ++1 1:0.291667 2:1 3:1 4:-0.320755 5:-0.634703 6:-1 7:1 8:-0.0687023 9:1 10:-0.225806 12:0.333333 13:1 ++1 1:0.208333 2:1 3:-0.333333 4:-0.509434 5:-0.278539 6:-1 7:1 8:0.358779 9:-1 10:-0.419355 12:-1 13:-1 +-1 1:-0.166667 2:1 3:-0.333333 4:-0.320755 5:-0.360731 6:-1 7:-1 8:0.526718 9:-1 10:-0.806452 11:-1 12:-1 13:-1 ++1 1:-0.208333 2:1 3:-0.333333 4:-0.698113 5:-0.52968 6:-1 7:-1 8:0.480916 9:-1 10:-0.677419 11:1 12:-1 13:1 +-1 1:-0.0416667 2:1 3:0.333333 4:0.471698 5:-0.666667 6:1 7:-1 8:0.389313 9:-1 10:-0.83871 11:-1 12:-1 13:1 +-1 1:-0.375 2:1 3:-0.333333 4:-0.509434 5:-0.374429 6:-1 7:-1 8:0.557252 9:-1 10:-1 11:-1 12:-1 13:1 +-1 1:0.125 2:-1 3:-0.333333 4:-0.132075 5:-0.232877 6:-1 7:1 8:0.251908 9:-1 10:-0.580645 12:-1 13:-1 +-1 1:0.166667 2:1 3:1 4:-0.132075 
5:-0.69863 6:-1 7:-1 8:0.175573 9:-1 10:-0.870968 12:-1 13:0.5 ++1 1:0.583333 2:1 3:1 4:0.245283 5:-0.269406 6:-1 7:1 8:-0.435115 9:1 10:-0.516129 12:1 13:-1 diff --git a/linear.cpp b/linear.cpp new file mode 100644 index 0000000..22373ed --- /dev/null +++ b/linear.cpp @@ -0,0 +1,1446 @@ +#include +#include +#include +#include +#include +#include "linear.h" +#include "tron.h" +typedef signed char schar; +template inline void swap(T& x, T& y) { T t=x; x=y; y=t; } +#ifndef min +template inline T min(T x,T y) { return (x inline T max(T x,T y) { return (x>y)?x:y; } +#endif +template inline void clone(T*& dst, S* src, int n) +{ + dst = new T[n]; + memcpy((void *)dst,(void *)src,sizeof(T)*n); +} +#define Malloc(type,n) (type *)malloc((n)*sizeof(type)) +#define INF HUGE_VAL + +#if 1 +static void info(const char *fmt,...) +{ + va_list ap; + va_start(ap,fmt); + vprintf(fmt,ap); + va_end(ap); +} +static void info_flush() +{ + fflush(stdout); +} +#else +static void info(char *fmt,...) {} +static void info_flush() {} +#endif + +class l2_lr_fun : public function +{ +public: + l2_lr_fun(const problem *prob, double Cp, double Cn); + ~l2_lr_fun(); + + double fun(double *w); + void grad(double *w, double *g); + void Hv(double *s, double *Hs); + + int get_nr_variable(void); + +private: + void Xv(double *v, double *Xv); + void XTv(double *v, double *XTv); + + double *C; + double *z; + double *D; + const problem *prob; +}; + +l2_lr_fun::l2_lr_fun(const problem *prob, double Cp, double Cn) +{ + int i; + int l=prob->l; + int *y=prob->y; + + this->prob = prob; + + z = new double[l]; + D = new double[l]; + C = new double[l]; + + for (i=0; iy; + int l=prob->l; + int n=prob->n; + + Xv(w, z); + for(i=0;i= 0) + f += C[i]*log(1 + exp(-yz)); + else + f += C[i]*(-yz+log(1 + exp(yz))); + } + f = 2*f; + for(i=0;iy; + int l=prob->l; + int n=prob->n; + + for(i=0;in; +} + +void l2_lr_fun::Hv(double *s, double *Hs) +{ + int i; + int l=prob->l; + int n=prob->n; + double *wa = new double[l]; + + Xv(s, wa); + for(i=0;il; + feature_node **x=prob->x; + + for(i=0;iindex!=-1) + { + Xv[i]+=v[s->index-1]*s->value; + s++; + } + } +} + +void l2_lr_fun::XTv(double *v, double *XTv) +{ + int i; + int l=prob->l; + int n=prob->n; + feature_node **x=prob->x; + + for(i=0;iindex!=-1) + { + XTv[s->index-1]+=v[i]*s->value; + s++; + } + } +} + +class l2loss_svm_fun : public function +{ +public: + l2loss_svm_fun(const problem *prob, double Cp, double Cn); + ~l2loss_svm_fun(); + + double fun(double *w); + void grad(double *w, double *g); + void Hv(double *s, double *Hs); + + int get_nr_variable(void); + +private: + void Xv(double *v, double *Xv); + void subXv(double *v, double *Xv); + void subXTv(double *v, double *XTv); + + double *C; + double *z; + double *D; + int *I; + int sizeI; + const problem *prob; +}; + +l2loss_svm_fun::l2loss_svm_fun(const problem *prob, double Cp, double Cn) +{ + int i; + int l=prob->l; + int *y=prob->y; + + this->prob = prob; + + z = new double[l]; + D = new double[l]; + C = new double[l]; + I = new int[l]; + + for (i=0; iy; + int l=prob->l; + int n=prob->n; + + Xv(w, z); + for(i=0;i 0) + f += C[i]*d*d; + } + f = 2*f; + for(i=0;iy; + int l=prob->l; + int n=prob->n; + + sizeI = 0; + for (i=0;in; +} + +void l2loss_svm_fun::Hv(double *s, double *Hs) +{ + int i; + int l=prob->l; + int n=prob->n; + double *wa = new double[l]; + + subXv(s, wa); + for(i=0;il; + feature_node **x=prob->x; + + for(i=0;iindex!=-1) + { + Xv[i]+=v[s->index-1]*s->value; + s++; + } + } +} + +void l2loss_svm_fun::subXv(double *v, double *Xv) +{ + int i; 
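+	// like Xv above, but restricted to the sizeI active instances indexed by I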
+ feature_node **x=prob->x; + + for(i=0;iindex!=-1) + { + Xv[i]+=v[s->index-1]*s->value; + s++; + } + } +} + +void l2loss_svm_fun::subXTv(double *v, double *XTv) +{ + int i; + int n=prob->n; + feature_node **x=prob->x; + + for(i=0;iindex!=-1) + { + XTv[s->index-1]+=v[i]*s->value; + s++; + } + } +} + +// A coordinate descent algorithm for +// multi-class support vector machines by Crammer and Singer +// +// min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i +// s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i +// +// where e^m_i = 0 if y_i = m, +// e^m_i = 1 if y_i != m, +// C^m_i = C if m = y_i, +// C^m_i = 0 if m != y_i, +// and w_m(\alpha) = \sum_i \alpha^m_i x_i +// +// Given: +// x, y, C +// eps is the stopping tolerance +// +// solution will be put in w +class Solver_MCSVM_CS +{ + public: + Solver_MCSVM_CS(const problem *prob, int nr_class, double *C, double eps=0.1, int max_iter=100000); + ~Solver_MCSVM_CS(); + void Solve(double *w); + private: + void solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new); + bool be_shrunk(int m, int yi, double alpha_i, double minG); + double *B, *C, *G; + int n, l; + int nr_class; + int max_iter; + double eps; + const problem *prob; +}; + +Solver_MCSVM_CS::Solver_MCSVM_CS(const problem *prob, int nr_class, double *C, double eps, int max_iter) +{ + this->n = prob->n; + this->l = prob->l; + this->nr_class = nr_class; + this->eps = eps; + this->max_iter = max_iter; + this->prob = prob; + this->C = C; + this->B = new double[nr_class]; + this->G = new double[nr_class]; +} + +Solver_MCSVM_CS::~Solver_MCSVM_CS() +{ + delete[] B; + delete[] G; +} + +int compare_double(const void *a, const void *b) +{ + if(*(double *)a > *(double *)b) + return -1; + if(*(double *)a < *(double *)b) + return 1; + return 0; +} + +void Solver_MCSVM_CS::solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new) +{ + int r; + double *D; + + clone(D, B, active_i); + if(yi < active_i) + D[yi] += A_i*C_yi; + qsort(D, active_i, sizeof(double), compare_double); + + double beta = D[0] - A_i*C_yi; + for(r=1;rx[i]; + QD[i] = 0; + while(xi->index != -1) + { + QD[i] += (xi->value)*(xi->value); + xi++; + } + active_size_i[i] = nr_class; + y_index[i] = prob->y[i]; + index[i] = i; + } + + while(iter < max_iter) + { + double stopping = -INF; + for(i=0;i 0) + { + for(m=0;mx[i]; + while(xi->index!= -1) + { + double *w_i = &w[(xi->index-1)*nr_class]; + for(m=0;mvalue); + xi++; + } + + double minG = INF; + double maxG = -INF; + for(m=0;m maxG) + maxG = G[m]; + } + if(y_index[i] < active_size_i[i]) + if(alpha_i[prob->y[i]] < C[prob->y[i]] && G[y_index[i]] < minG) + minG = G[y_index[i]]; + + for(m=0;mm) + { + if(!be_shrunk(active_size_i[i], y_index[i], + alpha_i[alpha_index_i[active_size_i[i]]], minG)) + { + swap(alpha_index_i[m], alpha_index_i[active_size_i[i]]); + swap(G[m], G[active_size_i[i]]); + if(y_index[i] == active_size_i[i]) + y_index[i] = m; + else if(y_index[i] == m) + y_index[i] = active_size_i[i]; + break; + } + active_size_i[i]--; + } + } + } + + if(active_size_i[i] <= 1) + { + active_size--; + swap(index[s], index[active_size]); + s--; + continue; + } + + if(maxG-minG <= 1e-12) + continue; + else + stopping = max(maxG - minG, stopping); + + for(m=0;my[i]], active_size_i[i], alpha_new); + int nz_d = 0; + for(m=0;m= 1e-12) + { + d_ind[nz_d] = alpha_index_i[m]; + d_val[nz_d] = d; + nz_d++; + } + } + + xi = prob->x[i]; + while(xi->index != -1) + { + double *w_i = &w[(xi->index-1)*nr_class]; + 
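+				// update only the nz_d class weights whose alpha changed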
for(m=0;mvalue; + xi++; + } + } + } + + iter++; + if(iter % 10 == 0) + { + info("."); + info_flush(); + } + + if(stopping < eps_shrink) + { + if(stopping < eps && start_from_all == true) + break; + else + { + active_size = l; + for(i=0;i= max_iter) + info("Warning: reaching max number of iterations\n"); + + // calculate objective value + double v = 0; + int nSV = 0; + for(i=0;i 0) + nSV++; + } + for(i=0;iy[i]]; + info("Objective value = %lf\n",v); + info("nSV = %d\n",nSV); + + delete [] alpha; + delete [] alpha_new; + delete [] index; + delete [] QD; + delete [] d_ind; + delete [] d_val; + delete [] alpha_index; + delete [] y_index; + delete [] active_size_i; +} + +// A coordinate descent algorithm for +// L1-loss and L2-loss SVM dual problems +// +// min_\alpha 0.5(\alpha^T (Q + D)\alpha) - e^T \alpha, +// s.t. 0 <= alpha_i <= upper_bound_i, +// +// where Qij = yi yj xi^T xj and +// D is a diagonal matrix +// +// In L1-SVM case: +// upper_bound_i = Cp if y_i = 1 +// upper_bound_i = Cn if y_i = -1 +// D_ii = 0 +// In L2-Svm case: +// upper_bound_i = INF +// D_ii = 1/(2*Cp) if y_i = 1 +// D_ii = 1/(2*Cn) if y_i = -1 +// +// Given: +// x, y, Cp, Cn +// eps is the stopping tolerance +// +// solution will be put in w + +static void solve_linear_c_svc( + const problem *prob, double *w, double eps, + double Cp, double Cn, int solver_type) +{ + int l = prob->l; + int n = prob->n; + int i, s, iter = 0; + double C, d, G; + double *QD = new double[l]; + int max_iter = 20000; + int *index = new int[l]; + double *alpha = new double[l]; + schar *y = new schar[l]; + int active_size = l; + + // PG: projected gradient, for shrinking and stopping + double PG; + double PGmax_old = INF; + double PGmin_old = -INF; + double PGmax_new, PGmin_new; + + // default solver_type: L2LOSS_SVM_DUAL + double diag_p = 0.5/Cp, diag_n = 0.5/Cn; + double upper_bound_p = INF, upper_bound_n = INF; + if(solver_type == L1LOSS_SVM_DUAL) + { + diag_p = 0; diag_n = 0; + upper_bound_p = Cp; upper_bound_n = Cn; + } + + for(i=0; iy[i] > 0) + { + y[i] = +1; + QD[i] = diag_p; + } + else + { + y[i] = -1; + QD[i] = diag_n; + } + + feature_node *xi = prob->x[i]; + while (xi->index != -1) + { + QD[i] += (xi->value)*(xi->value); + xi++; + } + index[i] = i; + } + + while (iter < max_iter) + { + PGmax_new = -INF; + PGmin_new = INF; + + for (i=0; ix[i]; + while(xi->index!= -1) + { + G += w[xi->index-1]*(xi->value); + xi++; + } + G = G*yi-1; + + if(yi == 1) + { + C = upper_bound_p; + G += alpha[i]*diag_p; + } + else + { + C = upper_bound_n; + G += alpha[i]*diag_n; + } + + PG = 0; + if (alpha[i] == 0) + { + if (G > PGmax_old) + { + active_size--; + swap(index[s], index[active_size]); + s--; + continue; + } + else if (G < 0) + PG = G; + } + else if (alpha[i] == C) + { + if (G < PGmin_old) + { + active_size--; + swap(index[s], index[active_size]); + s--; + continue; + } + else if (G > 0) + PG = G; + } + else + PG = G; + + PGmax_new = max(PGmax_new, PG); + PGmin_new = min(PGmin_new, PG); + + if(fabs(PG) > 1.0e-12) + { + double alpha_old = alpha[i]; + alpha[i] = min(max(alpha[i] - G/QD[i], 0.0), C); + d = (alpha[i] - alpha_old)*yi; + xi = prob->x[i]; + while (xi->index != -1) + { + w[xi->index-1] += d*xi->value; + xi++; + } + } + } + + iter++; + if(iter % 10 == 0) + { + info("."); + info_flush(); + } + + if(PGmax_new - PGmin_new <= eps) + { + if(active_size == l) + break; + else + { + active_size = l; + info("*"); info_flush(); + PGmax_old = INF; + PGmin_old = -INF; + continue; + } + } + PGmax_old = PGmax_new; + PGmin_old = PGmin_new; + if (PGmax_old 
<= 0) + PGmax_old = INF; + if (PGmin_old >= 0) + PGmin_old = -INF; + } + + info("\noptimization finished, #iter = %d\n",iter); + if (iter >= max_iter) + info("Warning: reaching max number of iterations\n"); + + // calculate objective value + + double v = 0; + int nSV = 0; + for(i=0; i 0) + ++nSV; + } + info("Objective value = %lf\n",v/2); + info("nSV = %d\n",nSV); + + delete [] QD; + delete [] alpha; + delete [] y; + delete [] index; +} + +// label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data +// perm, length l, must be allocated before calling this subroutine +void group_classes(const problem *prob, int *nr_class_ret, int **label_ret, int **start_ret, int **count_ret, int *perm) +{ + int l = prob->l; + int max_nr_class = 16; + int nr_class = 0; + int *label = Malloc(int,max_nr_class); + int *count = Malloc(int,max_nr_class); + int *data_label = Malloc(int,l); + int i; + + for(i=0;iy[i]; + int j; + for(j=0;jeps; + int pos = 0; + int neg = 0; + for(int i=0;il;i++) + if(prob->y[i]==+1) + pos++; + neg = prob->l - pos; + + function *fun_obj=NULL; + switch(param->solver_type) + { + case L2_LR: + { + fun_obj=new l2_lr_fun(prob, Cp, Cn); + TRON tron_obj(fun_obj, eps*min(pos,neg)/prob->l); + tron_obj.tron(w); + delete fun_obj; + break; + } + case L2LOSS_SVM: + { + fun_obj=new l2loss_svm_fun(prob, Cp, Cn); + TRON tron_obj(fun_obj, eps*min(pos,neg)/prob->l); + tron_obj.tron(w); + delete fun_obj; + break; + } + case L2LOSS_SVM_DUAL: + solve_linear_c_svc(prob, w, eps, Cp, Cn, L2LOSS_SVM_DUAL); + break; + case L1LOSS_SVM_DUAL: + solve_linear_c_svc(prob, w, eps, Cp, Cn, L1LOSS_SVM_DUAL); + break; + default: + fprintf(stderr, "Error: unknown solver_type\n"); + break; + } +} + +// +// Interface functions +// +model* train(const problem *prob, const parameter *param) +{ + int i,j; + int l = prob->l; + int n = prob->n; + model *model_ = Malloc(model,1); + + if(prob->bias>=0) + model_->nr_feature=n-1; + else + model_->nr_feature=n; + model_->param = *param; + model_->bias = prob->bias; + + int nr_class; + int *label = NULL; + int *start = NULL; + int *count = NULL; + int *perm = Malloc(int,l); + + // group training data of the same class + group_classes(prob,&nr_class,&label,&start,&count,perm); + + model_->nr_class=nr_class; + model_->label = Malloc(int,nr_class); + for(i=0;ilabel[i] = label[i]; + + // calculate weighted C + double *weighted_C = Malloc(double, nr_class); + for(i=0;iC; + for(i=0;inr_weight;i++) + { + for(j=0;jweight_label[i] == label[j]) + break; + if(j == nr_class) + fprintf(stderr,"warning: class label %d specified in weight is not found\n", param->weight_label[i]); + else + weighted_C[j] *= param->weight[i]; + } + + // constructing the subproblem + feature_node **x = Malloc(feature_node *,l); + for(i=0;ix[perm[i]]; + + int k; + problem sub_prob; + sub_prob.l = l; + sub_prob.n = n; + sub_prob.x = Malloc(feature_node *,sub_prob.l); + sub_prob.y = Malloc(int,sub_prob.l); + + for(k=0; ksolver_type == MCSVM_CS) + { + model_->w=Malloc(double, n*nr_class); + for(i=0;ieps); + Solver.Solve(model_->w); + } + else + { + if(nr_class == 2) + { + model_->w=Malloc(double, n); + + int e0 = start[0]+count[0]; + k=0; + for(; kw[0], weighted_C[0], weighted_C[1]); + } + else + { + model_->w=Malloc(double, n*nr_class); + double *w=Malloc(double, n); + for(i=0;iC); + + for(int j=0;jw[j*nr_class+i] = w[j]; + } + free(w); + } + + } + + free(x); + free(label); + free(start); + free(count); + free(perm); + free(sub_prob.x); + free(sub_prob.y); + 
free(weighted_C); + return model_; +} + +void destroy_model(struct model *model_) +{ + if(model_->w != NULL) + free(model_->w); + if(model_->label != NULL) + free(model_->label); + free(model_); +} + +const char *solver_type_table[]= +{ + "L2_LR", "L2LOSS_SVM_DUAL", "L2LOSS_SVM","L1LOSS_SVM_DUAL","MCSVM_CS", NULL +}; + +int save_model(const char *model_file_name, const struct model *model_) +{ + int i; + int nr_feature=model_->nr_feature; + int n; + const parameter& param = model_->param; + + if(model_->bias>=0) + n=nr_feature+1; + else + n=nr_feature; + FILE *fp = fopen(model_file_name,"w"); + if(fp==NULL) return -1; + + int nr_w; + if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS) + nr_w=1; + else + nr_w=model_->nr_class; + + fprintf(fp, "solver_type %s\n", solver_type_table[param.solver_type]); + fprintf(fp, "nr_class %d\n", model_->nr_class); + fprintf(fp, "label"); + for(i=0; inr_class; i++) + fprintf(fp, " %d", model_->label[i]); + fprintf(fp, "\n"); + + fprintf(fp, "nr_feature %d\n", nr_feature); + + fprintf(fp, "bias %.16g\n", model_->bias); + + fprintf(fp, "w\n"); + for(i=0; iw[i*nr_w+j]); + fprintf(fp, "\n"); + } + + if (ferror(fp) != 0 || fclose(fp) != 0) return -1; + else return 0; +} + +struct model *load_model(const char *model_file_name) +{ + FILE *fp = fopen(model_file_name,"r"); + if(fp==NULL) return NULL; + + int i; + int nr_feature; + int n; + int nr_class; + double bias; + model *model_ = Malloc(model,1); + parameter& param = model_->param; + + model_->label = NULL; + + char cmd[81]; + while(1) + { + fscanf(fp,"%80s",cmd); + if(strcmp(cmd,"solver_type")==0) + { + fscanf(fp,"%80s",cmd); + int i; + for(i=0;solver_type_table[i];i++) + { + if(strcmp(solver_type_table[i],cmd)==0) + { + param.solver_type=i; + break; + } + } + if(solver_type_table[i] == NULL) + { + fprintf(stderr,"unknown solver type.\n"); + free(model_->label); + free(model_); + return NULL; + } + } + else if(strcmp(cmd,"nr_class")==0) + { + fscanf(fp,"%d",&nr_class); + model_->nr_class=nr_class; + } + else if(strcmp(cmd,"nr_feature")==0) + { + fscanf(fp,"%d",&nr_feature); + model_->nr_feature=nr_feature; + } + else if(strcmp(cmd,"bias")==0) + { + fscanf(fp,"%lf",&bias); + model_->bias=bias; + } + else if(strcmp(cmd,"w")==0) + { + break; + } + else if(strcmp(cmd,"label")==0) + { + int nr_class = model_->nr_class; + model_->label = Malloc(int,nr_class); + for(int i=0;ilabel[i]); + } + else + { + fprintf(stderr,"unknown text in model file: [%s]\n",cmd); + free(model_); + return NULL; + } + } + + nr_feature=model_->nr_feature; + if(model_->bias>=0) + n=nr_feature+1; + else + n=nr_feature; + + int nr_w; + if(nr_class==2 && param.solver_type != MCSVM_CS) + nr_w = 1; + else + nr_w = nr_class; + + model_->w=Malloc(double, n*nr_w); + for(i=0; iw[i*nr_w+j]); + fscanf(fp, "\n"); + } + if (ferror(fp) != 0 || fclose(fp) != 0) return NULL; + + return model_; +} + +int predict_values(const struct model *model_, const struct feature_node *x, double *dec_values) +{ + int idx; + int n; + if(model_->bias>=0) + n=model_->nr_feature+1; + else + n=model_->nr_feature; + double *w=model_->w; + int nr_class=model_->nr_class; + int i; + int nr_w; + if(nr_class==2 && model_->param.solver_type != MCSVM_CS) + nr_w = 1; + else + nr_w = nr_class; + + const feature_node *lx=x; + for(i=0;iindex)!=-1; lx++) + { + // the dimension of testing data may exceed that of training + if(idx<=n) + for(i=0;ivalue; + } + + if(nr_class==2) + return (dec_values[0]>0)?model_->label[0]:model_->label[1]; + else + { + int dec_max_idx = 0; + 
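+		// multi-class case: return the label with the largest decision value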
for(i=1;i dec_values[dec_max_idx]) + dec_max_idx = i; + } + return model_->label[dec_max_idx]; + } +} + +int predict(const model *model_, const feature_node *x) +{ + double *dec_values = Malloc(double, model_->nr_class); + int label=predict_values(model_, x, dec_values); + free(dec_values); + return label; +} + +int predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates) +{ + if(model_->param.solver_type==L2_LR) + { + int i; + int nr_class=model_->nr_class; + int nr_w; + if(nr_class==2) + nr_w = 1; + else + nr_w = nr_class; + + int label=predict_values(model_, x, prob_estimates); + for(i=0;iweight_label != NULL) + free(param->weight_label); + if(param->weight != NULL) + free(param->weight); +} + +const char *check_parameter(const problem *prob, const parameter *param) +{ + if(param->eps <= 0) + return "eps <= 0"; + + if(param->C <= 0) + return "C <= 0"; + + if(param->solver_type != L2_LR + && param->solver_type != L2LOSS_SVM_DUAL + && param->solver_type != L2LOSS_SVM + && param->solver_type != L1LOSS_SVM_DUAL + && param->solver_type != MCSVM_CS) + return "unknown solver type"; + + return NULL; +} + +void cross_validation(const problem *prob, const parameter *param, int nr_fold, int *target) +{ + int i; + int *fold_start = Malloc(int,nr_fold+1); + int l = prob->l; + int *perm = Malloc(int,l); + + for(i=0;ibias; + subprob.n = prob->n; + subprob.l = l-(end-begin); + subprob.x = Malloc(struct feature_node*,subprob.l); + subprob.y = Malloc(int,subprob.l); + + k=0; + for(j=0;jx[perm[j]]; + subprob.y[k] = prob->y[perm[j]]; + ++k; + } + for(j=end;jx[perm[j]]; + subprob.y[k] = prob->y[perm[j]]; + ++k; + } + struct model *submodel = train(&subprob,param); + for(j=begin;jx[perm[j]]); + destroy_model(submodel); + free(subprob.x); + free(subprob.y); + } + free(fold_start); + free(perm); +} + +int get_nr_feature(const model *model_) +{ + return model_->nr_feature; +} + +int get_nr_class(const model *model_) +{ + return model_->nr_class; +} + +void get_labels(const model *model_, int* label) +{ + if (model_->label != NULL) + for(int i=0;inr_class;i++) + label[i] = model_->label[i]; +} + diff --git a/linear.h b/linear.h new file mode 100644 index 0000000..5031d5a --- /dev/null +++ b/linear.h @@ -0,0 +1,69 @@ +#ifndef _LIBLINEAR_H +#define _LIBLINEAR_H + +#ifdef __cplusplus +extern "C" { +#endif + +struct feature_node +{ + int index; + double value; +}; + +struct problem +{ + int l, n; + int *y; + struct feature_node **x; + double bias; /* < 0 if no bias term */ +}; + +enum { L2_LR, L2LOSS_SVM_DUAL, L2LOSS_SVM, L1LOSS_SVM_DUAL, MCSVM_CS }; /* solver_type */ + +struct parameter +{ + int solver_type; + + /* these are for training only */ + double eps; /* stopping criteria */ + double C; + int nr_weight; + int *weight_label; + double* weight; +}; + +struct model +{ + struct parameter param; + int nr_class; /* number of classes */ + int nr_feature; + double *w; + int *label; /* label of each class (label[n]) */ + double bias; +}; + +struct model* train(const struct problem *prob, const struct parameter *param); +void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, int *target); + +int predict_values(const struct model *model_, const struct feature_node *x, double* dec_values); +int predict(const struct model *model_, const struct feature_node *x); +int predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates); + +int save_model(const char *model_file_name, const struct model 
*model_); +struct model *load_model(const char *model_file_name); + +int get_nr_feature(const struct model *model_); +int get_nr_class(const struct model *model_); +void get_labels(const struct model *model_, int* label); + +void destroy_model(struct model *model_); +void destroy_param(struct parameter *param); +const char *check_parameter(const struct problem *prob, const struct parameter *param); + +#ifdef __cplusplus +} +#endif + +#endif /* _LIBLINEAR_H */ + diff --git a/matlab/Makefile b/matlab/Makefile new file mode 100644 index 0000000..e44cd1e --- /dev/null +++ b/matlab/Makefile @@ -0,0 +1,58 @@ +# This Makefile is used under Linux + +MATLABDIR ?= /usr/local/matlab +CXX ?= g++ +#CXX = g++-3.3 +CC ?= gcc +CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I.. + +MEX = $(MATLABDIR)/bin/mex +MEX_OPTION = CC\#$(CXX) CXX\#$(CXX) CFLAGS\#"$(CFLAGS)" CXXFLAGS\#"$(CFLAGS)" +# comment the following line if you use MATLAB on a 32-bit computer +MEX_OPTION += -largeArrayDims +MEX_EXT = $(shell $(MATLABDIR)/bin/mexext) + +OCTAVEDIR ?= /usr/include/octave +OCTAVE_MEX = env CC=$(CXX) mkoctfile +OCTAVE_MEX_OPTION = --mex +OCTAVE_MEX_EXT = mex +OCTAVE_CFLAGS = -Wall -O3 -fPIC -I$(OCTAVEDIR) -I.. + +all: matlab + +matlab: binary + +octave: + @make MEX="$(OCTAVE_MEX)" MEX_OPTION="$(OCTAVE_MEX_OPTION)" \ + MEX_EXT="$(OCTAVE_MEX_EXT)" CFLAGS="$(OCTAVE_CFLAGS)" \ + binary + +binary: train.$(MEX_EXT) predict.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT) + +train.$(MEX_EXT): train.c ../linear.h tron.o linear.o linear_model_matlab.o ../blas/blas.a + $(MEX) $(MEX_OPTION) train.c tron.o linear.o linear_model_matlab.o ../blas/blas.a + +predict.$(MEX_EXT): predict.c ../linear.h tron.o linear.o linear_model_matlab.o ../blas/blas.a + $(MEX) $(MEX_OPTION) predict.c tron.o linear.o linear_model_matlab.o ../blas/blas.a + +libsvmread.$(MEX_EXT): libsvmread.c + $(MEX) $(MEX_OPTION) libsvmread.c + +libsvmwrite.$(MEX_EXT): libsvmwrite.c + $(MEX) $(MEX_OPTION) libsvmwrite.c + +linear_model_matlab.o: linear_model_matlab.c ../linear.h + $(CXX) $(CFLAGS) -c linear_model_matlab.c + +linear.o: ../linear.cpp ../linear.h + $(CXX) $(CFLAGS) -c ../linear.cpp + +tron.o: ../tron.cpp ../tron.h + $(CXX) $(CFLAGS) -c ../tron.cpp + +../blas/blas.a: + cd ../blas; make OPTFLAGS='$(CFLAGS)' CC='$(CC)'; + +clean: + cd ../blas; make clean + rm -f *~ *.o *.mex* *.obj diff --git a/matlab/README b/matlab/README new file mode 100644 index 0000000..d20884c --- /dev/null +++ b/matlab/README @@ -0,0 +1,184 @@ +-------------------------------------------- +--- MATLAB/OCTAVE interface of LIBLINEAR --- +-------------------------------------------- + +Table of Contents +================= + +- Introduction +- Installation +- Usage +- Returned Model Structure +- Examples +- Other Utilities +- Additional Information + + +Introduction +============ + +This tool provides a simple interface to LIBLINEAR, a library for +large-scale regularized linear classification +(http://www.csie.ntu.edu.tw/~cjlin/liblinear). It is very easy to use +as the usage and the way of specifying parameters are the same as that +of LIBLINEAR. + +Installation +============ + +On Unix systems, we recommend using GNU g++ as your compiler and type +'make' to build 'train.mexglx' and 'predict.mexglx'. Note that we +assume your MATLAB is installed in '/usr/local/matlab', if not, please +change MATLABDIR in Makefile. 
+
+Example:
+        linux> make
+
+To use Octave, type 'make octave':
+
+Example:
+        linux> make octave
+
+On Windows systems, pre-built 'train.mexw32' and 'predict.mexw32' are
+included in this package (in ..\windows), so there is no need to build
+them unless you run 64-bit Windows. If you have modified the sources
+and would like to re-build the package, type 'mex -setup' in MATLAB
+to choose a compiler for mex first. Then type 'make' to start the
+installation.
+
+Example:
+        matlab> mex -setup
+        (ps: MATLAB will show the following messages to set up the default compiler.)
+        Please choose your compiler for building external interface (MEX) files:
+        Would you like mex to locate installed compilers [y]/n? y
+        Select a compiler:
+        [1] Microsoft Visual C/C++ 2005 in C:\Program Files\Microsoft Visual Studio 8
+        [0] None
+        Compiler: 1
+        Please verify your choices:
+        Compiler: Microsoft Visual C/C++ 2005
+        Location: C:\Program Files\Microsoft Visual Studio 8
+        Are these correct?([y]/n): y
+
+        matlab> make
+
+Under 64-bit Windows, Visual Studio 2005 users will need the "X64
+Compiler and Tools". These are not installed by default, but you can
+find them in the customized installation options.
+
+For a list of supported/compatible compilers for MATLAB, please check
+the following page:
+
+http://www.mathworks.com/support/compilers/current_release/
+
+Usage
+=====
+
+matlab> model = train(training_label_vector, training_instance_matrix [,'liblinear_options', 'col']);
+
+        -training_label_vector:
+            An m by 1 vector of training labels. (type must be double)
+        -training_instance_matrix:
+            An m by n matrix of m training instances with n features.
+            It must be a sparse matrix. (type must be double)
+        -liblinear_options:
+            A string of training options in the same format as that of LIBLINEAR.
+        -col:
+            if 'col' is set, each column of training_instance_matrix is a data instance. Otherwise each row is a data instance.
+
+matlab> [predicted_label, accuracy, decision_values/prob_estimates] = predict(testing_label_vector, testing_instance_matrix, model [, 'liblinear_options', 'col']);
+
+        -testing_label_vector:
+            An m by 1 vector of prediction labels. If the labels of the test
+            data are unknown, simply use any random values. (type must be double)
+        -testing_instance_matrix:
+            An m by n matrix of m testing instances with n features.
+            It must be a sparse matrix. (type must be double)
+        -model:
+            The output of train.
+        -liblinear_options:
+            A string of testing options in the same format as that of LIBLINEAR.
+        -col:
+            if 'col' is set, each column of testing_instance_matrix is a data instance. Otherwise each row is a data instance.
+
+Returned Model Structure
+========================
+
+The 'train' function returns a model which can be used for future
+prediction. It is a structure organized as [Parameters, nr_class,
+nr_feature, bias, Label, w]:
+
+        -Parameters: Parameters
+        -nr_class: number of classes
+        -nr_feature: number of features in training data (not including the bias term)
+        -bias: If >= 0, we assume one additional feature is added to the end
+            of each data instance.
+        -Label: label of each class
+        -w: a nr_w-by-n matrix for the weights, where n is nr_feature
+            or nr_feature+1 depending on the existence of the bias term.
+            nr_w is 1 if nr_class=2 and -s is not 4 (i.e., not
+            multi-class svm by Crammer and Singer). It is
+            nr_class otherwise.
+
+If the '-v' option is specified, cross validation is conducted and the
+returned model is just a scalar: the cross-validation accuracy.
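+
+As an illustration of these fields, the decision values of a binary
+model (nr_w=1) can be recomputed by hand. The sketch below is not part
+of the package; it assumes X is an m by n sparse testing matrix:
+
+matlab> w = model.w;                             % 1-by-n weight vector
+matlab> dec = X * w(1:model.nr_feature)';        % feature part of the decision value
+matlab> if model.bias >= 0, dec = dec + model.bias * w(end); end
+matlab> predicted = model.Label((dec <= 0) + 1); % dec > 0 gives Label(1)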
+
+Result of Prediction
+====================
+
+The function 'predict' has three outputs. The first one,
+predicted_label, is a vector of predicted labels. The second output
+is a scalar: the prediction accuracy. The third is a matrix containing
+decision values or probability estimates (if '-b 1' is specified).
+If k is the number of classes and k' is the number of classifiers
+(k'=1 if k=2, otherwise k'=k), then for decision values each row
+includes the results of the k' binary linear classifiers. For
+probabilities, each row contains k values indicating the probability
+that the testing instance is in each class. Note that the order of
+classes here is the same as the 'Label' field in the model structure.
+
+Examples
+========
+
+Train and test on the provided data heart_scale:
+
+matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
+matlab> model = train(heart_scale_label, heart_scale_inst, '-c 1');
+matlab> [predict_label, accuracy, dec_values] = predict(heart_scale_label, heart_scale_inst, model); % test the training data
+
+Note that for testing, you can put anything in the testing_label_vector.
+
+For probability estimates, you need '-b 1' when testing:
+
+matlab> [predict_label, accuracy, prob_estimates] = predict(heart_scale_label, heart_scale_inst, model, '-b 1');
+
+Other Utilities
+===============
+
+A MATLAB function libsvmread reads files in LIBSVM format:
+
+[label_vector, instance_matrix] = libsvmread('data.txt');
+
+The two outputs are the labels and the instances, which can then be
+used as inputs of train or predict.
+
+A MATLAB function libsvmwrite writes a MATLAB matrix to a file in LIBSVM format:
+
+libsvmwrite('data.txt', label_vector, instance_matrix)
+
+The instance_matrix must be a sparse matrix. (type must be double)
+A round-trip example is given at the end of this file.
+This code was prepared by Rong-En Fan and Kai-Wei Chang from National
+Taiwan University.
+
+Additional Information
+======================
+
+Please cite LIBLINEAR as follows:
+
+R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
+LIBLINEAR: A Library for Large Linear Classification, Journal of
+Machine Learning Research 9(2008), 1871-1874. Software available at
+http://www.csie.ntu.edu.tw/~cjlin/liblinear
+
+For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>.
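+
+A round-trip sketch for libsvmread/libsvmwrite (illustrative only;
+'data.txt' and the variable names are placeholders):
+
+matlab> [y, X] = libsvmread('data.txt');     % y: labels, X: sparse instances
+matlab> libsvmwrite('data_copy.txt', y, X);  % X must be sparse and double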
+ diff --git a/matlab/libsvmread.c b/matlab/libsvmread.c new file mode 120000 index 0000000..1b227d1 --- /dev/null +++ b/matlab/libsvmread.c @@ -0,0 +1 @@ +/home/faculty/cjlin/software/svm/libsvm/libsvm-matlab/libsvmread.c \ No newline at end of file diff --git a/matlab/libsvmwrite.c b/matlab/libsvmwrite.c new file mode 120000 index 0000000..5fcd858 --- /dev/null +++ b/matlab/libsvmwrite.c @@ -0,0 +1 @@ +/home/faculty/cjlin/software/svm/libsvm/libsvm-matlab/libsvmwrite.c \ No newline at end of file diff --git a/matlab/linear_model_matlab.c b/matlab/linear_model_matlab.c new file mode 100644 index 0000000..d330fb7 --- /dev/null +++ b/matlab/linear_model_matlab.c @@ -0,0 +1,168 @@ +#include +#include +#include "linear.h" + +#include "mex.h" + +#if MX_API_VER < 0x07030000 +typedef int mwIndex; +#endif + +#define Malloc(type,n) (type *)malloc((n)*sizeof(type)) + +#define NUM_OF_RETURN_FIELD 6 + +static const char *field_names[] = { + "Parameters", + "nr_class", + "nr_feature", + "bias", + "Label", + "w", +}; + +const char *model_to_matlab_structure(mxArray *plhs[], struct model *model_) +{ + int i; + int nr_w; + double *ptr; + mxArray *return_model, **rhs; + int out_id = 0; + int n; + + rhs = (mxArray **)mxMalloc(sizeof(mxArray *)*NUM_OF_RETURN_FIELD); + + // Parameters + // for now, only solver_type is needed + rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL); + ptr = mxGetPr(rhs[out_id]); + ptr[0] = model_->param.solver_type; + out_id++; + + // nr_class + rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL); + ptr = mxGetPr(rhs[out_id]); + ptr[0] = model_->nr_class; + out_id++; + + if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS) + nr_w=1; + else + nr_w=model_->nr_class; + + // nr_feature + rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL); + ptr = mxGetPr(rhs[out_id]); + ptr[0] = model_->nr_feature; + out_id++; + + // bias + rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL); + ptr = mxGetPr(rhs[out_id]); + ptr[0] = model_->bias; + out_id++; + + if(model_->bias>=0) + n=model_->nr_feature+1; + else + n=model_->nr_feature; + + // Label + if(model_->label) + { + rhs[out_id] = mxCreateDoubleMatrix(model_->nr_class, 1, mxREAL); + ptr = mxGetPr(rhs[out_id]); + for(i = 0; i < model_->nr_class; i++) + ptr[i] = model_->label[i]; + } + else + rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL); + out_id++; + + // w + rhs[out_id] = mxCreateDoubleMatrix(nr_w, n, mxREAL); + ptr = mxGetPr(rhs[out_id]); + for(i = 0; i < n*nr_w; i++) + ptr[i]=model_->w[i]; + out_id++; + + /* Create a struct matrix contains NUM_OF_RETURN_FIELD fields */ + return_model = mxCreateStructMatrix(1, 1, NUM_OF_RETURN_FIELD, field_names); + + /* Fill struct matrix with input arguments */ + for(i = 0; i < NUM_OF_RETURN_FIELD; i++) + mxSetField(return_model,0,field_names[i],mxDuplicateArray(rhs[i])); + /* return */ + plhs[0] = return_model; + mxFree(rhs); + + return NULL; +} + +const char *matlab_matrix_to_model(struct model *model_, const mxArray *matlab_struct) +{ + int i, num_of_fields; + int nr_w; + double *ptr; + int id = 0; + int n; + mxArray **rhs; + + num_of_fields = mxGetNumberOfFields(matlab_struct); + rhs = (mxArray **) mxMalloc(sizeof(mxArray *)*num_of_fields); + + for(i=0;inr_class=0; + nr_w=0; + model_->nr_feature=0; + model_->w=NULL; + model_->label=NULL; + + // Parameters + ptr = mxGetPr(rhs[id]); + model_->param.solver_type = (int)ptr[0]; + id++; + + // nr_class + ptr = mxGetPr(rhs[id]); + model_->nr_class = (int)ptr[0]; + id++; + + if(model_->nr_class==2 && model_->param.solver_type != MCSVM_CS) + nr_w=1; + 
else + nr_w=model_->nr_class; + + // nr_feature + ptr = mxGetPr(rhs[id]); + model_->nr_feature = (int)ptr[0]; + id++; + + // bias + ptr = mxGetPr(rhs[id]); + model_->bias = (int)ptr[0]; + id++; + + if(model_->bias>=0) + n=model_->nr_feature+1; + else + n=model_->nr_feature; + + ptr = mxGetPr(rhs[id]); + model_->label=Malloc(int, model_->nr_class); + for(i=0; inr_class; i++) + model_->label[i]=(int)ptr[i]; + id++; + + ptr = mxGetPr(rhs[id]); + model_->w=Malloc(double, nr_w*n); + for(i = 0; i < n*nr_w; i++) + model_->w[i]=ptr[i]; + id++; + mxFree(rhs); + + return NULL; +} + diff --git a/matlab/linear_model_matlab.h b/matlab/linear_model_matlab.h new file mode 100644 index 0000000..2d1b16a --- /dev/null +++ b/matlab/linear_model_matlab.h @@ -0,0 +1,2 @@ +const char *model_to_matlab_structure(mxArray *plhs[], struct model *model_); +const char *matlab_matrix_to_model(struct model *model_, const mxArray *matlab_struct); diff --git a/matlab/make.m b/matlab/make.m new file mode 100644 index 0000000..1258e13 --- /dev/null +++ b/matlab/make.m @@ -0,0 +1,10 @@ +% This make.m is used under Windows + +mex -O -largeArrayDims -c ..\blas\*.c -outdir ..\blas +mex -O -largeArrayDims -c ..\linear.cpp +mex -O -largeArrayDims -c ..\tron.cpp +mex -O -largeArrayDims -c linear_model_matlab.c -I..\ +mex -O -largeArrayDims train.c -I..\ tron.obj linear.obj linear_model_matlab.obj ..\blas\*.obj +mex -O -largeArrayDims predict.c -I..\ tron.obj linear.obj linear_model_matlab.obj ..\blas\*.obj +mex -O -largeArrayDims libsvmread.c +mex -O -largeArrayDims libsvmwrite.c diff --git a/matlab/predict.c b/matlab/predict.c new file mode 100644 index 0000000..7bd9027 --- /dev/null +++ b/matlab/predict.c @@ -0,0 +1,300 @@ +#include +#include +#include +#include "linear.h" + +#include "mex.h" +#include "linear_model_matlab.h" + +#if MX_API_VER < 0x07030000 +typedef int mwIndex; +#endif + +#define CMD_LEN 2048 + +#define Malloc(type,n) (type *)malloc((n)*sizeof(type)) + +int col_format_flag; + +void read_sparse_instance(const mxArray *prhs, int index, struct feature_node *x, int feature_number, double bias) +{ + int i, j, low, high; + mwIndex *ir, *jc; + double *samples; + + ir = mxGetIr(prhs); + jc = mxGetJc(prhs); + samples = mxGetPr(prhs); + + // each column is one instance + j = 0; + low = (int) jc[index], high = (int) jc[index+1]; + for(i=low; i=0) + { + x[j].index = feature_number+1; + x[j].value = bias; + j++; + } + x[j].index = -1; +} + +static void fake_answer(mxArray *plhs[]) +{ + plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL); + plhs[1] = mxCreateDoubleMatrix(0, 0, mxREAL); + plhs[2] = mxCreateDoubleMatrix(0, 0, mxREAL); +} + +void do_predict(mxArray *plhs[], const mxArray *prhs[], struct model *model_, const int predict_probability_flag) +{ + int label_vector_row_num, label_vector_col_num; + int feature_number, testing_instance_number; + int instance_index; + double *ptr_instance, *ptr_label, *ptr_predict_label; + double *ptr_prob_estimates, *ptr_dec_values, *ptr; + struct feature_node *x; + mxArray *pplhs[1]; // instance sparse matrix in row format + + int correct = 0; + int total = 0; + + int nr_class=get_nr_class(model_); + int nr_w; + double *prob_estimates=NULL; + + if(nr_class==2 && model_->param.solver_type!=MCSVM_CS) + nr_w=1; + else + nr_w=nr_class; + + // prhs[1] = testing instance matrix + feature_number = get_nr_feature(model_); + testing_instance_number = (int) mxGetM(prhs[1]); + if(col_format_flag) + { + feature_number = (int) mxGetM(prhs[1]); + testing_instance_number = (int) mxGetN(prhs[1]); + } + + 
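+	// sanity-check the label vector against the number of testing instances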
label_vector_row_num = (int) mxGetM(prhs[0]); + label_vector_col_num = (int) mxGetN(prhs[0]); + + if(label_vector_row_num!=testing_instance_number) + { + mexPrintf("Length of label vector does not match # of instances.\n"); + fake_answer(plhs); + return; + } + if(label_vector_col_num!=1) + { + mexPrintf("label (1st argument) should be a vector (# of column is 1).\n"); + fake_answer(plhs); + return; + } + + ptr_instance = mxGetPr(prhs[1]); + ptr_label = mxGetPr(prhs[0]); + + // transpose instance matrix + if(mxIsSparse(prhs[1])) + { + if(col_format_flag) + { + pplhs[0] = (mxArray *)prhs[1]; + } + else + { + mxArray *pprhs[1]; + pprhs[0] = mxDuplicateArray(prhs[1]); + if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose")) + { + mexPrintf("Error: cannot transpose testing instance matrix\n"); + fake_answer(plhs); + return; + } + } + } + else + mexPrintf("Testing_instance_matrix must be sparse\n"); + + + prob_estimates = Malloc(double, nr_class); + + plhs[0] = mxCreateDoubleMatrix(testing_instance_number, 1, mxREAL); + if(predict_probability_flag) + plhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_class, mxREAL); + else + plhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_w, mxREAL); + + ptr_predict_label = mxGetPr(plhs[0]); + ptr_prob_estimates = mxGetPr(plhs[2]); + ptr_dec_values = mxGetPr(plhs[2]); + x = Malloc(struct feature_node, feature_number+2); + for(instance_index=0;instance_indexbias); + + if(predict_probability_flag) + { + v = predict_probability(model_, x, prob_estimates); + ptr_predict_label[instance_index] = v; + for(i=0;i 5 || nrhs < 3) + { + exit_with_help(); + fake_answer(plhs); + return; + } + if(nrhs == 5) + { + mxGetString(prhs[4], cmd, mxGetN(prhs[4])+1); + if(strcmp(cmd, "col") == 0) + { + col_format_flag = 1; + } + } + + if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) { + mexPrintf("Error: label vector and instance matrix must be double\n"); + fake_answer(plhs); + return; + } + + if(mxIsStruct(prhs[2])) + { + const char *error_msg; + + // parse options + if(nrhs>=4) + { + int i, argc = 1; + char *argv[CMD_LEN/2]; + + // put options in argv[] + mxGetString(prhs[3], cmd, mxGetN(prhs[3]) + 1); + if((argv[argc] = strtok(cmd, " ")) != NULL) + while((argv[++argc] = strtok(NULL, " ")) != NULL) + ; + + for(i=1;i=argc) + { + exit_with_help(); + fake_answer(plhs); + return; + } + switch(argv[i-1][1]) + { + case 'b': + prob_estimate_flag = atoi(argv[i]); + break; + default: + mexPrintf("unknown option\n"); + exit_with_help(); + fake_answer(plhs); + return; + } + } + } + + model_ = Malloc(struct model, 1); + error_msg = matlab_matrix_to_model(model_, prhs[2]); + if(error_msg) + { + mexPrintf("Error: can't read model: %s\n", error_msg); + destroy_model(model_); + fake_answer(plhs); + return; + } + + if(prob_estimate_flag) + { + if(model_->param.solver_type!=L2_LR) + { + mexPrintf("probability output is only supported for logistic regression\n"); + prob_estimate_flag=0; + } + } + + if(mxIsSparse(prhs[1])) + do_predict(plhs, prhs, model_, prob_estimate_flag); + else + { + mexPrintf("Testing_instance_matrix must be sparse\n"); + fake_answer(plhs); + } + + // destroy model_ + destroy_model(model_); + } + else + { + mexPrintf("model file should be a struct array\n"); + fake_answer(plhs); + } + + return; +} diff --git a/matlab/run.m b/matlab/run.m new file mode 100644 index 0000000..8dc7d32 --- /dev/null +++ b/matlab/run.m @@ -0,0 +1,4 @@ +[y,xt] = libsvmread('../heart_scale'); +model=train(y, xt) +[l,a]=predict(y, xt, model); + diff --git a/matlab/train.c b/matlab/train.c 
new file mode 100644 index 0000000..58ac729 --- /dev/null +++ b/matlab/train.c @@ -0,0 +1,334 @@ +#include +#include +#include +#include +#include +#include "linear.h" + +#include "mex.h" +#include "linear_model_matlab.h" + +#if MX_API_VER < 0x07030000 +typedef int mwIndex; +#endif + +#define CMD_LEN 2048 +#define Malloc(type,n) (type *)malloc((n)*sizeof(type)) +#define INF HUGE_VAL + +void exit_with_help() +{ + mexPrintf( + "Usage: model = train(training_label_vector, training_instance_matrix, 'liblinear_options', 'col');\n" + "liblinear_options:\n" + "-s type : set type of solver (default 1)\n" + " 0 -- L2 logistic regression\n" + " 1 -- L2-loss support vector machines (dual)\n" + " 2 -- L2-loss support vector machines (primal)\n" + " 3 -- L1-loss support vector machines (dual)\n" + " 4 -- multi-class support vector machines from Crammer and Singer\n" + "-c cost : set the parameter C (default 1)\n" + "-e epsilon : set tolerance of termination criterion\n" + " -s 0 and 2\n" + " |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n" + " where f is the primal function, (default 0.01)\n" + " -s 1, 3, and 4\n" + " Dual maximal violation <= eps; similar to libsvm (default 0.1)\n" + "-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default 1)\n" + "-wi weight: weights adjust the parameter C of different classes (see README for details)\n" + "-v n: n-fold cross validation mode\n" + "col:\n" + " if 'col' is setted, training_instance_matrix is parsed in column format, otherwise is in row format\n" + ); +} + +// liblinear arguments +struct parameter param; // set by parse_command_line +struct problem prob; // set by read_problem +struct model *model_; +struct feature_node *x_space; +int cross_validation_flag; +int col_format_flag; +int nr_fold; +double bias; + +double do_cross_validation() +{ + int i; + int total_correct = 0; + int *target = Malloc(int,prob.l); + double retval = 0.0; + + cross_validation(&prob,¶m,nr_fold,target); + + for(i=0;i 2) + { + mxGetString(prhs[2], cmd, mxGetN(prhs[2]) + 1); + if((argv[argc] = strtok(cmd, " ")) != NULL) + while((argv[++argc] = strtok(NULL, " ")) != NULL) + ; + } + + // parse options + for(i=1;i=argc) + return 1; + switch(argv[i-1][1]) + { + case 's': + param.solver_type = atoi(argv[i]); + break; + case 'c': + param.C = atof(argv[i]); + break; + case 'e': + param.eps = atof(argv[i]); + break; + case 'B': + bias = atof(argv[i]); + break; + case 'v': + cross_validation_flag = 1; + nr_fold = atoi(argv[i]); + if(nr_fold < 2) + { + mexPrintf("n-fold cross validation: n must >= 2\n"); + return 1; + } + break; + case 'w': + ++param.nr_weight; + param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight); + param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight); + param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]); + param.weight[param.nr_weight-1] = atof(argv[i]); + break; + default: + mexPrintf("unknown option\n"); + return 1; + } + } + + if(param.eps == INF) + { + if(param.solver_type == L2_LR || param.solver_type == L2LOSS_SVM) + param.eps = 0.01; + else if(param.solver_type == L2LOSS_SVM_DUAL || param.solver_type == L1LOSS_SVM_DUAL || param.solver_type == MCSVM_CS) + param.eps = 0.1; + } + return 0; +} + +static void fake_answer(mxArray *plhs[]) +{ + plhs[0] = mxCreateDoubleMatrix(0, 0, mxREAL); +} + +int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat) +{ + int i, j, k, low, high; + mwIndex *ir, *jc; + int elements, max_index, num_samples, 
label_vector_row_num; + double *samples, *labels; + mxArray *instance_mat_col; // instance sparse matrix in column format + + prob.x = NULL; + prob.y = NULL; + x_space = NULL; + + if(col_format_flag) + instance_mat_col = (mxArray *)instance_mat; + else + { + // transpose instance matrix + mxArray *prhs[1], *plhs[1]; + prhs[0] = mxDuplicateArray(instance_mat); + if(mexCallMATLAB(1, plhs, 1, prhs, "transpose")) + { + mexPrintf("Error: cannot transpose training instance matrix\n"); + return -1; + } + instance_mat_col = plhs[0]; + mxDestroyArray(prhs[0]); + } + + // the number of instance + prob.l = (int) mxGetN(instance_mat_col); + label_vector_row_num = (int) mxGetM(label_vec); + + if(label_vector_row_num!=prob.l) + { + mexPrintf("Length of label vector does not match # of instances.\n"); + return -1; + } + + // each column is one instance + labels = mxGetPr(label_vec); + samples = mxGetPr(instance_mat_col); + ir = mxGetIr(instance_mat_col); + jc = mxGetJc(instance_mat_col); + + num_samples = (int) mxGetNzmax(instance_mat_col); + + elements = num_samples + prob.l*2; + max_index = (int) mxGetM(instance_mat_col); + + prob.y = Malloc(int, prob.l); + prob.x = Malloc(struct feature_node*, prob.l); + x_space = Malloc(struct feature_node, elements); + + prob.bias=bias; + + j = 0; + for(i=0;i=0) + { + x_space[j].index = max_index+1; + x_space[j].value = prob.bias; + j++; + } + x_space[j++].index = -1; + } + + if(prob.bias>=0) + prob.n = max_index+1; + else + prob.n = max_index; + + return 0; +} + +// Interface function of matlab +// now assume prhs[0]: label prhs[1]: features +void mexFunction( int nlhs, mxArray *plhs[], + int nrhs, const mxArray *prhs[] ) +{ + const char *error_msg; + // fix random seed to have same results for each run + // (for cross validation) + srand(1); + + // Transform the input Matrix to libsvm format + if(nrhs > 0 && nrhs < 5) + { + int err=0; + + if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) { + mexPrintf("Error: label vector and instance matrix must be double\n"); + fake_answer(plhs); + return; + } + + if(parse_command_line(nrhs, prhs, NULL)) + { + exit_with_help(); + destroy_param(¶m); + fake_answer(plhs); + return; + } + + if(mxIsSparse(prhs[1])) + err = read_problem_sparse(prhs[0], prhs[1]); + else + { + mexPrintf("Training_instance_matrix must be sparse\n"); + destroy_param(¶m); + fake_answer(plhs); + return; + } + + // train's original code + error_msg = check_parameter(&prob, ¶m); + + if(err || error_msg) + { + if (error_msg != NULL) + mexPrintf("Error: %s\n", error_msg); + destroy_param(¶m); + free(prob.y); + free(prob.x); + free(x_space); + fake_answer(plhs); + return; + } + + if(cross_validation_flag) + { + double *ptr; + plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL); + ptr = mxGetPr(plhs[0]); + ptr[0] = do_cross_validation(); + } + else + { + const char *error_msg; + + model_ = train(&prob, ¶m); + error_msg = model_to_matlab_structure(plhs, model_); + if(error_msg) + mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg); + destroy_model(model_); + } + destroy_param(¶m); + free(prob.y); + free(prob.x); + free(x_space); + } + else + { + exit_with_help(); + fake_answer(plhs); + return; + } +} diff --git a/predict.c b/predict.c new file mode 100644 index 0000000..9772f72 --- /dev/null +++ b/predict.c @@ -0,0 +1,215 @@ +#include +#include +#include +#include +#include +#include "linear.h" + +struct feature_node *x; +int max_nr_attr = 64; + +struct model* model_; +int flag_predict_probability=0; + +void exit_input_error(int line_num) 
+{ + fprintf(stderr,"Wrong input format at line %d\n", line_num); + exit(1); +} + +static char *line = NULL; +static int max_line_len; + +static char* readline(FILE *input) +{ + int len; + + if(fgets(line,max_line_len,input) == NULL) + return NULL; + + while(strrchr(line,'\n') == NULL) + { + max_line_len *= 2; + line = (char *) realloc(line,max_line_len); + len = (int) strlen(line); + if(fgets(line+len,max_line_len-len,input) == NULL) + break; + } + return line; +} + +void do_predict(FILE *input, FILE *output, struct model* model_) +{ + int correct = 0; + int total = 0; + + int nr_class=get_nr_class(model_); + double *prob_estimates=NULL; + int j, n; + int nr_feature=get_nr_feature(model_); + if(model_->bias>=0) + n=nr_feature+1; + else + n=nr_feature; + + if(flag_predict_probability) + { + int *labels; + + if(model_->param.solver_type!=L2_LR) + { + fprintf(stderr, "probability output is only supported for logistic regression\n"); + exit(1); + } + + labels=(int *) malloc(nr_class*sizeof(int)); + get_labels(model_,labels); + prob_estimates = (double *) malloc(nr_class*sizeof(double)); + fprintf(output,"labels"); + for(j=0;j=max_nr_attr-2) // need one more for index = -1 + { + max_nr_attr *= 2; + x = (struct feature_node *) realloc(x,max_nr_attr*sizeof(struct feature_node)); + } + + idx = strtok(NULL,":"); + val = strtok(NULL," \t"); + + if(val == NULL) + break; + errno = 0; + x[i].index = (int) strtol(idx,&endptr,10); + if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index) + exit_input_error(total+1); + else + inst_max_index = x[i].index; + + errno = 0; + x[i].value = strtod(val,&endptr); + if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) + exit_input_error(total+1); + + // feature indices larger than those in training are not used + if(x[i].index <= nr_feature) + ++i; + } + + if(model_->bias>=0) + { + x[i].index = n; + x[i].value = model_->bias; + i++; + } + x[i].index = -1; + + if(flag_predict_probability) + { + int j; + predict_label = predict_probability(model_,x,prob_estimates); + fprintf(output,"%d",predict_label); + for(j=0;jnr_class;j++) + fprintf(output," %g",prob_estimates[j]); + fprintf(output,"\n"); + } + else + { + predict_label = predict(model_,x); + fprintf(output,"%d\n",predict_label); + } + + if(predict_label == target_label) + ++correct; + ++total; + } + printf("Accuracy = %g%% (%d/%d)\n",(double) correct/total*100,correct,total); + if(flag_predict_probability) + free(prob_estimates); +} + +void exit_with_help() +{ + printf( + "Usage: predict [options] test_file model_file output_file\n" + "options:\n" + "-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0)\n" + ); + exit(1); +} + +int main(int argc, char **argv) +{ + FILE *input, *output; + int i; + + // parse options + for(i=1;i=argc) + exit_with_help(); + + input = fopen(argv[i],"r"); + if(input == NULL) + { + fprintf(stderr,"can't open input file %s\n",argv[i]); + exit(1); + } + + output = fopen(argv[i+2],"w"); + if(output == NULL) + { + fprintf(stderr,"can't open output file %s\n",argv[i+2]); + exit(1); + } + + if((model_=load_model(argv[i+1]))==0) + { + fprintf(stderr,"can't open model file %s\n",argv[i+1]); + exit(1); + } + + x = (struct feature_node *) malloc(max_nr_attr*sizeof(struct feature_node)); + do_predict(input, output, model_); + destroy_model(model_); + free(line); + free(x); + fclose(input); + fclose(output); + return 0; +} + diff --git a/train.c b/train.c new file mode 100644 index 0000000..62a8578 --- /dev/null +++ 
diff --git a/train.c b/train.c
new file mode 100644
index 0000000..62a8578
--- /dev/null
+++ b/train.c
@@ -0,0 +1,313 @@
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include "linear.h"
+#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
+#define INF HUGE_VAL
+
+void exit_with_help()
+{
+	printf(
+	"Usage: train [options] training_set_file [model_file]\n"
+	"options:\n"
+	"-s type : set type of solver (default 1)\n"
+	"	0 -- L2-regularized logistic regression\n"
+	"	1 -- L2-loss support vector machines (dual)\n"
+	"	2 -- L2-loss support vector machines (primal)\n"
+	"	3 -- L1-loss support vector machines (dual)\n"
+	"	4 -- multi-class support vector machines by Crammer and Singer\n"
+	"-c cost : set the parameter C (default 1)\n"
+	"-e epsilon : set tolerance of termination criterion\n"
+	"	-s 0 and 2\n"
+	"		|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n"
+	"		where f is the primal function, (default 0.01)\n"
+	"	-s 1, 3, and 4\n"
+	"		Dual maximal violation <= eps; similar to libsvm (default 0.1)\n"
+	"-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default 1)\n"
+	"-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
+	"-v n: n-fold cross validation mode\n"
+	);
+	exit(1);
+}
+
+void exit_input_error(int line_num)
+{
+	fprintf(stderr,"Wrong input format at line %d\n", line_num);
+	exit(1);
+}
+
+static char *line = NULL;
+static int max_line_len;
+
+static char* readline(FILE *input)
+{
+	int len;
+
+	if(fgets(line,max_line_len,input) == NULL)
+		return NULL;
+
+	while(strrchr(line,'\n') == NULL)
+	{
+		max_line_len *= 2;
+		line = (char *) realloc(line,max_line_len);
+		len = (int) strlen(line);
+		if(fgets(line+len,max_line_len-len,input) == NULL)
+			break;
+	}
+	return line;
+}
+
+void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
+void read_problem(const char *filename);
+void do_cross_validation();
+
+struct feature_node *x_space;
+struct parameter param;
+struct problem prob;
+struct model* model_;
+int flag_cross_validation;
+int nr_fold;
+double bias;
+
+int main(int argc, char **argv)
+{
+	char input_file_name[1024];
+	char model_file_name[1024];
+	const char *error_msg;
+
+	parse_command_line(argc, argv, input_file_name, model_file_name);
+	read_problem(input_file_name);
+	error_msg = check_parameter(&prob,&param);
+
+	if(error_msg)
+	{
+		fprintf(stderr,"Error: %s\n",error_msg);
+		exit(1);
+	}
+
+	if(flag_cross_validation)
+	{
+		do_cross_validation();
+	}
+	else
+	{
+		model_=train(&prob, &param);
+		save_model(model_file_name, model_);
+		destroy_model(model_);
+	}
+	destroy_param(&param);
+	free(prob.y);
+	free(prob.x);
+	free(x_space);
+	free(line);
+
+	return 0;
+}
+
+void do_cross_validation()
+{
+	int i;
+	int total_correct = 0;
+	int *target = Malloc(int, prob.l);
+
+	cross_validation(&prob,&param,nr_fold,target);
+
+	for(i=0;i<prob.l;i++)
+		if(target[i] == prob.y[i])
+			++total_correct;
+	printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
+
+	free(target);
+}
+
+void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
+{
+	int i;
+
+	// default values
+	param.solver_type = L2LOSS_SVM_DUAL;
+	param.C = 1;
+	param.eps = INF; // see setting below
+	param.nr_weight = 0;
+	param.weight_label = NULL;
+	param.weight = NULL;
+	flag_cross_validation = 0;
+	bias = 1;
+
+	// parse options
+	for(i=1;i<argc;i++)
+	{
+		if(argv[i][0] != '-') break;
+		if(++i>=argc)
+			exit_with_help();
+		switch(argv[i-1][1])
+		{
+			case 's':
+				param.solver_type = atoi(argv[i]);
+				break;
+
+			case 'c':
+				param.C = atof(argv[i]);
+				break;
+
+			case 'e':
+				param.eps = atof(argv[i]);
+				break;
+
+			case 'B':
+				bias = atof(argv[i]);
+				break;
+
+			case 'w':
+				++param.nr_weight;
+				param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight);
+				param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight);
+				param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
+				param.weight[param.nr_weight-1] = atof(argv[i]);
+				break;
+
+			case 'v':
+				flag_cross_validation = 1;
+				nr_fold = atoi(argv[i]);
+				if(nr_fold < 2)
+				{
+					fprintf(stderr,"n-fold cross validation: n must >= 2\n");
+					exit_with_help();
+				}
+				break;
+
+			default:
+				fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
+				exit_with_help();
+				break;
+		}
+	}
+
+	// determine filenames
+	if(i>=argc)
+		exit_with_help();
+
+	strcpy(input_file_name, argv[i]);
+
+	if(i<argc-1)
+		strcpy(model_file_name,argv[i+1]);
+	else
+	{
+		char *p = strrchr(argv[i],'/');
+		if(p==NULL)
+			p = argv[i];
+		else
+			++p;
+		sprintf(model_file_name,"%s.model",p);
+	}
+
+	if(param.eps == INF)
+	{
+		if(param.solver_type == L2_LR || param.solver_type == L2LOSS_SVM)
+			param.eps = 0.01;
+		else if(param.solver_type == L2LOSS_SVM_DUAL || param.solver_type == L1LOSS_SVM_DUAL || param.solver_type == MCSVM_CS)
+			param.eps = 0.1;
+	}
+}
+
+// read in a problem (in libsvm format)
+void read_problem(const char *filename)
+{
+	int max_index, inst_max_index, i;
+	long int elements, j;
+	FILE *fp = fopen(filename,"r");
+	char *endptr;
+	char *idx, *val, *label;
+
+	if(fp == NULL)
+	{
+		fprintf(stderr,"can't open input file %s\n",filename);
+		exit(1);
+	}
+
+	prob.l = 0;
+	elements = 0;
+	max_line_len = 1024;
+	line = Malloc(char,max_line_len);
+	while(readline(fp)!=NULL)
+	{
+		char *p = strtok(line," \t"); // label
+
+		// features
+		while(1)
+		{
+			p = strtok(NULL," \t");
+			if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
+				break;
+			elements++;
+		}
+		elements++; // for bias term
+		prob.l++;
+	}
+	rewind(fp);
+
+	prob.bias=bias;
+
+	prob.y = Malloc(int,prob.l);
+	prob.x = Malloc(struct feature_node *,prob.l);
+	x_space = Malloc(struct feature_node,elements+prob.l);
+
+	max_index = 0;
+	j=0;
+	for(i=0;i<prob.l;i++)
+	{
+		inst_max_index = 0; // strtol gives 0 if wrong format
+		readline(fp);
+		prob.x[i] = &x_space[j];
+		label = strtok(line," \t");
+		prob.y[i] = (int) strtol(label,&endptr,10);
+		if(endptr == label)
+			exit_input_error(i+1);
+
+		while(1)
+		{
+			idx = strtok(NULL,":");
+			val = strtok(NULL," \t");
+
+			if(val == NULL)
+				break;
+
+			errno = 0;
+			x_space[j].index = (int) strtol(idx,&endptr,10);
+			if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
+				exit_input_error(i+1);
+			else
+				inst_max_index = x_space[j].index;
+
+			errno = 0;
+			x_space[j].value = strtod(val,&endptr);
+			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
+				exit_input_error(i+1);
+
+			++j;
+		}
+
+		if(inst_max_index > max_index)
+			max_index = inst_max_index;
+
+		if(prob.bias >= 0)
+			x_space[j++].value = prob.bias;
+
+		x_space[j++].index = -1;
+	}
+
+	if(prob.bias >= 0)
+	{
+		prob.n=max_index+1;
+		for(i=1;i<prob.l;i++)
+			(prob.x[i]-2)->index = prob.n;
+		x_space[j-2].index = prob.n;
+	}
+	else
+		prob.n=max_index;
+
+	fclose(fp);
+}
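train.c ultimately just fills struct problem and struct parameter and calls the library. For programs that embed LIBLINEAR directly, a minimal sketch with a hand-built two-instance problem and no bias term (all values made up):

#include <stdio.h>
#include "linear.h"

int main()
{
	// two instances: "+1 1:1 2:0.5" and "-1 1:-1", each list ends with index -1
	struct feature_node x0[] = {{1,1.0},{2,0.5},{-1,0}};
	struct feature_node x1[] = {{1,-1.0},{-1,0}};
	struct feature_node *x[] = {x0, x1};
	int y[] = {1, -1};

	struct problem prob;
	prob.l = 2; prob.n = 2; prob.bias = -1;
	prob.x = x; prob.y = y;

	struct parameter param;
	param.solver_type = L2LOSS_SVM_DUAL;
	param.C = 1; param.eps = 0.1;
	param.nr_weight = 0; param.weight_label = NULL; param.weight = NULL;

	const char *err = check_parameter(&prob, &param);
	if(err) { fprintf(stderr, "Error: %s\n", err); return 1; }

	struct model *m = train(&prob, &param);
	printf("predicted label for first instance: %d\n", predict(m, x0));
	destroy_model(m);
	destroy_param(&param);
	return 0;
}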
diff --git a/tron.cpp b/tron.cpp
new file mode 100644
index 0000000..293370a
--- /dev/null
+++ b/tron.cpp
@@ -0,0 +1,214 @@
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include "tron.h"
+
+#ifndef min
+template <class T> inline T min(T x,T y) { return (x<y)?x:y; }
+#endif
+
+#ifndef max
+template <class T> inline T max(T x,T y) { return (x>y)?x:y; }
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern double dnrm2_(int *, double *, int *);
+extern double ddot_(int *, double *, int *, double *, int *);
+extern int daxpy_(int *, double *, double *, int *, double *, int *);
+extern int dscal_(int *, double *, double *, int *);
+
+#ifdef __cplusplus
+}
+#endif
+
+TRON::TRON(const function *fun_obj, double eps, int max_iter)
+{
+	this->fun_obj=const_cast<function *>(fun_obj);
+	this->eps=eps;
+	this->max_iter=max_iter;
+}
+
+TRON::~TRON()
+{
+}
+
+void TRON::tron(double *w)
+{
+	// Parameters for updating the iterates.
+	double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;
+
+	// Parameters for updating the trust region size delta.
+	double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;
+
+	int n = fun_obj->get_nr_variable();
+	int i, cg_iter;
+	double delta, snorm, one=1.0;
+	double alpha, f, fnew, prered, actred, gs;
+	int search = 1, iter = 1, inc = 1;
+	double *s = new double[n];
+	double *r = new double[n];
+	double *w_new = new double[n];
+	double *g = new double[n];
+
+	for (i=0; i<n; i++)
+		w[i] = 0;
+
+	f = fun_obj->fun(w);
+	fun_obj->grad(w, g);
+	delta = dnrm2_(&n, g, &inc);
+	double gnorm1 = delta;
+	double gnorm = gnorm1;
+
+	if (gnorm <= eps*gnorm1)
+		search = 0;
+
+	iter = 1;
+
+	while (iter <= max_iter && search)
+	{
+		cg_iter = trcg(delta, g, s, r);
+
+		memcpy(w_new, w, sizeof(double)*n);
+		daxpy_(&n, &one, s, &inc, w_new, &inc);
+
+		gs = ddot_(&n, g, &inc, s, &inc);
+		prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc));
+		fnew = fun_obj->fun(w_new);
+
+		// Compute the actual reduction.
+		actred = f - fnew;
+
+		// On the first iteration, adjust the initial step bound.
+		snorm = dnrm2_(&n, s, &inc);
+		if (iter == 1)
+			delta = min(delta, snorm);
+
+		// Compute prediction alpha*snorm of the step.
+		if (fnew - f - gs <= 0)
+			alpha = sigma3;
+		else
+			alpha = max(sigma1, -0.5*(gs/(fnew - f - gs)));
+
+		// Update the trust region bound according to the ratio of actual to predicted reduction.
+		if (actred < eta0*prered)
+			delta = min(max(alpha, sigma1)*snorm, sigma2*delta);
+		else if (actred < eta1*prered)
+			delta = max(sigma1*delta, min(alpha*snorm, sigma2*delta));
+		else if (actred < eta2*prered)
+			delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta));
+		else
+			delta = max(delta, min(alpha*snorm, sigma3*delta));
+
+		printf("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter);
+
+		if (actred > eta0*prered)
+		{
+			iter++;
+			memcpy(w, w_new, sizeof(double)*n);
+			f = fnew;
+			fun_obj->grad(w, g);
+
+			gnorm = dnrm2_(&n, g, &inc);
+			if (gnorm <= eps*gnorm1)
+				break;
+		}
+		if (f < -1.0e+32)
+		{
+			printf("warning: f < -1.0e+32\n");
+			break;
+		}
+		if (fabs(actred) <= 0 && prered <= 0)
+		{
+			printf("warning: actred and prered <= 0\n");
+			break;
+		}
+		if (fabs(actred) <= 1.0e-12*fabs(f) &&
+		    fabs(prered) <= 1.0e-12*fabs(f))
+		{
+			printf("warning: actred and prered too small\n");
+			break;
+		}
+	}
+
+	delete[] g;
+	delete[] r;
+	delete[] w_new;
+	delete[] s;
+}
+
+int TRON::trcg(double delta, double *g, double *s, double *r)
+{
+	int i, inc = 1;
+	int n = fun_obj->get_nr_variable();
+	double one = 1;
+	double *d = new double[n];
+	double *Hd = new double[n];
+	double rTr, rnewTrnew, alpha, beta, cgtol;
+
+	for (i=0; i<n; i++)
+	{
+		s[i] = 0;
+		r[i] = -g[i];
+		d[i] = r[i];
+	}
+	cgtol = 0.1*dnrm2_(&n, g, &inc);
+
+	int cg_iter = 0;
+	rTr = ddot_(&n, r, &inc, r, &inc);
+	while (1)
+	{
+		if (dnrm2_(&n, r, &inc) <= cgtol)
+			break;
+		cg_iter++;
+		fun_obj->Hv(d, Hd);
+
+		alpha = rTr/ddot_(&n, d, &inc, Hd, &inc);
+		daxpy_(&n, &alpha, d, &inc, s, &inc);
+		if (dnrm2_(&n, s, &inc) > delta)
+		{
+			printf("cg reaches trust region boundary\n");
+			alpha = -alpha;
+			daxpy_(&n, &alpha, d, &inc, s, &inc);
+
+			double std = ddot_(&n, s, &inc, d, &inc);
+			double sts = ddot_(&n, s, &inc, s, &inc);
+			double dtd = ddot_(&n, d, &inc, d, &inc);
+			double dsq = delta*delta;
+			double rad = sqrt(std*std + dtd*(dsq-sts));
+			if (std >= 0)
+				alpha = (dsq - sts)/(std + rad);
+			else
+				alpha = (rad - std)/dtd;
+			daxpy_(&n, &alpha, d, &inc, s, &inc);
+			alpha = -alpha;
+			daxpy_(&n, &alpha, Hd, &inc, r, &inc);
+			break;
+		}
+		alpha = -alpha;
+		daxpy_(&n, &alpha, Hd, &inc, r, &inc);
+		rnewTrnew = ddot_(&n, r, &inc, r, &inc);
+		beta = rnewTrnew/rTr;
+		dscal_(&n, &beta, d, &inc);
+		daxpy_(&n, &one, r, &inc, d, &inc);
+		rTr = rnewTrnew;
+	}
+
+	delete[] d;
+	delete[] Hd;
+
+	return(cg_iter);
+}
+
+double TRON::norm_inf(int n, double *x)
+{
+	double dmax = fabs(x[0]);
+	for (int i=1; i<n; i++)
+		if (fabs(x[i]) >= dmax)
+			dmax = fabs(x[i]);
+	return(dmax);
+}
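The four delta updates near the end of TRON::tron implement a standard trust-region rule: the step s is judged by comparing the actual reduction f - fnew against the model-predicted reduction prered, with thresholds eta0 < eta1 < eta2. A tiny standalone sketch of that classification (the numbers are made up):

#include <stdio.h>

int main()
{
	double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;
	double f = 10.0, fnew = 9.2; // made-up objective values
	double prered = 1.0;         // made-up model-predicted reduction
	double actred = f - fnew;    // actual reduction = 0.8

	if (actred < eta0*prered)
		printf("very poor agreement: shrink delta, reject the step\n");
	else if (actred < eta1*prered)
		printf("poor agreement: shrink delta, but accept the step\n");
	else if (actred < eta2*prered)
		printf("fair agreement: keep delta roughly unchanged\n");
	else
		printf("good agreement: possibly enlarge delta\n"); // this branch fires here
	return 0;
}

In the solver itself the step is accepted whenever actred > eta0*prered; only then are w, f, and the gradient refreshed.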
diff --git a/tron.h b/tron.h
new file mode 100644
index 0000000..fe6a96b
--- /dev/null
+++ b/tron.h
@@ -0,0 +1,32 @@
+#ifndef _TRON_H
+#define _TRON_H
+
+class function
+{
+public:
+	virtual double fun(double *w) = 0 ;
+	virtual void grad(double *w, double *g) = 0 ;
+	virtual void Hv(double *s, double *Hs) = 0 ;
+
+	virtual int get_nr_variable(void) = 0 ;
+	virtual ~function(void){}
+};
+
+class TRON
+{
+public:
+	TRON(const function *fun_obj, double eps = 0.1, int max_iter = 1000);
+	~TRON();
+
+	void tron(double *w);
+
+private:
+	int trcg(double delta, double *g, double *s, double *r);
+	double norm_inf(int n, double *x);
+
+	double eps;
+	int max_iter;
+	function *fun_obj;
+};
+
+#endif
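tron.h is the entire contract a caller needs: implement fun, grad, Hv, and get_nr_variable for an objective and pass it to TRON. A toy sketch, not part of the package, that minimizes f(w) = 0.5*||w - c||^2 (gradient w - c, identity Hessian); it must be linked with tron.cpp and the bundled blas library:

#include <stdio.h>
#include "tron.h"

// Toy objective f(w) = 0.5*||w - c||^2; its minimizer is w = c.
class quadratic: public function
{
public:
	quadratic(const double *c, int n): c(c), n(n) {}

	double fun(double *w)
	{
		double f = 0;
		for(int i=0;i<n;i++)
			f += 0.5*(w[i]-c[i])*(w[i]-c[i]);
		return f;
	}
	void grad(double *w, double *g)
	{
		for(int i=0;i<n;i++)
			g[i] = w[i]-c[i]; // gradient of the quadratic
	}
	void Hv(double *s, double *Hs)
	{
		for(int i=0;i<n;i++)
			Hs[i] = s[i]; // identity Hessian: H*s = s
	}
	int get_nr_variable(void) { return n; }
private:
	const double *c;
	int n;
};

int main()
{
	double c[] = {1.0, -2.0, 3.0}; // made-up target point
	double w[3];
	quadratic q(c, 3);
	TRON solver(&q, 0.1, 1000);
	solver.tron(w); // tron() initializes w to 0 internally
	printf("solution: %g %g %g\n", w[0], w[1], w[2]);
	return 0;
}

Since tron() starts from w = 0 and the problem is strictly convex, the printed solution should match c up to the tolerance eps.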
diff --git a/windows/predict.exe b/windows/predict.exe
new file mode 100755
index 0000000..3e06621
Binary files /dev/null and b/windows/predict.exe differ
diff --git a/windows/predict.mexw32 b/windows/predict.mexw32
new file mode 100755
index 0000000..3980d27
Binary files /dev/null and b/windows/predict.mexw32 differ
diff --git a/windows/read_sparse.mexw32 b/windows/read_sparse.mexw32
new file mode 100755
index 0000000..0fa2646
Binary files /dev/null and b/windows/read_sparse.mexw32 differ
diff --git a/windows/train.exe b/windows/train.exe
new file mode 100755
index 0000000..43f80de
Binary files /dev/null and b/windows/train.exe differ
diff --git a/windows/train.mexw32 b/windows/train.mexw32
new file mode 100755
index 0000000..c1a266a
Binary files /dev/null and b/windows/train.mexw32 differ