@echo "please type make under Octave"
binary: train.$(MEX_EXT) predict.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)
-train.$(MEX_EXT): train.c ../linear.h ../tron.cpp ../linear.cpp linear_model_matlab.c \
+train.$(MEX_EXT): train.c ../linear.h ../newton.cpp ../linear.cpp linear_model_matlab.c \
../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
- $(MEX) $(MEX_OPTION) train.c ../tron.cpp ../linear.cpp linear_model_matlab.c \
+ $(MEX) $(MEX_OPTION) train.c ../newton.cpp ../linear.cpp linear_model_matlab.c \
../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
-predict.$(MEX_EXT): predict.c ../linear.h ../tron.cpp ../linear.cpp linear_model_matlab.c \
+predict.$(MEX_EXT): predict.c ../linear.h ../newton.cpp ../linear.cpp linear_model_matlab.c \
../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
- $(MEX) $(MEX_OPTION) predict.c ../tron.cpp ../linear.cpp linear_model_matlab.c \
+ $(MEX) $(MEX_OPTION) predict.c ../newton.cpp ../linear.cpp linear_model_matlab.c \
../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
libsvmread.$(MEX_EXT): libsvmread.c
if(exist('OCTAVE_VERSION', 'builtin'))
mex libsvmread.c
mex libsvmwrite.c
- mex -I.. train.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
- mex -I.. predict.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+ mex -I.. train.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+ mex -I.. predict.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
% This part is for MATLAB
% Add -largeArrayDims on 64-bit machines of MATLAB
else
mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c
mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c
- mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims train.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
- mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims predict.c linear_model_matlab.c ../linear.cpp ../tron.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+ mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims train.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
+ mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims predict.c linear_model_matlab.c ../linear.cpp ../newton.cpp ../blas/daxpy.c ../blas/ddot.c ../blas/dnrm2.c ../blas/dscal.c
end
catch err
fprintf('Error: %s failed (line %d)\n', err.stack(1).file, err.stack(1).line);
" |f'(alpha)|_1 <= eps |f'(alpha0)|,\n"
" where f is the dual function (default 0.1)\n"
"-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n"
+ "-R : not regularize the bias; must with -B 1 to have the bias; DON'T use this unless you know what it is\n"
+ " (for -s 0, 2, 5, 6, 11)\n"
"-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
"-v n: n-fold cross validation mode\n"
"-C : find parameters (C for -s 0, 2 and C, p for -s 11)\n"
param.weight_label = NULL;
param.weight = NULL;
param.init_sol = NULL;
+ param.regularize_bias = 1;
flag_cross_validation = 0;
col_format_flag = 0;
flag_C_specified = 0;
{
if(argv[i][0] != '-') break;
++i;
- if(i>=argc && argv[i-1][1] != 'q' && argv[i-1][1] != 'C') // since options -q and -C have no parameter
+ if(i>=argc && argv[i-1][1] != 'q' && argv[i-1][1] != 'C'
+	   && argv[i-1][1] != 'R') // since options -q, -C, and -R have no parameter
return 1;
switch(argv[i-1][1])
{
flag_find_parameters = 1;
i--;
break;
+ case 'R':
+ param.regularize_bias = 0;
+ i--;
+ break;
default:
mexPrintf("unknown option\n");
return 1;
class parameter(Structure):
- _names = ["solver_type", "eps", "C", "nr_weight", "weight_label", "weight", "p", "nu", "init_sol"]
- _types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double, c_double, POINTER(c_double)]
+ _names = ["solver_type", "eps", "C", "nr_weight", "weight_label", "weight", "p", "nu", "init_sol", "regularize_bias"]
+ _types = [c_int, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double, c_double, POINTER(c_double), c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
self.weight = None
self.init_sol = None
self.bias = -1
+ self.regularize_bias = 1
self.flag_cross_validation = False
self.flag_C_specified = False
self.flag_p_specified = False
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-C":
self.flag_find_parameters = True
-
+ elif argv[i] == "-R":
+ self.regularize_bias = 0
else:
raise ValueError("Wrong options")
i += 1
|f'(alpha)|_1 <= eps |f'(alpha0)|,
where f is the dual function (default 0.1)
-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
+ -R : not regularize the bias; must with -B 1 to have the bias; DON'T use this unless you know what it is
+ (for -s 0, 2, 5, 6, 11)"
-wi weight: weights adjust the parameter C of different classes (see README for details)
-v n: n-fold cross validation mode
-C : find parameters (C for -s 0, 2 and C, p for -s 11)