org.machinelearning.svm.libsvm.svm.m4 (from the openchemlib artifact: Open Source Chemistry Library)
define(`swap',`do {$1 tmp=$2; $2=$3; $3=tmp;} while(false)')
define(`Qfloat',`float')
define(`SIZE_OF_QFLOAT',4)
define(`TAU',1e-12)
changecom(`//',`')
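// This file is an m4 template: running it through m4 expands the macros above and
// produces the Java source svm.java. `swap' expands to an in-place exchange through a
// temporary of the type given as its first argument, `Qfloat' is the floating-point type
// used for cached kernel columns, and `TAU' is the small positive constant substituted
// for non-positive quadratic coefficients in the SMO updates below.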
package libsvm;
import java.io.*;
import java.util.*;
//
// Kernel Cache
//
// l is the total number of data items
// size is the cache size limit in bytes
//
class Cache {
private final int l;
private long size;
private final class head_t
{
head_t prev, next; // a circular list
Qfloat[] data;
int len; // data[0,len) is cached in this entry
}
private final head_t[] head;
private head_t lru_head;
Cache(int l_, long size_)
{
l = l_;
size = size_;
head = new head_t[l];
for(int i=0;i<l;i++) head[i] = new head_t();
size /= SIZE_OF_QFLOAT;
size -= l * (16/SIZE_OF_QFLOAT); // sizeof(head_t) == 16
size = Math.max(size, 2 * (long) l); // cache must be large enough for two columns
lru_head = new head_t();
lru_head.next = lru_head.prev = lru_head;
}
private void lru_delete(head_t h)
{
// delete from current location
h.prev.next = h.next;
h.next.prev = h.prev;
}
private void lru_insert(head_t h)
{
// insert to last position
h.next = lru_head;
h.prev = lru_head.prev;
h.prev.next = h;
h.next.prev = h;
}
// request data [0,len)
// return some position p where [p,len) need to be filled
// (p >= len if nothing needs to be filled)
// java: simulate pointer using single-element array
int get_data(int index, Qfloat[][] data, int len)
{
head_t h = head[index];
if(h.len > 0) lru_delete(h);
int more = len - h.len;
if(more > 0)
{
// free old space
while(size < more)
{
head_t old = lru_head.next;
lru_delete(old);
size += old.len;
old.data = null;
old.len = 0;
}
// allocate new space
Qfloat[] new_data = new Qfloat[len];
if(h.data != null) System.arraycopy(h.data,0,new_data,0,h.len);
h.data = new_data;
size -= more;
swap(int,h.len,len);
}
lru_insert(h);
data[0] = h.data;
return len;
}
void swap_index(int i, int j)
{
if(i==j) return;
if(head[i].len > 0) lru_delete(head[i]);
if(head[j].len > 0) lru_delete(head[j]);
swap(Qfloat[],head[i].data,head[j].data);
swap(int,head[i].len,head[j].len);
if(head[i].len > 0) lru_insert(head[i]);
if(head[j].len > 0) lru_insert(head[j]);
if(i>j) swap(int,i,j);
for(head_t h = lru_head.next; h!=lru_head; h=h.next)
{
if(h.len > i)
{
if(h.len > j)
swap(Qfloat,h.data[i],h.data[j]);
else
{
// give up
lru_delete(h);
size += h.len;
h.data = null;
h.len = 0;
}
}
}
}
}
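// Illustrative sketch (not part of LIBSVM): how a caller is expected to use
// Cache.get_data(). The Qfloat[][] argument simulates a C pointer-to-pointer; the
// return value is the first position that the caller still has to fill.
class CacheUsageSketch
{
static Qfloat[] column(Cache cache, int i, int len)
{
Qfloat[][] data = new Qfloat[1][];
int start = cache.get_data(i, data, len);
for(int j=start;j<len;j++)
data[0][j] = 0; // a real Q matrix would compute kernel values here
return data[0];
}
}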
//
// Kernel evaluation
//
// the static method k_function is for doing single kernel evaluation
// the constructor of Kernel prepares to calculate the l*l kernel matrix
// the member function get_Q is for getting one column from the Q Matrix
//
abstract class QMatrix {
abstract Qfloat[] get_Q(int column, int len);
abstract double[] get_QD();
abstract void swap_index(int i, int j);
};
abstract class Kernel extends QMatrix {
private svm_node[][] x;
private final double[] x_square;
// svm_parameter
private final int kernel_type;
private final int degree;
private final double gamma;
private final double coef0;
abstract Qfloat[] get_Q(int column, int len);
abstract double[] get_QD();
void swap_index(int i, int j)
{
swap(svm_node[],x[i],x[j]);
if(x_square != null) swap(double,x_square[i],x_square[j]);
}
private static double powi(double base, int times)
{
double tmp = base, ret = 1.0;
for(int t=times; t>0; t/=2)
{
if(t%2==1) ret*=tmp;
tmp = tmp * tmp;
}
return ret;
}
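// Worked example: powi(3,5) by repeated squaring:
// t=5 (odd):  ret=1*3=3,    tmp=3*3=9
// t=2 (even): ret=3,        tmp=9*9=81
// t=1 (odd):  ret=3*81=243, tmp=81*81=6561
// result 243 = 3^5, using O(log times) multiplications.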
double kernel_function(int i, int j)
{
switch(kernel_type)
{
case svm_parameter.LINEAR:
return dot(x[i],x[j]);
case svm_parameter.POLY:
return powi(gamma*dot(x[i],x[j])+coef0,degree);
case svm_parameter.RBF:
return Math.exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j])));
case svm_parameter.SIGMOID:
return Math.tanh(gamma*dot(x[i],x[j])+coef0);
case svm_parameter.PRECOMPUTED:
return x[i][(int)(x[j][0].value)].value;
default:
return 0; // java
}
}
Kernel(int l, svm_node[][] x_, svm_parameter param)
{
this.kernel_type = param.kernel_type;
this.degree = param.degree;
this.gamma = param.gamma;
this.coef0 = param.coef0;
x = (svm_node[][])x_.clone();
if(kernel_type == svm_parameter.RBF)
{
x_square = new double[l];
for(int i=0;i<l;i++)
x_square[i] = dot(x[i],x[i]);
}
else x_square = null;
}
static double dot(svm_node[] x, svm_node[] y)
{
double sum = 0;
int xlen = x.length;
int ylen = y.length;
int i = 0;
int j = 0;
while(i < xlen && j < ylen)
{
if(x[i].index == y[j].index)
sum += x[i++].value * y[j++].value;
else
{
if(x[i].index > y[j].index)
++j;
else
++i;
}
}
return sum;
}
static double k_function(svm_node[] x, svm_node[] y,
svm_parameter param)
{
switch(param.kernel_type)
{
case svm_parameter.LINEAR:
return dot(x,y);
case svm_parameter.POLY:
return powi(param.gamma*dot(x,y)+param.coef0,param.degree);
case svm_parameter.RBF:
{
double sum = 0;
int xlen = x.length;
int ylen = y.length;
int i = 0;
int j = 0;
while(i < xlen && j < ylen)
{
if(x[i].index == y[j].index)
{
double d = x[i++].value - y[j++].value;
sum += d*d;
}
else if(x[i].index > y[j].index)
{
sum += y[j].value * y[j].value;
++j;
}
else
{
sum += x[i].value * x[i].value;
++i;
}
}
while(i < xlen)
{
sum += x[i].value * x[i].value;
++i;
}
while(j < ylen)
{
sum += y[j].value * y[j].value;
++j;
}
return Math.exp(-param.gamma*sum);
}
case svm_parameter.SIGMOID:
return Math.tanh(param.gamma*dot(x,y)+param.coef0);
case svm_parameter.PRECOMPUTED:
return x[(int)(y[0].value)].value;
default:
return 0; // java
}
}
}
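// Illustrative sketch (not part of LIBSVM): evaluating one RBF kernel value with
// Kernel.k_function on two sparse vectors. Assumes the standard svm_node (index,value)
// and svm_parameter classes of this package.
class KernelUsageSketch
{
static svm_node node(int index, double value)
{
svm_node n = new svm_node();
n.index = index;
n.value = value;
return n;
}
static double rbfExample()
{
svm_node[] x = { node(1,1.0), node(3,2.0) };
svm_node[] y = { node(1,0.5), node(2,1.0) };
svm_parameter param = new svm_parameter();
param.kernel_type = svm_parameter.RBF;
param.gamma = 0.5;
// ||x-y||^2 = (1.0-0.5)^2 + 2.0^2 + 1.0^2 = 5.25, so the value is exp(-0.5*5.25)
return Kernel.k_function(x, y, param);
}
}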
// An SMO algorithm in Fan et al., JMLR 6(2005), p. 1889--1918
// Solves:
//
// min 0.5(\alpha^T Q \alpha) + p^T \alpha
//
// y^T \alpha = \delta
// y_i = +1 or -1
// 0 <= alpha_i <= Cp for y_i = 1
// 0 <= alpha_i <= Cn for y_i = -1
//
// Given:
//
// Q, p, y, Cp, Cn, and an initial feasible point \alpha
// l is the size of vectors and matrices
// eps is the stopping tolerance
//
// solution will be put in \alpha, objective value will be put in obj
//
class Solver {
int active_size;
byte[] y;
double[] G; // gradient of objective function
static final byte LOWER_BOUND = 0;
static final byte UPPER_BOUND = 1;
static final byte FREE = 2;
byte[] alpha_status; // LOWER_BOUND, UPPER_BOUND, FREE
double[] alpha;
QMatrix Q;
double[] QD;
double eps;
double Cp,Cn;
double[] p;
int[] active_set;
double[] G_bar; // gradient, if we treat free variables as 0
int l;
boolean unshrink; // XXX
static final double INF = java.lang.Double.POSITIVE_INFINITY;
double get_C(int i)
{
return (y[i] > 0)? Cp : Cn;
}
void update_alpha_status(int i)
{
if(alpha[i] >= get_C(i))
alpha_status[i] = UPPER_BOUND;
else if(alpha[i] <= 0)
alpha_status[i] = LOWER_BOUND;
else alpha_status[i] = FREE;
}
boolean is_upper_bound(int i) { return alpha_status[i] == UPPER_BOUND; }
boolean is_lower_bound(int i) { return alpha_status[i] == LOWER_BOUND; }
boolean is_free(int i) { return alpha_status[i] == FREE; }
// java: information about solution except alpha,
// because we cannot return multiple values otherwise...
static class SolutionInfo {
double obj;
double rho;
double upper_bound_p;
double upper_bound_n;
double r; // for Solver_NU
}
void swap_index(int i, int j)
{
Q.swap_index(i,j);
swap(byte, y[i],y[j]);
swap(double, G[i],G[j]);
swap(byte, alpha_status[i],alpha_status[j]);
swap(double, alpha[i],alpha[j]);
swap(double, p[i],p[j]);
swap(int, active_set[i],active_set[j]);
swap(double, G_bar[i],G_bar[j]);
}
void reconstruct_gradient()
{
// reconstruct inactive elements of G from G_bar and free variables
if(active_size == l) return;
int i,j;
int nr_free = 0;
for(j=active_size;j<l;j++)
G[j] = G_bar[j] + p[j];
for(j=0;j<active_size;j++)
if(is_free(j))
nr_free++;
if(2*nr_free < active_size)
svm.info("\nWARNING: using -h 0 may be faster\n");
if(nr_free*l > 2*active_size*(l-active_size))
{
for(i=active_size;i<l;i++)
{
Qfloat[] Q_i = Q.get_Q(i,active_size);
for(j=0;j<active_size;j++)
if(is_free(j))
G[i] += alpha[j] * Q_i[j];
}
}
else
{
for(i=0;i<active_size;i++)
if(is_free(i))
{
Qfloat[] Q_i = Q.get_Q(i,l);
double alpha_i = alpha[i];
for(j=active_size;j<l;j++)
G[j] += alpha_i * Q_i[j];
}
}
}
void Solve(int l, QMatrix Q, double[] p_, byte[] y_,
double[] alpha_, double Cp, double Cn, double eps, SolutionInfo si, int shrinking)
{
this.l = l;
this.Q = Q;
QD = Q.get_QD();
p = (double[])p_.clone();
y = (byte[])y_.clone();
alpha = (double[])alpha_.clone();
this.Cp = Cp;
this.Cn = Cn;
this.eps = eps;
this.unshrink = false;
// initialize alpha_status
{
alpha_status = new byte[l];
for(int i=0;i<l;i++)
update_alpha_status(i);
}
// initialize active set (for shrinking)
{
active_set = new int[l];
for(int i=0;i<l;i++)
active_set[i] = i;
active_size = l;
}
// initialize gradient
{
G = new double[l];
G_bar = new double[l];
int i;
for(i=0;i<l;i++)
{
G[i] = p[i];
G_bar[i] = 0;
}
for(i=0;i<l;i++)
if(!is_lower_bound(i))
{
Qfloat[] Q_i = Q.get_Q(i,l);
double alpha_i = alpha[i];
int j;
for(j=0;j<l;j++)
G[j] += alpha_i*Q_i[j];
if(is_upper_bound(i))
for(j=0;j<l;j++)
G_bar[j] += get_C(i) * Q_i[j];
}
}
// optimization step
int iter = 0;
int max_iter = Math.max(10000000, l>Integer.MAX_VALUE/100 ? Integer.MAX_VALUE : 100*l);
int counter = Math.min(l,1000)+1;
int[] working_set = new int[2];
while(iter < max_iter)
{
// show progress and do shrinking
if(--counter == 0)
{
counter = Math.min(l,1000);
if(shrinking!=0) do_shrinking();
svm.info(".");
}
if(select_working_set(working_set)!=0)
{
// reconstruct the whole gradient
reconstruct_gradient();
// reset active set size and check
active_size = l;
svm.info("*");
if(select_working_set(working_set)!=0)
break;
else
counter = 1; // do shrinking next iteration
}
int i = working_set[0];
int j = working_set[1];
++iter;
// update alpha[i] and alpha[j], handle bounds carefully
Qfloat[] Q_i = Q.get_Q(i,active_size);
Qfloat[] Q_j = Q.get_Q(j,active_size);
double C_i = get_C(i);
double C_j = get_C(j);
double old_alpha_i = alpha[i];
double old_alpha_j = alpha[j];
if(y[i]!=y[j])
{
double quad_coef = QD[i]+QD[j]+2*Q_i[j];
if (quad_coef <= 0)
quad_coef = TAU;
double delta = (-G[i]-G[j])/quad_coef;
double diff = alpha[i] - alpha[j];
alpha[i] += delta;
alpha[j] += delta;
if(diff > 0)
{
if(alpha[j] < 0)
{
alpha[j] = 0;
alpha[i] = diff;
}
}
else
{
if(alpha[i] < 0)
{
alpha[i] = 0;
alpha[j] = -diff;
}
}
if(diff > C_i - C_j)
{
if(alpha[i] > C_i)
{
alpha[i] = C_i;
alpha[j] = C_i - diff;
}
}
else
{
if(alpha[j] > C_j)
{
alpha[j] = C_j;
alpha[i] = C_j + diff;
}
}
}
else
{
double quad_coef = QD[i]+QD[j]-2*Q_i[j];
if (quad_coef <= 0)
quad_coef = TAU;
double delta = (G[i]-G[j])/quad_coef;
double sum = alpha[i] + alpha[j];
alpha[i] -= delta;
alpha[j] += delta;
if(sum > C_i)
{
if(alpha[i] > C_i)
{
alpha[i] = C_i;
alpha[j] = sum - C_i;
}
}
else
{
if(alpha[j] < 0)
{
alpha[j] = 0;
alpha[i] = sum;
}
}
if(sum > C_j)
{
if(alpha[j] > C_j)
{
alpha[j] = C_j;
alpha[i] = sum - C_j;
}
}
else
{
if(alpha[i] < 0)
{
alpha[i] = 0;
alpha[j] = sum;
}
}
}
// update G
double delta_alpha_i = alpha[i] - old_alpha_i;
double delta_alpha_j = alpha[j] - old_alpha_j;
for(int k=0;k<active_size;k++)
{
G[k] += Q_i[k]*delta_alpha_i + Q_j[k]*delta_alpha_j;
}
// update alpha_status and G_bar
{
boolean ui = is_upper_bound(i);
boolean uj = is_upper_bound(j);
update_alpha_status(i);
update_alpha_status(j);
int k;
if(ui != is_upper_bound(i))
{
Q_i = Q.get_Q(i,l);
if(ui)
for(k=0;k<l;k++)
G_bar[k] -= C_i * Q_i[k];
else
for(k=0;k<l;k++)
G_bar[k] += C_i * Q_i[k];
}
if(uj != is_upper_bound(j))
{
Q_j = Q.get_Q(j,l);
if(uj)
for(k=0;k<l;k++)
G_bar[k] -= C_j * Q_j[k];
else
for(k=0;k<l;k++)
G_bar[k] += C_j * Q_j[k];
}
}
} // end of the main optimization loop
if(iter >= max_iter)
{
if(active_size < l)
{
// reconstruct the whole gradient to calculate objective value
reconstruct_gradient();
active_size = l;
svm.info("*");
}
System.err.print("\nWARNING: reaching max number of iterations\n");
}
// calculate rho
si.rho = calculate_rho();
// calculate objective value
{
double v = 0;
int i;
for(i=0;i<l;i++)
v += alpha[i] * (G[i] + p[i]);
si.obj = v/2;
}
// put back the solution
{
for(int i=0;i<l;i++)
alpha_[active_set[i]] = alpha[i];
}
si.upper_bound_p = Cp;
si.upper_bound_n = Cn;
svm.info("\noptimization finished, #iter = "+iter+"\n");
}
// return 1 if already optimal, return 0 otherwise
int select_working_set(int[] working_set)
{
// return i,j such that
// i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
// j: minimizes the decrease of obj value
// (if quadratic coefficient <= 0, replace it with tau)
// -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
double Gmax = -INF;
double Gmax2 = -INF;
int Gmax_idx = -1;
int Gmin_idx = -1;
double obj_diff_min = INF;
for(int t=0;t<active_size;t++)
if(y[t]==+1)
{
if(!is_upper_bound(t))
if(-G[t] >= Gmax)
{
Gmax = -G[t];
Gmax_idx = t;
}
}
else
{
if(!is_lower_bound(t))
if(G[t] >= Gmax)
{
Gmax = G[t];
Gmax_idx = t;
}
}
int i = Gmax_idx;
Qfloat[] Q_i = null;
if(i != -1) // null Q_i not accessed: Gmax=-INF if i=-1
Q_i = Q.get_Q(i,active_size);
for(int j=0;j<active_size;j++)
{
if(y[j]==+1)
{
if (!is_lower_bound(j))
{
double grad_diff=Gmax+G[j];
if (G[j] >= Gmax2)
Gmax2 = G[j];
if (grad_diff > 0)
{
double obj_diff;
double quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j];
if (quad_coef > 0)
obj_diff = -(grad_diff*grad_diff)/quad_coef;
else
obj_diff = -(grad_diff*grad_diff)/TAU;
if (obj_diff <= obj_diff_min)
{
Gmin_idx=j;
obj_diff_min = obj_diff;
}
}
}
}
else
{
if (!is_upper_bound(j))
{
double grad_diff= Gmax-G[j];
if (-G[j] >= Gmax2)
Gmax2 = -G[j];
if (grad_diff > 0)
{
double obj_diff;
double quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j];
if (quad_coef > 0)
obj_diff = -(grad_diff*grad_diff)/quad_coef;
else
obj_diff = -(grad_diff*grad_diff)/TAU;
if (obj_diff <= obj_diff_min)
{
Gmin_idx=j;
obj_diff_min = obj_diff;
}
}
}
}
}
if(Gmax+Gmax2 < eps || Gmin_idx == -1)
return 1;
working_set[0] = Gmax_idx;
working_set[1] = Gmin_idx;
return 0;
}
private boolean be_shrunk(int i, double Gmax1, double Gmax2)
{
if(is_upper_bound(i))
{
if(y[i]==+1)
return(-G[i] > Gmax1);
else
return(-G[i] > Gmax2);
}
else if(is_lower_bound(i))
{
if(y[i]==+1)
return(G[i] > Gmax2);
else
return(G[i] > Gmax1);
}
else
return(false);
}
void do_shrinking()
{
int i;
double Gmax1 = -INF; // max { -y_i * grad(f)_i | i in I_up(\alpha) }
double Gmax2 = -INF; // max { y_i * grad(f)_i | i in I_low(\alpha) }
// find maximal violating pair first
for(i=0;i<active_size;i++)
{
if(y[i]==+1)
{
if(!is_upper_bound(i))
{
if(-G[i] >= Gmax1)
Gmax1 = -G[i];
}
if(!is_lower_bound(i))
{
if(G[i] >= Gmax2)
Gmax2 = G[i];
}
}
else
{
if(!is_upper_bound(i))
{
if(-G[i] >= Gmax2)
Gmax2 = -G[i];
}
if(!is_lower_bound(i))
{
if(G[i] >= Gmax1)
Gmax1 = G[i];
}
}
}
if(unshrink == false && Gmax1 + Gmax2 <= eps*10)
{
unshrink = true;
reconstruct_gradient();
active_size = l;
}
for(i=0;i<active_size;i++)
if (be_shrunk(i, Gmax1, Gmax2))
{
active_size--;
while (active_size > i)
{
if (!be_shrunk(active_size, Gmax1, Gmax2))
{
swap_index(i,active_size);
break;
}
active_size--;
}
}
}
double calculate_rho()
{
double r;
int nr_free = 0;
double ub = INF, lb = -INF, sum_free = 0;
for(int i=0;i<active_size;i++)
{
double yG = y[i]*G[i];
if(is_lower_bound(i))
{
if(y[i] > 0)
ub = Math.min(ub,yG);
else
lb = Math.max(lb,yG);
}
else if(is_upper_bound(i))
{
if(y[i] < 0)
ub = Math.min(ub,yG);
else
lb = Math.max(lb,yG);
}
else
{
++nr_free;
sum_free += yG;
}
}
if(nr_free>0)
r = sum_free/nr_free;
else
r = (ub+lb)/2;
return r;
}
}
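// Illustrative sketch (not part of LIBSVM): the two-variable update performed inside
// Solve() when y_i != y_j, restated with plain doubles. delta is the unconstrained
// Newton step; the pair is then clipped back into [0,C_i] x [0,C_j] while keeping
// alpha_i - alpha_j fixed, exactly as in the loop above.
class SmoStepSketch
{
// returns {new_alpha_i, new_alpha_j}; quad_coef = QD_i + QD_j + 2*Q_ij
static double[] oppositeSignStep(double alpha_i, double alpha_j,
double G_i, double G_j, double quad_coef, double C_i, double C_j)
{
if(quad_coef <= 0) quad_coef = TAU;
double delta = (-G_i - G_j)/quad_coef;
double diff = alpha_i - alpha_j;
alpha_i += delta;
alpha_j += delta;
if(diff > 0)
{
if(alpha_j < 0) { alpha_j = 0; alpha_i = diff; }
}
else
{
if(alpha_i < 0) { alpha_i = 0; alpha_j = -diff; }
}
if(diff > C_i - C_j)
{
if(alpha_i > C_i) { alpha_i = C_i; alpha_j = C_i - diff; }
}
else
{
if(alpha_j > C_j) { alpha_j = C_j; alpha_i = C_j + diff; }
}
return new double[]{alpha_i, alpha_j};
}
}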
//
// Solver for nu-svm classification and regression
//
// additional constraint: e^T \alpha = constant
//
final class Solver_NU extends Solver
{
private SolutionInfo si;
void Solve(int l, QMatrix Q, double[] p, byte[] y,
double[] alpha, double Cp, double Cn, double eps,
SolutionInfo si, int shrinking)
{
this.si = si;
super.Solve(l,Q,p,y,alpha,Cp,Cn,eps,si,shrinking);
}
// return 1 if already optimal, return 0 otherwise
int select_working_set(int[] working_set)
{
// return i,j such that y_i = y_j and
// i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
// j: minimizes the decrease of obj value
// (if quadratic coefficient <= 0, replace it with tau)
// -y_j*grad(f)_j < -y_i*grad(f)_i, j in I_low(\alpha)
double Gmaxp = -INF;
double Gmaxp2 = -INF;
int Gmaxp_idx = -1;
double Gmaxn = -INF;
double Gmaxn2 = -INF;
int Gmaxn_idx = -1;
int Gmin_idx = -1;
double obj_diff_min = INF;
for(int t=0;t<active_size;t++)
if(y[t]==+1)
{
if(!is_upper_bound(t))
if(-G[t] >= Gmaxp)
{
Gmaxp = -G[t];
Gmaxp_idx = t;
}
}
else
{
if(!is_lower_bound(t))
if(G[t] >= Gmaxn)
{
Gmaxn = G[t];
Gmaxn_idx = t;
}
}
int ip = Gmaxp_idx;
int in = Gmaxn_idx;
Qfloat[] Q_ip = null;
Qfloat[] Q_in = null;
if(ip != -1) // null Q_ip not accessed: Gmaxp=-INF if ip=-1
Q_ip = Q.get_Q(ip,active_size);
if(in != -1)
Q_in = Q.get_Q(in,active_size);
for(int j=0;j<active_size;j++)
{
if(y[j]==+1)
{
if (!is_lower_bound(j))
{
double grad_diff=Gmaxp+G[j];
if (G[j] >= Gmaxp2)
Gmaxp2 = G[j];
if (grad_diff > 0)
{
double obj_diff;
double quad_coef = QD[ip]+QD[j]-2*Q_ip[j];
if (quad_coef > 0)
obj_diff = -(grad_diff*grad_diff)/quad_coef;
else
obj_diff = -(grad_diff*grad_diff)/TAU;
if (obj_diff <= obj_diff_min)
{
Gmin_idx=j;
obj_diff_min = obj_diff;
}
}
}
}
else
{
if (!is_upper_bound(j))
{
double grad_diff=Gmaxn-G[j];
if (-G[j] >= Gmaxn2)
Gmaxn2 = -G[j];
if (grad_diff > 0)
{
double obj_diff;
double quad_coef = QD[in]+QD[j]-2*Q_in[j];
if (quad_coef > 0)
obj_diff = -(grad_diff*grad_diff)/quad_coef;
else
obj_diff = -(grad_diff*grad_diff)/TAU;
if (obj_diff <= obj_diff_min)
{
Gmin_idx=j;
obj_diff_min = obj_diff;
}
}
}
}
}
if(Math.max(Gmaxp+Gmaxp2,Gmaxn+Gmaxn2) < eps || Gmin_idx == -1)
return 1;
if(y[Gmin_idx] == +1)
working_set[0] = Gmaxp_idx;
else
working_set[0] = Gmaxn_idx;
working_set[1] = Gmin_idx;
return 0;
}
private boolean be_shrunk(int i, double Gmax1, double Gmax2, double Gmax3, double Gmax4)
{
if(is_upper_bound(i))
{
if(y[i]==+1)
return(-G[i] > Gmax1);
else
return(-G[i] > Gmax4);
}
else if(is_lower_bound(i))
{
if(y[i]==+1)
return(G[i] > Gmax2);
else
return(G[i] > Gmax3);
}
else
return(false);
}
void do_shrinking()
{
double Gmax1 = -INF; // max { -y_i * grad(f)_i | y_i = +1, i in I_up(\alpha) }
double Gmax2 = -INF; // max { y_i * grad(f)_i | y_i = +1, i in I_low(\alpha) }
double Gmax3 = -INF; // max { -y_i * grad(f)_i | y_i = -1, i in I_up(\alpha) }
double Gmax4 = -INF; // max { y_i * grad(f)_i | y_i = -1, i in I_low(\alpha) }
// find maximal violating pair first
int i;
for(i=0;i<active_size;i++)
{
if(!is_upper_bound(i))
{
if(y[i]==+1)
{
if(-G[i] > Gmax1) Gmax1 = -G[i];
}
else if(-G[i] > Gmax4) Gmax4 = -G[i];
}
if(!is_lower_bound(i))
{
if(y[i]==+1)
{
if(G[i] > Gmax2) Gmax2 = G[i];
}
else if(G[i] > Gmax3) Gmax3 = G[i];
}
}
if(unshrink == false && Math.max(Gmax1+Gmax2,Gmax3+Gmax4) <= eps*10)
{
unshrink = true;
reconstruct_gradient();
active_size = l;
}
for(i=0;i<active_size;i++)
if (be_shrunk(i, Gmax1, Gmax2, Gmax3, Gmax4))
{
active_size--;
while (active_size > i)
{
if (!be_shrunk(active_size, Gmax1, Gmax2, Gmax3, Gmax4))
{
swap_index(i,active_size);
break;
}
active_size--;
}
}
}
double calculate_rho()
{
int nr_free1 = 0,nr_free2 = 0;
double ub1 = INF, ub2 = INF;
double lb1 = -INF, lb2 = -INF;
double sum_free1 = 0, sum_free2 = 0;
for(int i=0;i<active_size;i++)
{
if(y[i]==+1)
{
if(is_upper_bound(i))
lb1 = Math.max(lb1,G[i]);
else if(is_lower_bound(i))
ub1 = Math.min(ub1,G[i]);
else
{
++nr_free1;
sum_free1 += G[i];
}
}
else
{
if(is_upper_bound(i))
lb2 = Math.max(lb2,G[i]);
else if(is_lower_bound(i))
ub2 = Math.min(ub2,G[i]);
else
{
++nr_free2;
sum_free2 += G[i];
}
}
}
double r1,r2;
if(nr_free1 > 0)
r1 = sum_free1/nr_free1;
else
r1 = (ub1+lb1)/2;
if(nr_free2 > 0)
r2 = sum_free2/nr_free2;
else
r2 = (ub2+lb2)/2;
si.r = (r1+r2)/2;
return (r1-r2)/2;
}
}
//
// Q matrices for various formulations
//
class SVC_Q extends Kernel
{
private final byte[] y;
private final Cache cache;
private final double[] QD;
SVC_Q(svm_problem prob, svm_parameter param, byte[] y_)
{
super(prob.l, prob.x, param);
y = (byte[])y_.clone();
cache = new Cache(prob.l,(long)(param.cache_size*(1<<20)));
QD = new double[prob.l];
for(int i=0;i 0) y[i] = +1; else y[i] = -1;
}
Solver s = new Solver();
s.Solve(l, new SVC_Q(prob,param,y), minus_ones, y,
alpha, Cp, Cn, param.eps, si, param.shrinking);
double sum_alpha=0;
for(i=0;i<l;i++)
sum_alpha += alpha[i];
if (Cp==Cn)
svm.info("nu = "+sum_alpha/(Cp*prob.l)+"\n");
for(i=0;i<l;i++)
alpha[i] *= y[i];
}
private static void solve_nu_svc(svm_problem prob, svm_parameter param,
double[] alpha, Solver.SolutionInfo si)
{
int i;
int l = prob.l;
double nu = param.nu;
byte[] y = new byte[l];
for(i=0;i<l;i++)
if(prob.y[i]>0)
y[i] = +1;
else
y[i] = -1;
double sum_pos = nu*l/2;
double sum_neg = nu*l/2;
for(i=0;i 0)
{
++nSV;
if(prob.y[i] > 0)
{
if(Math.abs(alpha[i]) >= si.upper_bound_p)
++nBSV;
}
else
{
if(Math.abs(alpha[i]) >= si.upper_bound_n)
++nBSV;
}
}
}
svm.info("nSV = "+nSV+", nBSV = "+nBSV+"\n");
decision_function f = new decision_function();
f.alpha = alpha;
f.rho = si.rho;
return f;
}
// Platt's binary SVM Probabilistic Output: an improvement from Lin et al.
private static void sigmoid_train(int l, double[] dec_values, double[] labels,
double[] probAB)
{
double A, B;
double prior1=0, prior0 = 0;
int i;
for (i=0;i<l;i++)
if (labels[i] > 0) prior1+=1;
else prior0+=1;
int max_iter=100; // Maximal number of iterations
double min_step=1e-10; // Minimal step taken in line search
double sigma=1e-12; // For numerically strict PD of Hessian
double eps=1e-5;
double hiTarget=(prior1+1.0)/(prior1+2.0);
double loTarget=1/(prior0+2.0);
double[] t= new double[l];
double fApB,p,q,h11,h22,h21,g1,g2,det,dA,dB,gd,stepsize;
double newA,newB,newf,d1,d2;
int iter;
// Initial Point and Initial Fun Value
A=0.0; B=Math.log((prior0+1.0)/(prior1+1.0));
double fval = 0.0;
for (i=0;i<l;i++)
{
if (labels[i]>0) t[i]=hiTarget;
else t[i]=loTarget;
fApB = dec_values[i]*A+B;
if (fApB>=0)
fval += t[i]*fApB + Math.log(1+Math.exp(-fApB));
else
fval += (t[i] - 1)*fApB +Math.log(1+Math.exp(fApB));
}
for (iter=0;iter<max_iter;iter++)
{
// Update Gradient and Hessian (use H' = H + sigma I)
h11=sigma; // numerically ensures strict PD
h22=sigma;
h21=0.0;g1=0.0;g2=0.0;
for (i=0;i<l;i++)
{
fApB = dec_values[i]*A+B;
if (fApB >= 0)
{
p=Math.exp(-fApB)/(1.0+Math.exp(-fApB));
q=1.0/(1.0+Math.exp(-fApB));
}
else
{
p=1.0/(1.0+Math.exp(fApB));
q=Math.exp(fApB)/(1.0+Math.exp(fApB));
}
d2=p*q;
h11+=dec_values[i]*dec_values[i]*d2;
h22+=d2;
h21+=dec_values[i]*d2;
d1=t[i]-p;
g1+=dec_values[i]*d1;
g2+=d1;
}
// Stopping Criteria
if (Math.abs(g1)<eps && Math.abs(g2)<eps)
break;
// Finding Newton direction: -inv(H') * g
det=h11*h22-h21*h21;
dA=-(h22*g1 - h21*g2) / det;
dB=-(-h21*g1 + h11*g2) / det;
gd=g1*dA+g2*dB;
stepsize = 1; // Line Search
while (stepsize >= min_step)
{
newA = A + stepsize * dA;
newB = B + stepsize * dB;
// New function value
newf = 0.0;
for (i=0;i<l;i++)
{
fApB = dec_values[i]*newA+newB;
if (fApB >= 0)
newf += t[i]*fApB + Math.log(1+Math.exp(-fApB));
else
newf += (t[i] - 1)*fApB +Math.log(1+Math.exp(fApB));
}
// Check sufficient decrease
if (newf<fval+0.0001*stepsize*gd)
{
A=newA;B=newB;fval=newf;
break;
}
else
stepsize = stepsize / 2.0;
}
if (stepsize < min_step)
{
svm.info("Line search fails in two-class probability estimates\n");
break;
}
}
if (iter>=max_iter)
svm.info("Reaching maximal iterations in two-class probability estimates\n");
probAB[0]=A;probAB[1]=B;
}
private static double sigmoid_predict(double decision_value, double A, double B)
{
double fApB = decision_value*A+B;
if (fApB >= 0)
return Math.exp(-fApB)/(1.0+Math.exp(-fApB));
else
return 1.0/(1+Math.exp(fApB)) ;
}
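// Worked example: with A=-1, B=0 a decision value of 2.0 gives fApB = -2.0 < 0,
// so the function returns 1/(1+exp(-2.0)) ~= 0.881; in general the estimate is
// P(y=+1|f) = 1/(1+exp(A*f+B)).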
// Method 2 from the multiclass_prob paper by Wu, Lin, and Weng
private static void multiclass_probability(int k, double[][] r, double[] p)
{
int t,j;
int iter = 0, max_iter=Math.max(100,k);
double[][] Q=new double[k][k];
double[] Qp=new double[k];
double pQp, eps=0.005/k;
for (t=0;t<k;t++)
{
p[t]=1.0/k;  // Valid if k = 1
Q[t][t]=0;
for (j=0;j<t;j++)
{
Q[t][t]+=r[j][t]*r[j][t];
Q[t][j]=Q[j][t];
}
for (j=t+1;j<k;j++)
{
Q[t][t]+=r[j][t]*r[j][t];
Q[t][j]=-r[j][t]*r[t][j];
}
}
for (iter=0;iter<max_iter;iter++)
{
// stopping condition, recalculate QP,pQP for numerical accuracy
pQp=0;
for (t=0;t<k;t++)
{
Qp[t]=0;
for (j=0;j<k;j++)
Qp[t]+=Q[t][j]*p[j];
pQp+=p[t]*Qp[t];
}
double max_error=0;
for (t=0;t<k;t++)
{
double error=Math.abs(Qp[t]-pQp);
if (error>max_error)
max_error=error;
}
if (max_error<eps) break;
for (t=0;t<k;t++)
{
double diff=(-Qp[t]+pQp)/Q[t][t];
p[t]+=diff;
pQp=(pQp+diff*(diff*Q[t][t]+2*Qp[t]))/(1+diff)/(1+diff);
for (j=0;j<k;j++)
{
Qp[j]=(Qp[j]+diff*Q[t][j])/(1+diff);
p[j]/=(1+diff);
}
}
}
if (iter>=max_iter)
svm.info("Exceeds max_iter in multiclass_prob\n");
}
// Cross-validation decision values for probability estimates
private static void svm_binary_svc_probability(svm_problem prob, svm_parameter param, double Cp, double Cn, double[] probAB)
{
int i;
int nr_fold = 5;
int[] perm = new int[prob.l];
double[] dec_values = new double[prob.l];
// random shuffle
for(i=0;i0)
p_count++;
else
n_count++;
if(p_count==0 && n_count==0)
for(j=begin;j<end;j++)
dec_values[perm[j]] = 0;
else if(p_count > 0 && n_count == 0)
for(j=begin;j<end;j++)
dec_values[perm[j]] = 1;
else if(p_count == 0 && n_count > 0)
for(j=begin;j 5*std)
count=count+1;
else
mae+=Math.abs(ymv[i]);
mae /= (prob.l-count);
svm.info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma="+mae+"\n");
return mae;
}
// label: label name, start: begin of each class, count: #data of classes, perm: indices to the original data
// perm, length l, must be allocated before calling this subroutine
private static void svm_group_classes(svm_problem prob, int[] nr_class_ret, int[][] label_ret, int[][] start_ret, int[][] count_ret, int[] perm)
{
int l = prob.l;
int max_nr_class = 16;
int nr_class = 0;
int[] label = new int[max_nr_class];
int[] count = new int[max_nr_class];
int[] data_label = new int[l];
int i;
for(i=0;i 0) ++nSV;
model.l = nSV;
model.SV = new svm_node[nSV][];
model.sv_coef[0] = new double[nSV];
model.sv_indices = new int[nSV];
int j = 0;
for(i=0;i 0)
{
model.SV[j] = prob.x[i];
model.sv_coef[0][j] = f.alpha[i];
model.sv_indices[j] = i+1;
++j;
}
}
else
{
// classification
int l = prob.l;
int[] tmp_nr_class = new int[1];
int[][] tmp_label = new int[1][];
int[][] tmp_start = new int[1][];
int[][] tmp_count = new int[1][];
int[] perm = new int[l];
// group training data of the same class
svm_group_classes(prob,tmp_nr_class,tmp_label,tmp_start,tmp_count,perm);
int nr_class = tmp_nr_class[0];
int[] label = tmp_label[0];
int[] start = tmp_start[0];
int[] count = tmp_count[0];
if(nr_class == 1)
svm.info("WARNING: training data in only one class. See README for details.\n");
svm_node[][] x = new svm_node[l][];
int i;
for(i=0;i 0)
nonzero[si+k] = true;
for(k=0;k<cj;k++)
if(Math.abs(f[p].alpha[ci+k]) > 0)
nonzero[sj+k] = true;
++p;
}
// build output
model.nr_class = nr_class;
model.label = new int[nr_class];
for(i=0;i some folds may have zero elements
if((param.svm_type == svm_parameter.C_SVC ||
param.svm_type == svm_parameter.NU_SVC) && nr_fold < l)
{
int[] tmp_nr_class = new int[1];
int[][] tmp_label = new int[1][];
int[][] tmp_start = new int[1][];
int[][] tmp_count = new int[1][];
svm_group_classes(prob,tmp_nr_class,tmp_label,tmp_start,tmp_count,perm);
int nr_class = tmp_nr_class[0];
int[] start = tmp_start[0];
int[] count = tmp_count[0];
// random shuffle and then data grouped by fold using the array perm
int[] fold_count = new int[nr_fold];
int c;
int[] index = new int[l];
for(i=0;i0)?1:-1;
else
return sum;
}
else
{
int nr_class = model.nr_class;
int l = model.l;
double[] kvalue = new double[l];
for(i=0;i<l;i++)
kvalue[i] = Kernel.k_function(x,model.SV[i],model.param);
int[] start = new int[nr_class];
start[0] = 0;
for(i=1;i<nr_class;i++)
start[i] = start[i-1]+model.nSV[i-1];
int[] vote = new int[nr_class];
for(i=0;i<nr_class;i++)
vote[i] = 0;
int p=0;
for(i=0;i<nr_class;i++)
for(int j=i+1;j<nr_class;j++)
{
double sum = 0;
int si = start[i];
int sj = start[j];
int ci = model.nSV[i];
int cj = model.nSV[j];
int k;
double[] coef1 = model.sv_coef[j-1];
double[] coef2 = model.sv_coef[i];
for(k=0;k<ci;k++)
sum += coef1[si+k] * kvalue[si+k];
for(k=0;k<cj;k++)
sum += coef2[sj+k] * kvalue[sj+k];
sum -= model.rho[p];
dec_values[p] = sum;
if(dec_values[p] > 0)
++vote[i];
else
++vote[j];
p++;
}
int vote_max_idx = 0;
for(i=1;i<nr_class;i++)
if(vote[i] > vote[vote_max_idx])
vote_max_idx = i;
return model.label[vote_max_idx];
}
}
public static double svm_predict(svm_model model, svm_node[] x)
{
int nr_class = model.nr_class;
double[] dec_values;
if(model.param.svm_type == svm_parameter.ONE_CLASS ||
model.param.svm_type == svm_parameter.EPSILON_SVR ||
model.param.svm_type == svm_parameter.NU_SVR)
dec_values = new double[1];
else
dec_values = new double[nr_class*(nr_class-1)/2];
double pred_result = svm_predict_values(model, x, dec_values);
return pred_result;
}
public static double svm_predict_probability(svm_model model, svm_node[] x, double[] prob_estimates)
{
if ((model.param.svm_type == svm_parameter.C_SVC || model.param.svm_type == svm_parameter.NU_SVC) &&
model.probA!=null && model.probB!=null)
{
int i;
int nr_class = model.nr_class;
double[] dec_values = new double[nr_class*(nr_class-1)/2];
svm_predict_values(model, x, dec_values);
double min_prob=1e-7;
double[][] pairwise_prob=new double[nr_class][nr_class];
int k=0;
for(i=0;i<nr_class;i++)
for(int j=i+1;j<nr_class;j++)
{
pairwise_prob[i][j]=Math.min(Math.max(sigmoid_predict(dec_values[k],model.probA[k],model.probB[k]),min_prob),1-min_prob);
pairwise_prob[j][i]=1-pairwise_prob[i][j];
k++;
}
multiclass_probability(nr_class,pairwise_prob,prob_estimates);
int prob_max_idx = 0;
for(i=1;i<nr_class;i++)
if(prob_estimates[i] > prob_estimates[prob_max_idx])
prob_max_idx = i;
return model.label[prob_max_idx];
}
else
return svm_predict(model, x);
}
static final String svm_type_table[] =
{
"c_svc","nu_svc","one_class","epsilon_svr","nu_svr",
};
static final String kernel_type_table[]=
{
"linear","polynomial","rbf","sigmoid","precomputed"
};
public static void svm_save_model(String model_file_name, svm_model model) throws IOException
{
DataOutputStream fp = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(model_file_name)));
svm_parameter param = model.param;
fp.writeBytes("svm_type "+svm_type_table[param.svm_type]+"\n");
fp.writeBytes("kernel_type "+kernel_type_table[param.kernel_type]+"\n");
if(param.kernel_type == svm_parameter.POLY)
fp.writeBytes("degree "+param.degree+"\n");
if(param.kernel_type == svm_parameter.POLY ||
param.kernel_type == svm_parameter.RBF ||
param.kernel_type == svm_parameter.SIGMOID)
fp.writeBytes("gamma "+param.gamma+"\n");
if(param.kernel_type == svm_parameter.POLY ||
param.kernel_type == svm_parameter.SIGMOID)
fp.writeBytes("coef0 "+param.coef0+"\n");
int nr_class = model.nr_class;
int l = model.l;
fp.writeBytes("nr_class "+nr_class+"\n");
fp.writeBytes("total_sv "+l+"\n");
{
fp.writeBytes("rho");
for(int i=0;i 1)
return "nu <= 0 or nu > 1";
if(svm_type == svm_parameter.EPSILON_SVR)
if(param.p < 0)
return "p < 0";
if(param.shrinking != 0 &&
param.shrinking != 1)
return "shrinking != 0 and shrinking != 1";
if(param.probability != 0 &&
param.probability != 1)
return "probability != 0 and probability != 1";
if(param.probability == 1 &&
svm_type == svm_parameter.ONE_CLASS)
return "one-class SVM probability output not supported yet";
// check whether nu-svc is feasible
if(svm_type == svm_parameter.NU_SVC)
{
int l = prob.l;
int max_nr_class = 16;
int nr_class = 0;
int[] label = new int[max_nr_class];
int[] count = new int[max_nr_class];
int i;
for(i=0;i Math.min(n1,n2))
return "specified nu is infeasible";
}
}
}
return null;
}
public static int svm_check_probability_model(svm_model model)
{
if (((model.param.svm_type == svm_parameter.C_SVC || model.param.svm_type == svm_parameter.NU_SVC) &&
model.probA!=null && model.probB!=null) ||
((model.param.svm_type == svm_parameter.EPSILON_SVR || model.param.svm_type == svm_parameter.NU_SVR) &&
model.probA!=null))
return 1;
else
return 0;
}
public static void svm_set_print_string_function(svm_print_interface print_func)
{
if (print_func == null)
svm_print_string = svm_print_stdout;
else
svm_print_string = print_func;
}
}
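// Illustrative sketch (not part of LIBSVM): a minimal train/predict round trip through
// the public API above. Field names follow the standard svm_problem, svm_parameter and
// svm_node classes of this package, which are not shown in this file.
class SvmUsageSketch
{
public static void main(String[] args)
{
// two one-dimensional points, one per class
svm_problem prob = new svm_problem();
prob.l = 2;
prob.y = new double[]{ +1, -1 };
prob.x = new svm_node[2][1];
prob.x[0][0] = new svm_node(); prob.x[0][0].index = 1; prob.x[0][0].value = +1.0;
prob.x[1][0] = new svm_node(); prob.x[1][0].index = 1; prob.x[1][0].value = -1.0;
svm_parameter param = new svm_parameter();
param.svm_type = svm_parameter.C_SVC;
param.kernel_type = svm_parameter.LINEAR;
param.C = 1;
param.eps = 1e-3;
param.cache_size = 10; // MB
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.gamma = 0; param.degree = 3; param.coef0 = 0; param.nu = 0.5; param.p = 0.1;
if(svm.svm_check_parameter(prob, param) == null)
{
svm_model model = svm.svm_train(prob, param);
double label = svm.svm_predict(model, prob.x[0]);
svm.info("predicted label: " + label + "\n");
}
}
}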