80 likes | 229 Views
An OpenCV ann_mlp.cpp testing case for a bug. Eun Young (Regina) Kim, Biomedical Eng. Dept., Univ. of Iowa. //XOR input double in[]={ 0 ,0, 1, 0, 0, 1, 1, 1}; double out[]={ 0, 1, 1,
E N D
An OpenCV ann_mlp.cpp testing case for a bug. Eun Young (Regina) Kim, Biomedical Eng. Dept., Univ. of Iowa
//XOR input double in[]={ 0 ,0, 1, 0, 0, 1, 1, 1}; double out[]={ 0, 1, 1, 0}; const int IVS = 2; // Input Vector Size const int OVS = 1; // Output Vector Size const int HN = 3; // Number of Hidden nodes const int NV = 4; // Number of Training Vector const int total_iteration = 10; int layer[] = { IVS, HN, OVS}; … … CvANN_MLP train_model( layersize, CvANN_MLP::SIGMOID_SYM, 1, 1); std::cout<< " =========== =========== =========== =========== ==========="<<std::endl; std::cout<< " * "<<1<<"th Iteration with initialzation of weights"<<std::endl; std::cout<< " =========== =========== =========== =========== ==========="<<std::endl; int iter = train_model.train( input, output, NULL, 0, CvANN_MLP_TrainParams( cvTermCriteria ( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.000000001), CvANN_MLP_TrainParams::RPROP, 1, 1), 0 //+ CvANN_MLP::NO_OUTPUT_SCALE ); std::cout << " * iteration :"<<iter<<std::endl; train_model.predict( input, p_output ); Input-Output Pair for Training. Initial Training with initialization of weights
for(int i = 1; i < total_iteration; i++){ std::cout<< " =========== =========== =========== =========== ==========="<<std::endl; std::cout<< " * "<<i+1<<"th Iteration with initialzation of weights"<<std::endl; std::cout<< " =========== =========== =========== =========== ==========="<<std::endl; int iter = train_model.train( input, output, NULL, 0, CvANN_MLP_TrainParams( cvTermCriteria ( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10000, 0.000000001), CvANN_MLP_TrainParams::RPROP, 1, 1), 0 //+ CvANN_MLP::NO_OUTPUT_SCALE + CvANN_MLP::UPDATE_WEIGHTS ); std::cout << " * iteration :"<<iter<<std::endl; train_model.predict( input, p_output ); for(int in=0; in<NV; in++){ std::cout<< CV_MAT_ELEM(*input,double,in,0) << " ," << CV_MAT_ELEM(*input,double,in,1) << " : " << CV_MAT_ELEM(*p_output,double,in,0) <<std::endl; } char prefix[] ="./"; char postfix[] ="model.xml"; char filename[10]; sprintf(filename,"%s_%4d_%s",prefix,i,postfix); train_model.save( filename); } Follow-up Training with update of weights
For the XOR example: the output range of [0,1] should map to [-0.95,+0.95], the fixed range used by OpenCV. Scale calculation: @ CvANN_MLP::calc_output_scale Scale : 0.526315, 0.5 Inv_scale: 1.9, -0.95 0.95 0 1.0 -0.95 This calculation is wrong!! It maps from [-0.95,+0.95] to [0,1], which is the other way around! It should be: Scale: 1.9, -0.95 Inv_scale: 0.526315, 0.5
$ ./XORTEST_EXE =========== =========== =========== =========== =========== * 1th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : -0.00967112 1 ,0 : 1.02556 0 ,1 : 0.977691 1 ,1 : 0.107821 =========== =========== =========== =========== =========== * 2th Iteration with initialzation of weights =========== =========== =========== =========== =========== OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much) in CvANN_MLP::calc_output_scale, file /ipldev/scratch/eunyokim/src/OpenCV/opencv/modules/ml/src/ann_mlp.cpp, line 641 terminate called after throwing an instance of 'cv::Exception' what(): /ipldev/scratch/eunyokim/src/OpenCV/opencv/modules/ml/src/ann_mlp.cpp:641: error: (-211) Some of new output training vector components run exceed the original range too much in function CvANN_MLP::calc_output_scale Aborted Error from CvANN_MLP::calc_output_scale
Scale : 0.526315, 0.5 Inv_scale: 1.9, -0.95 Correct Calculation 0.95 0 @ CvANN_MLP::calc_output_scale // Swapped scale and inv_scale double* inv_scale = weights[l_count]; double* scale = weights[l_count+1]; // change scale <-> inv_scale scale[j*2] = a; scale[j*2+1] = b; a = 1./a; b = -b*a; inv_scale[j*2] = a; inv_scale[j*2+1] = b; 1.0 -0.95 Scale: 1.9, -0.95 Inv_scale: 0.526315, 0.5
$ ./XORTEST_EXE =========== =========== =========== =========== =========== * 1th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : -0.00967112 1 ,0 : 1.02556 0 ,1 : 0.977691 1 ,1 : 0.107821 =========== =========== =========== =========== =========== * 2th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 3th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 4th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 5th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 6th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 7th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 8th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== 
=========== =========== =========== =========== * 9th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 =========== =========== =========== =========== =========== * 10th Iteration with initialzation of weights =========== =========== =========== =========== =========== * iteration :1000 0 ,0 : 0.151086 1 ,0 : 1.02622 0 ,1 : 1.02324 1 ,1 : 0.530521 Work Well!!
void CvANN_MLP::calc_output_scale( const CvVectors* vecs, int flags ) { int i, j, vcount = layer_sizes->data.i[layer_sizes->cols-1]; int type = vecs->type; double m = min_val, M = max_val, m1 = min_val1, M1 = max_val1; bool reset_weights = (flags & UPDATE_WEIGHTS) == 0; bool no_scale = (flags & NO_OUTPUT_SCALE) != 0; int l_count = layer_sizes->cols; // Swapped scale and inv_scale double* inv_scale = weights[l_count]; double* scale = weights[l_count+1]; int count = vecs->count; …. if( reset_weights ) for( j = 0; j < vcount; j++ ) { // map mj..Mj to m..M double mj = scale[j*2], Mj = scale[j*2+1]; double a, b; double delta = Mj - mj; if( delta < DBL_EPSILON ) a = 1, b = (M + m - Mj - mj)*0.5; else a = (M - m)/delta, b = m - mj*a; // change scale <-> inv_scale scale[j*2] = a; scale[j*2+1] = b; a = 1./a; b = -b*a; inv_scale[j*2] = a; inv_scale[j*2+1] = b; } __END__; } weights[l_count] and weights[l_count+1] are used as inv_scale and scale, respectively, throughout the program. The calculation formula has to be changed as well.