#include <string>
#include <sstream>
#include <iostream>
#include <fstream>
#include <limits>
#include <cmath>

#include "normalized_squared_error.h"

#include "../../parsers/tinyxml/tinyxml.h"

namespace OpenNN
{
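
/// Default constructor. 
/// It creates a normalized squared error term that is not associated to any neural network 
/// and not measured on any data set (the default PerformanceTerm constructor is used).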
NormalizedSquaredError::NormalizedSquaredError(void) : PerformanceTerm()
{
}

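
/// Neural network constructor. 
/// It creates a normalized squared error term associated to a neural network object, 
/// but not measured on any data set.
/// @param new_neural_network_pointer Pointer to a neural network object.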
NormalizedSquaredError::NormalizedSquaredError(NeuralNetwork* new_neural_network_pointer)
: PerformanceTerm(new_neural_network_pointer)
{
}

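
/// Data set constructor. 
/// It creates a normalized squared error term to be measured on a data set object, 
/// but not associated to any neural network.
/// @param new_data_set_pointer Pointer to a data set object.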
NormalizedSquaredError::NormalizedSquaredError(DataSet* new_data_set_pointer)
: PerformanceTerm(new_data_set_pointer)
{
}

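
/// Neural network and data set constructor. 
/// It creates a normalized squared error term associated to a neural network object 
/// and measured on a data set object.
/// @param new_neural_network_pointer Pointer to a neural network object.
/// @param new_data_set_pointer Pointer to a data set object.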
NormalizedSquaredError::NormalizedSquaredError(NeuralNetwork* new_neural_network_pointer, DataSet* new_data_set_pointer)
: PerformanceTerm(new_neural_network_pointer, new_data_set_pointer)
{
}

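
/// XML constructor. 
/// It creates a normalized squared error term and loads its members from a TinyXML element.
/// @param normalized_squared_error_element Pointer to a TinyXML element with the object members.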
NormalizedSquaredError::NormalizedSquaredError(TiXmlElement* normalized_squared_error_element)
: PerformanceTerm(normalized_squared_error_element)
{
}

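
/// Destructor.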
NormalizedSquaredError::~NormalizedSquaredError(void)
{
}

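
/// Returns the normalization coefficient of the normalized squared error, i.e., the sum of 
/// squared differences between every target instance and the mean of the targets:
/// normalization_coefficient = sum_i ||target_i - target_mean||^2.
/// @param target_data Matrix of target values, one instance per row.
/// @param target_data_mean Vector with the mean of each target variable.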
double NormalizedSquaredError::calculate_normalization_coefficient(const Matrix<double>& target_data, const Vector<double>& target_data_mean) const
{
   return(target_data.calculate_sum_squared_error(target_data_mean));
}

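
/// Checks that there are a neural network and a data set associated to the normalized squared error, 
/// and that the numbers of inputs and outputs in the neural network are equal to the numbers of 
/// inputs and targets in the data set. 
/// If any of these conditions is not met, this method throws a std::logic_error exception.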
void NormalizedSquaredError::check(void) const
{
   std::ostringstream buffer;

   // Neural network stuff

   if(!neural_network_pointer)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to neural network is NULL.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   if(!multilayer_perceptron_pointer)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to multilayer perceptron is NULL.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   const unsigned int multilayer_perceptron_inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int multilayer_perceptron_outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   if(multilayer_perceptron_inputs_number == 0)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of inputs in multilayer perceptron object is zero.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   if(multilayer_perceptron_outputs_number == 0)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of outputs in multilayer perceptron object is zero.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   // Data set stuff

   if(!data_set_pointer)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to data set is NULL.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   // Numbers of inputs and targets in the data set

   const VariablesInformation& variables_information = data_set_pointer->get_variables_information();

   const unsigned int data_set_inputs_number = variables_information.count_inputs_number();
   const unsigned int data_set_targets_number = variables_information.count_targets_number();

   if(multilayer_perceptron_inputs_number != data_set_inputs_number)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of inputs in multilayer perceptron (" << multilayer_perceptron_inputs_number << ") must be equal to number of inputs in data set (" << data_set_inputs_number << ").\n";

      throw std::logic_error(buffer.str().c_str());
   }

   if(multilayer_perceptron_outputs_number != data_set_targets_number)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of outputs in multilayer perceptron (" << multilayer_perceptron_outputs_number << ") must be equal to number of targets in data set (" << data_set_targets_number << ").\n";

      throw std::logic_error(buffer.str().c_str());
   }
}

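
/// Returns the evaluation of the neural network according to the normalized squared error 
/// on the training instances of the data set:
/// evaluation = sum_i ||outputs_i - targets_i||^2 / sum_i ||targets_i - training_targets_mean||^2.
/// It throws an exception if the normalization coefficient is zero, i.e., if all training 
/// targets are equal to their mean.
//
// A minimal usage sketch (how the NeuralNetwork and DataSet objects are constructed is omitted 
// here and depends on the application):
//
//    NeuralNetwork neural_network(/*...*/);
//    DataSet data_set(/*...*/);
//
//    NormalizedSquaredError nse(&neural_network, &data_set);
//
//    const double evaluation = nse.calculate_evaluation();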
double NormalizedSquaredError::calculate_evaluation(void) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int training_instances_number = instances_information.count_training_instances_number();

   const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

   // Normalized squared error

   Vector<double> inputs(inputs_number);
   Vector<double> outputs(outputs_number);
   Vector<double> targets(outputs_number);

   double sum_squared_error = 0.0;
   double normalization_coefficient = 0.0;

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      // Input vector

      inputs = data_set_pointer->get_training_input_instance(i);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_training_target_instance(i);

      // Sum of squares error

      sum_squared_error += outputs.calculate_sum_squared_error(targets);

      // Normalization coefficient

      normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
   }

   if(normalization_coefficient < 1.0e-99)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "double calculate_evaluation(void) const method.\n"
             << "Normalization coefficient is zero.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   return(sum_squared_error/normalization_coefficient);
}

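
/// Returns which would be the normalized squared error for a hypothetical vector of parameters. 
/// It does not set that vector of parameters to the neural network; the calculation is performed on a copy.
/// @param parameters Vector of potential parameters for the neural network associated to this performance term.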
double NormalizedSquaredError::calculate_evaluation(const Vector<double>& parameters) const
{
   #ifdef _DEBUG

   check();

   #endif

   #ifdef _DEBUG

   std::ostringstream buffer;

   const unsigned int size = parameters.size();

   const unsigned int parameters_number = neural_network_pointer->count_parameters_number();

   if(size != parameters_number)
   {
      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "double calculate_evaluation(const Vector<double>&) method.\n"
             << "Size (" << size << ") must be equal to number of parameters (" << parameters_number << ").\n";

      throw std::logic_error(buffer.str().c_str());
   }

   #endif

   NeuralNetwork neural_network_copy(*neural_network_pointer);

   neural_network_copy.set_parameters(parameters);

   NormalizedSquaredError normalized_squared_error_copy(*this);

   normalized_squared_error_copy.set_neural_network_pointer(&neural_network_copy);

   return(normalized_squared_error_copy.calculate_evaluation());
}

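
/// Returns the normalized squared error of the neural network measured on the generalization 
/// instances of the data set. 
/// It returns 0.0 when there are fewer than two generalization instances, since in that case 
/// the normalization coefficient would be zero.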
double NormalizedSquaredError::calculate_generalization_evaluation(void) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int generalization_instances_number = instances_information.count_generalization_instances_number();

   if(generalization_instances_number < 2)
   {
      return(0.0);
   }

   const Vector<double> generalization_target_data_mean = data_set_pointer->calculate_generalization_target_data_mean();

   // Normalized squared error

   Vector<double> inputs(inputs_number);
   Vector<double> outputs(outputs_number);
   Vector<double> targets(outputs_number);

   double sum_squared_error = 0.0;
   double normalization_coefficient = 0.0;

   for(unsigned int i = 0; i < generalization_instances_number; i++)
   {
      // Input vector

      inputs = data_set_pointer->get_generalization_input_instance(i);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_generalization_target_instance(i);

      // Sum of squares error

      sum_squared_error += outputs.calculate_sum_squared_error(targets);

      // Normalization coefficient

      normalization_coefficient += targets.calculate_sum_squared_error(generalization_target_data_mean);
   }

   if(normalization_coefficient < 1.0e-99)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "double calculate_generalization_evaluation(void) const method.\n"
             << "Normalization coefficient is zero.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   return(sum_squared_error/normalization_coefficient);
}

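
/// Returns the gradient of the normalized squared error with respect to the neural network 
/// parameters, evaluated on the training instances of the data set. 
/// For every training instance the output error 2*(outputs - targets) is propagated backwards 
/// through the layers (back-propagation of the deltas), the resulting point gradients are 
/// accumulated, and the total is divided by the normalization coefficient 
/// sum_i ||targets_i - training_targets_mean||^2.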
Vector<double> NormalizedSquaredError::calculate_gradient(void) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const unsigned int parameters_number = neural_network_pointer->count_parameters_number();

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   const unsigned int layers_number = multilayer_perceptron_pointer->count_layers_number();

   Vector< Vector< Vector<double> > > first_order_forward_propagation(2);

   Vector< Vector<double> > layers_inputs(layers_number);

   Vector< Matrix<double> > layers_combination_parameters_Jacobian;

   const ConditionsLayer* conditions_layer_pointer = neural_network_pointer->get_conditions_layer_pointer();

   const bool& conditions_layer_flag = neural_network_pointer->get_conditions_layer_flag();

   Vector<double> particular_solution;
   Vector<double> homogeneous_solution;

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int training_instances_number = instances_information.count_training_instances_number();

   const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

   Vector<double> inputs(inputs_number);
   Vector<double> targets(outputs_number);

   // Normalized squared error stuff

   Vector<double> output_objective_gradient(outputs_number);

   Vector< Vector<double> > layers_delta;

   Vector<double> gradient(parameters_number, 0.0);

   double normalization_coefficient = 0.0;

   // Main loop over the training instances

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      // Input and target vectors

      inputs = data_set_pointer->get_training_input_instance(i);

      targets = data_set_pointer->get_training_target_instance(i);

      // Forward propagation of the inputs

      first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);

      const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
      const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

      layers_inputs = multilayer_perceptron_pointer->arrange_layers_input(inputs, layers_activation);

      layers_combination_parameters_Jacobian = multilayer_perceptron_pointer->calculate_layers_combination_parameters_Jacobian(layers_inputs);

      // Back propagation of the output objective gradient

      if(!conditions_layer_flag)
      {
         output_objective_gradient = (layers_activation[layers_number-1] - targets)*2.0;

         layers_delta = calculate_layers_delta(layers_activation_derivative, output_objective_gradient);
      }
      else
      {
         particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
         homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

         output_objective_gradient = (particular_solution + homogeneous_solution*layers_activation[layers_number-1] - targets)*2.0;

         layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_objective_gradient);
      }

      // Accumulate the normalization coefficient and the point gradient

      normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);

      gradient += calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);
   }

   return(gradient/normalization_coefficient);
}

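
/// Returns the Hessian matrix of the normalized squared error. 
/// This is currently a placeholder that returns an empty matrix.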
Matrix<double> NormalizedSquaredError::calculate_Hessian(void) const
{
   Matrix<double> objective_Hessian;

   return(objective_Hessian);
}

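
/// Returns the evaluation of every training instance as a vector of error terms, so that the 
/// sum of squared terms equals the normalized squared error. 
/// Each term is the distance between outputs and targets, and the whole vector is divided by 
/// the square root of the normalization coefficient.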
Vector<double> NormalizedSquaredError::calculate_evaluation_terms(void) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int training_instances_number = instances_information.count_training_instances_number();

   const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

   // Evaluation terms

   Vector<double> evaluation_terms(training_instances_number);

   Vector<double> inputs(inputs_number);
   Vector<double> outputs(outputs_number);
   Vector<double> targets(outputs_number);

   double normalization_coefficient = 0.0;

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      // Input vector

      inputs = data_set_pointer->get_training_input_instance(i);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_training_target_instance(i);

      // Error term

      evaluation_terms[i] = outputs.calculate_distance(targets);

      // Normalization coefficient

      normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
   }

   if(normalization_coefficient < 1.0e-99)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "Vector<double> calculate_evaluation_terms(void) const method.\n"
             << "Normalization coefficient is zero.\n";

      throw std::logic_error(buffer.str().c_str());
   }

   return(evaluation_terms/sqrt(normalization_coefficient));
}

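
/// Returns which would be the evaluation terms for a hypothetical vector of parameters. 
/// It does not set that vector of parameters to the neural network; the calculation is performed on a copy.
/// @param network_parameters Vector of potential parameters for the neural network associated to this performance term.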
Vector<double> NormalizedSquaredError::calculate_evaluation_terms(const Vector<double>& network_parameters) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   #ifdef _DEBUG

   const unsigned int size = network_parameters.size();

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int network_parameters_number = multilayer_perceptron_pointer->count_parameters_number();

   if(size != network_parameters_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
             << "Vector<double> calculate_evaluation_terms(const Vector<double>&) const method.\n"
             << "Size (" << size << ") must be equal to number of multilayer perceptron parameters (" << network_parameters_number << ").\n";

      throw std::logic_error(buffer.str().c_str());
   }

   #endif

   NeuralNetwork neural_network_copy(*neural_network_pointer);

   neural_network_copy.set_parameters(network_parameters);

   NormalizedSquaredError normalized_squared_error_copy(*this);

   normalized_squared_error_copy.set_neural_network_pointer(&neural_network_copy);

   return(normalized_squared_error_copy.calculate_evaluation_terms());
}

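
/// Returns the Jacobian matrix of the error terms, with one row per training instance and one 
/// column per neural network parameter. 
/// Each row is the gradient of the corresponding error term, computed by back-propagating the 
/// normalized output error through the layers, and the whole matrix is divided by the square 
/// root of the normalization coefficient.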
Matrix<double> NormalizedSquaredError::calculate_Jacobian_terms(void) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
   const unsigned int layers_number = multilayer_perceptron_pointer->count_layers_number();

   const unsigned int parameters_number = multilayer_perceptron_pointer->count_parameters_number();

   Vector< Vector< Vector<double> > > first_order_forward_propagation(2);

   Vector< Matrix<double> > layers_combination_parameters_Jacobian;

   Vector< Vector<double> > layers_inputs(layers_number);

   Vector<double> particular_solution;
   Vector<double> homogeneous_solution;

   const bool conditions_layer_flag = neural_network_pointer->get_conditions_layer_flag();

   const ConditionsLayer* conditions_layer_pointer = neural_network_pointer->get_conditions_layer_pointer();

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int training_instances_number = instances_information.count_training_instances_number();

   const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

   Vector<double> inputs(inputs_number);
   Vector<double> targets(outputs_number);

   // Normalized squared error stuff

   Vector<double> term(outputs_number);
   double term_norm;

   Vector<double> output_objective_gradient(outputs_number);

   Vector< Vector<double> > layers_delta(layers_number);
   Vector<double> point_gradient(parameters_number);

   Matrix<double> Jacobian_terms(training_instances_number, parameters_number);

   double normalization_coefficient = 0.0;

   // Main loop over the training instances

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      // Input and target vectors

      inputs = data_set_pointer->get_training_input_instance(i);

      targets = data_set_pointer->get_training_target_instance(i);

      // Forward propagation of the inputs

      first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);

      const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
      const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

      layers_inputs = multilayer_perceptron_pointer->arrange_layers_input(inputs, layers_activation);

      layers_combination_parameters_Jacobian = multilayer_perceptron_pointer->calculate_layers_combination_parameters_Jacobian(layers_inputs);

      // Back propagation of the error term gradient

      if(!conditions_layer_flag)
      {
         const Vector<double>& outputs = layers_activation[layers_number-1];

         term = outputs - targets;
         term_norm = term.calculate_norm();

         if(term_norm == 0.0)
         {
            output_objective_gradient.initialize(0.0);
         }
         else
         {
            output_objective_gradient = term/term_norm;
         }

         layers_delta = calculate_layers_delta(layers_activation_derivative, output_objective_gradient);
      }
      else
      {
         particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
         homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

         const Vector<double>& output_layer_activation = layers_activation[layers_number-1];

         term = (particular_solution + homogeneous_solution*output_layer_activation - targets);
         term_norm = term.calculate_norm();

         if(term_norm == 0.0)
         {
            output_objective_gradient.initialize(0.0);
         }
         else
         {
            output_objective_gradient = term/term_norm;
         }

         layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_objective_gradient);
      }

      // Accumulate the normalization coefficient and set the Jacobian row

      normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);

      point_gradient = calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);

      Jacobian_terms.set_row(i, point_gradient);
   }

   return(Jacobian_terms/sqrt(normalization_coefficient));
}

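
/// Returns the error terms and their Jacobian matrix packed in a FirstOrderEvaluationTerms 
/// structure, which is typically consumed by training algorithms that work with error terms, 
/// such as the Levenberg-Marquardt algorithm.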
NormalizedSquaredError::FirstOrderEvaluationTerms NormalizedSquaredError::calculate_first_order_evaluation_terms(void)
{
   FirstOrderEvaluationTerms first_order_evaluation_terms;

   first_order_evaluation_terms.evaluation_terms = calculate_evaluation_terms();

   first_order_evaluation_terms.Jacobian_terms = calculate_Jacobian_terms();

   return(first_order_evaluation_terms);
}

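
/// Returns a vector with the squared error of every training instance, i.e., the sum of squared 
/// differences between the neural network outputs and the data set targets for that instance. 
/// These values are not divided by the normalization coefficient.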
Vector<double> NormalizedSquaredError::calculate_squared_errors(void) const
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int training_instances_number = instances_information.count_training_instances_number();

   // Squared errors

   Vector<double> squared_errors(training_instances_number);

   Vector<double> inputs(inputs_number);
   Vector<double> outputs(outputs_number);
   Vector<double> targets(outputs_number);

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      // Input vector

      inputs = data_set_pointer->get_training_input_instance(i);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_training_target_instance(i);

      // Squared error

      squared_errors[i] = outputs.calculate_sum_squared_error(targets);
   }

   return(squared_errors);
}

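
/// Returns a string with the name of this performance term type, "NORMALIZED_SQUARED_ERROR".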
std::string NormalizedSquaredError::write_performance_term_type(void) const
{
   return("NORMALIZED_SQUARED_ERROR");
}

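
/// Returns a pointer to a newly created TinyXML element with the members of this object. 
/// Currently only the display flag is serialized.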
TiXmlElement* NormalizedSquaredError::to_XML(void) const
{
   std::ostringstream buffer;

   // Normalized squared error

   TiXmlElement* normalized_squared_error_element = new TiXmlElement("NormalizedSquaredError");
   normalized_squared_error_element->SetAttribute("Version", 4);

   // Display

   {
      TiXmlElement* display_element = new TiXmlElement("Display");
      normalized_squared_error_element->LinkEndChild(display_element);

      buffer.str("");
      buffer << display;

      TiXmlText* display_text = new TiXmlText(buffer.str().c_str());
      display_element->LinkEndChild(display_text);
   }

   return(normalized_squared_error_element);
}

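
/// Loads the members of this normalized squared error object from a TinyXML element. 
/// Currently only the display flag is read.
/// @param normalized_squared_error_element Pointer to a TinyXML element with the object members.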
void NormalizedSquaredError::from_XML(TiXmlElement* normalized_squared_error_element)
{
   if(normalized_squared_error_element)
   {
      // Display

      {
         TiXmlElement* display_element = normalized_squared_error_element->FirstChildElement("Display");

         if(display_element)
         {
            const std::string new_display_string = display_element->GetText();

            try
            {
               set_display(new_display_string != "0");
            }
            catch(std::exception& e)
            {
               std::cout << e.what() << std::endl;
            }
         }
      }
   }
}

}