00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018 #include <iostream>
00019 #include <fstream>
00020 #include <cmath>
00021 #include <sstream>
00022 #include <string>
00023 #include <limits>
00024
00025
00026
00027 #include "sum_squared_error.h"
00028
00029
00030
00031 #include "../../parsers/tinyxml/tinyxml.h"
00032
00033
00034 namespace OpenNN
00035 {
00036
00037
00038
00042
/// Default constructor.
/// Creates a sum squared error term not associated with any
/// neural network and not measured on any data set.

SumSquaredError::SumSquaredError(void) : PerformanceTerm()
{
}
00046
00047
00048
00049
00054
/// Neural network constructor.
/// Creates a sum squared error term associated with a neural network,
/// but not measured on any data set.
/// @param new_neural_network_pointer Pointer to a neural network object.

SumSquaredError::SumSquaredError(NeuralNetwork* new_neural_network_pointer)
: PerformanceTerm(new_neural_network_pointer)
{
}
00059
00060
00061
00062
00067
/// Data set constructor.
/// Creates a sum squared error term to be measured on a data set,
/// but not associated with any neural network.
/// @param new_data_set_pointer Pointer to a data set object.

SumSquaredError::SumSquaredError(DataSet* new_data_set_pointer)
: PerformanceTerm(new_data_set_pointer)
{
}
00072
00073
00074
00075
00081
/// Neural network and data set constructor.
/// Creates a sum squared error term associated with a neural network and
/// measured on a data set.
/// @param new_neural_network_pointer Pointer to a neural network object.
/// @param new_data_set_pointer Pointer to a data set object.

SumSquaredError::SumSquaredError(NeuralNetwork* new_neural_network_pointer, DataSet* new_data_set_pointer)
: PerformanceTerm(new_neural_network_pointer, new_data_set_pointer)
{
}
00086
00087
00088
00089
00094
/// XML constructor.
/// Creates a sum squared error term and loads its members from a
/// TinyXML element (deserialization is delegated to the base class).
/// @param sum_squared_error_element Pointer to a TinyXML element with
/// the object members.

SumSquaredError::SumSquaredError(TiXmlElement* sum_squared_error_element)
: PerformanceTerm(sum_squared_error_element)
{
}
00099
00100
00101
00102
00103
00108
/// Copy constructor.
/// Creates a sum squared error term by copying another one
/// (member copying is delegated to the base class).
/// @param new_sum_squared_error Object to be copied.

SumSquaredError::SumSquaredError(const SumSquaredError& new_sum_squared_error)
: PerformanceTerm(new_sum_squared_error)
{

}
00114
00115
00116
00117
00119
/// Destructor. This class owns no resources of its own.

SumSquaredError::~SumSquaredError(void)
{
}
00123
00124
00125
00126
00127
00128
00132
00133 void SumSquaredError::check(void) const
00134 {
00135 std::ostringstream buffer;
00136
00137
00138
00139 if(!neural_network_pointer)
00140 {
00141 buffer << "OpenNN Exception: SumSquaredError class.\n"
00142 << "void check(void) const method.\n"
00143 << "Pointer to neural network is NULL.\n";
00144
00145 throw std::logic_error(buffer.str().c_str());
00146 }
00147
00148 const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
00149
00150 if(!multilayer_perceptron_pointer)
00151 {
00152 buffer << "OpenNN Exception: SumSquaredError class.\n"
00153 << "void check(void) const method.\n"
00154 << "Pointer to multilayer perceptron is NULL.\n";
00155
00156 throw std::logic_error(buffer.str().c_str());
00157 }
00158
00159 const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
00160 const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
00161
00162
00163
00164 if(!data_set_pointer)
00165 {
00166 buffer << "OpenNN Exception: SumSquaredError class.\n"
00167 << "void check(void) const method.\n"
00168 << "Pointer to data set is NULL.\n";
00169
00170 throw std::logic_error(buffer.str().c_str());
00171 }
00172
00173
00174
00175 const VariablesInformation& variables_information = data_set_pointer->get_variables_information();
00176
00177 const unsigned int targets_number = variables_information.count_targets_number();
00178
00179 if(inputs_number != inputs_number)
00180 {
00181 buffer << "OpenNN Exception: SumSquaredError class.\n"
00182 << "void check(void) const method.\n"
00183 << "Number of inputs in multilayer perceptron must be equal to number of inputs in data set.\n";
00184
00185 throw std::logic_error(buffer.str().c_str());
00186 }
00187
00188 if(outputs_number != targets_number)
00189 {
00190 buffer << "OpenNN Exception: SumSquaredError class.\n"
00191 << "void check(void) const method.\n"
00192 << "Number of outputs in multilayer perceptron must be equal to number of targets in data set.\n";
00193
00194 throw std::logic_error(buffer.str().c_str());
00195 }
00196 }
00197
00198
00199
00200
00202
00203 double SumSquaredError::calculate_evaluation(void) const
00204 {
00205 #ifdef _DEBUG
00206
00207 check();
00208
00209 #endif
00210
00211
00212
00213 const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
00214
00215 const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
00216 const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
00217
00218
00219
00220 const InstancesInformation& instances_information = data_set_pointer->get_instances_information();
00221
00222 const unsigned int training_instances_number = instances_information.count_training_instances_number();
00223
00224
00225
00226 Vector<double> inputs(inputs_number);
00227 Vector<double> outputs(outputs_number);
00228 Vector<double> targets(outputs_number);
00229
00230 double sum_squared_error = 0.0;
00231
00232 for(unsigned int i = 0; i < training_instances_number; i++)
00233 {
00234
00235
00236 inputs = data_set_pointer->get_training_input_instance(i);
00237
00238
00239
00240 outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
00241
00242
00243
00244 targets = data_set_pointer->get_training_target_instance(i);
00245
00246
00247
00248 sum_squared_error += outputs.calculate_sum_squared_error(targets);
00249 }
00250
00251 return(sum_squared_error);
00252 }
00253
00254
00255
00256
00261
00262 double SumSquaredError::calculate_evaluation(const Vector<double>& parameters) const
00263 {
00264
00265
00266 #ifdef _DEBUG
00267
00268 check();
00269
00270 #endif
00271
00272
00273
00274 #ifdef _DEBUG
00275
00276 std::ostringstream buffer;
00277
00278 const unsigned int size = parameters.size();
00279
00280 const unsigned int parameters_number = neural_network_pointer->count_parameters_number();
00281
00282 if(size != parameters_number)
00283 {
00284 buffer << "OpenNN Exception: SumSquaredError class." << std::endl
00285 << "double calculate_evaluation(const Vector<double>&) const method." << std::endl
00286 << "Size (" << size << ") must be equal to number of parameters (" << parameters_number << ")." << std::endl;
00287
00288 throw std::logic_error(buffer.str().c_str());
00289 }
00290
00291 #endif
00292
00293 NeuralNetwork neural_network_copy(*neural_network_pointer);
00294
00295 neural_network_copy.set_parameters(parameters);
00296
00297 SumSquaredError sum_squared_error_copy(*this);
00298
00299 sum_squared_error_copy.set_neural_network_pointer(&neural_network_copy);
00300
00301 return(sum_squared_error_copy.calculate_evaluation());
00302 }
00303
00304
00305
00306
00308
00309 double SumSquaredError::calculate_generalization_evaluation(void) const
00310 {
00311 #ifdef _DEBUG
00312
00313 check();
00314
00315 #endif
00316
00317
00318
00319 const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
00320
00321 const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
00322 const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
00323
00324
00325
00326 const InstancesInformation& instances_information = data_set_pointer->get_instances_information();
00327 const unsigned int generalization_instances_number = instances_information.count_generalization_instances_number();
00328
00329
00330
00331 Vector<double> inputs(inputs_number);
00332 Vector<double> outputs(outputs_number);
00333 Vector<double> targets(outputs_number);
00334
00335 double generalization_objective = 0.0;
00336
00337 for(unsigned int i = 0; i < generalization_instances_number; i++)
00338 {
00339
00340
00341 inputs = data_set_pointer->get_generalization_input_instance(i);
00342
00343
00344
00345 outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
00346
00347
00348
00349 targets = data_set_pointer->get_generalization_target_instance(i);
00350
00351
00352
00353 generalization_objective += outputs.calculate_sum_squared_error(targets);
00354 }
00355
00356 return(generalization_objective);
00357 }
00358
00359
00360
00361
00364
/// Calculates the gradient of the sum squared error with respect to the
/// multilayer perceptron parameters, by back-propagating the output error
/// deltas obtained from a first order forward propagation of every training
/// instance.
/// @return Gradient vector whose size is the number of multilayer
/// perceptron parameters.

Vector<double> SumSquaredError::calculate_gradient(void) const
{
   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   // Conditions layer (only dereferenced when the conditions layer flag is set)

   const ConditionsLayer* conditions_layer_pointer = neural_network_pointer->get_conditions_layer_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();

   const unsigned int layers_number = neural_network_pointer->get_multilayer_perceptron_pointer()->count_layers_number();

   const unsigned int network_parameters_number = multilayer_perceptron_pointer->count_parameters_number();

   // [0] holds the layers activations, [1] the activation derivatives

   Vector< Vector< Vector<double> > > first_order_forward_propagation(2);

   const bool& conditions_layer_flag = neural_network_pointer->get_conditions_layer_flag();

   Vector<double> particular_solution;
   Vector<double> homogeneous_solution;

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   unsigned int training_instances_number = instances_information.count_training_instances_number();

   Vector<double> inputs(inputs_number);
   Vector<double> targets(outputs_number);

   // Sum squared error stuff

   Vector<double> output_objective_gradient(outputs_number);

   Vector< Matrix<double> > layers_combination_parameters_Jacobian;

   Vector< Vector<double> > layers_inputs(layers_number);
   Vector< Vector<double> > layers_delta;

   Vector<double> point_gradient(network_parameters_number, 0.0);

   // The gradient accumulates the contribution of every training instance

   Vector<double> objective_gradient(network_parameters_number, 0.0);

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      inputs = data_set_pointer->get_training_input_instance(i);

      targets = data_set_pointer->get_training_target_instance(i);

      first_order_forward_propagation = neural_network_pointer->get_multilayer_perceptron_pointer()->calculate_first_order_forward_propagation(inputs);

      const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
      const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

      // Each layer is fed the activations of the previous layer;
      // the first layer is fed the instance inputs

      layers_inputs[0] = inputs;

      for(unsigned int j = 1; j < layers_number; j++)
      {
         layers_inputs[j] = layers_activation[j-1];
      }

      layers_combination_parameters_Jacobian = neural_network_pointer->get_multilayer_perceptron_pointer()->calculate_layers_combination_parameters_Jacobian(layers_inputs);

      if(!conditions_layer_flag)
      {
         // d(sum squared error)/d(outputs) = 2*(outputs - targets)

         output_objective_gradient = (layers_activation[layers_number-1]-targets)*2.0;

         layers_delta = calculate_layers_delta(layers_activation_derivative, output_objective_gradient);
      }
      else
      {
         // With a conditions layer the network outputs are
         // particular_solution + homogeneous_solution*activations

         particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
         homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

         output_objective_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)*2.0;

         layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_objective_gradient);
      }

      // Accumulate this instance's contribution

      point_gradient = calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);

      objective_gradient += point_gradient;
   }


   return(objective_gradient);
}
00461
00462
00463
00464
00467
00468 Matrix<double> SumSquaredError::calculate_Hessian(void) const
00469 {
00470 #ifdef _DEBUG
00471
00472 check();
00473
00474 #endif
00475
00476
00477
00478 const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
00479
00480 const ConditionsLayer* conditions_layer_pointer = neural_network_pointer->get_conditions_layer_pointer();
00481
00482 const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
00483 const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
00484
00485 const unsigned int layers_number = multilayer_perceptron_pointer->count_layers_number();
00486
00487 const unsigned int parameters_number = multilayer_perceptron_pointer->count_parameters_number();
00488
00489 const Vector<unsigned int> layers_perceptrons_number = multilayer_perceptron_pointer->arrange_layers_perceptrons_numbers();
00490
00491 const unsigned int conditions_layer_flag = neural_network_pointer->get_conditions_layer_flag();
00492
00493 Vector< Vector< Vector<double> > > second_order_forward_propagation(3);
00494
00495 Vector < Vector< Vector<double> > > perceptrons_combination_parameters_gradient(layers_number);
00496 Matrix < Matrix<double> > interlayers_combination_combination_Jacobian;
00497
00498 Vector<double> particular_solution;
00499 Vector<double> homogeneous_solution;
00500
00501
00502
00503 const InstancesInformation& instances_information = data_set_pointer->get_instances_information();
00504
00505 const unsigned int training_instances_number = instances_information.count_training_instances_number();
00506
00507 Vector<double> inputs(inputs_number);
00508 Vector<double> targets(outputs_number);
00509
00510
00511
00512 Vector< Vector<double> > layers_delta(layers_number);
00513 Matrix< Matrix<double> > interlayers_Delta(layers_number, layers_number);
00514
00515 Vector<double> output_objective_gradient(outputs_number);
00516 Matrix<double> output_objective_Hessian(outputs_number, outputs_number);
00517
00518 Matrix<double> objective_Hessian(parameters_number, parameters_number, 0.0);
00519
00520 for(unsigned int i = 0; i < training_instances_number; i++)
00521 {
00522 inputs = data_set_pointer->get_training_input_instance(i);
00523
00524 targets = data_set_pointer->get_training_target_instance(i);
00525
00526 second_order_forward_propagation = multilayer_perceptron_pointer->calculate_second_order_forward_propagation(inputs);
00527
00528 Vector< Vector<double> >& layers_activation = second_order_forward_propagation[0];
00529 Vector< Vector<double> >& layers_activation_derivative = second_order_forward_propagation[1];
00530 Vector< Vector<double> >& layers_activation_second_derivative = second_order_forward_propagation[2];
00531
00532 Vector< Vector<double> > layers_inputs(layers_number);
00533
00534 layers_inputs[0] = inputs;
00535
00536 for(unsigned int j = 1; j < layers_number; j++)
00537 {
00538 layers_inputs[j] = layers_activation[j-1];
00539 }
00540
00541 perceptrons_combination_parameters_gradient = multilayer_perceptron_pointer->calculate_perceptrons_combination_parameters_gradient(layers_inputs);
00542
00543 interlayers_combination_combination_Jacobian = multilayer_perceptron_pointer->calculate_interlayers_combination_combination_Jacobian(inputs);
00544
00545 if(!conditions_layer_flag)
00546 {
00547 output_objective_gradient = (layers_activation[layers_number-1] - targets)*2.0;
00548 output_objective_Hessian.initialize_diagonal(2.0);
00549
00550 layers_delta = calculate_layers_delta(layers_activation_derivative, output_objective_gradient);
00551 interlayers_Delta = calculate_interlayers_Delta(layers_activation_derivative, layers_activation_second_derivative, interlayers_combination_combination_Jacobian, output_objective_gradient, output_objective_Hessian, layers_delta);
00552 }
00553 else
00554 {
00555 particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
00556 homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);
00557
00558 output_objective_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)*2.0;
00559
00560 layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_objective_gradient);
00561 }
00562
00563 objective_Hessian += calculate_point_Hessian(layers_activation_derivative, perceptrons_combination_parameters_gradient, interlayers_combination_combination_Jacobian, layers_delta, interlayers_Delta);
00564 }
00565
00566 return(objective_Hessian);
00567 }
00568
00569
00570
00571
00573
00574 Vector<double> SumSquaredError::calculate_evaluation_terms(void) const
00575 {
00576
00577
00578 #ifdef _DEBUG
00579
00580 check();
00581
00582 #endif
00583
00584 const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
00585
00586 const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
00587 const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
00588
00589 const InstancesInformation& instances_information = data_set_pointer->get_instances_information();
00590
00591 const unsigned int training_instances_number = instances_information.count_training_instances_number();
00592
00593 Vector<double> evaluation_terms(training_instances_number);
00594
00595 Vector<double> inputs(inputs_number);
00596 Vector<double> outputs(outputs_number);
00597 Vector<double> targets(outputs_number);
00598
00599 for(unsigned int i = 0; i < training_instances_number; i++)
00600 {
00601
00602
00603 inputs = data_set_pointer->get_training_input_instance(i);
00604
00605
00606
00607 outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
00608
00609
00610
00611 targets = data_set_pointer->get_training_target_instance(i);
00612
00613
00614
00615 evaluation_terms[i] = outputs.calculate_distance(targets);
00616 }
00617
00618 return(evaluation_terms);
00619 }
00620
00621
00622
00623
00626
00627 Vector<double> SumSquaredError::calculate_evaluation_terms(const Vector<double>& parameters) const
00628 {
00629
00630
00631 #ifdef _DEBUG
00632
00633 check();
00634
00635 #endif
00636
00637
00638 #ifdef _DEBUG
00639
00640 const unsigned int size = parameters.size();
00641
00642 const unsigned int parameters_number = neural_network_pointer->count_parameters_number();
00643
00644 if(size != parameters_number)
00645 {
00646 std::ostringstream buffer;
00647
00648 buffer << "OpenNN Exception: SumSquaredError class." << std::endl
00649 << "double calculate_evaluation_terms(const Vector<double>&) const method." << std::endl
00650 << "Size (" << size << ") must be equal to number of neural network parameters (" << parameters_number << ")." << std::endl;
00651
00652 throw std::logic_error(buffer.str().c_str());
00653 }
00654
00655 #endif
00656
00657 NeuralNetwork neural_network_copy(*neural_network_pointer);
00658
00659 neural_network_copy.set_parameters(parameters);
00660
00661 SumSquaredError sum_squared_error_copy(*this);
00662
00663 sum_squared_error_copy.set_neural_network_pointer(&neural_network_copy);
00664
00665 return(sum_squared_error_copy.calculate_evaluation_terms());
00666 }
00667
00668
00669
00670
00674
/// Calculates the Jacobian of the evaluation terms: row i holds the
/// gradient of the i-th instance's term (the output-target distance, see
/// calculate_evaluation_terms) with respect to the multilayer perceptron
/// parameters.
/// @return Matrix with as many rows as training instances and as many
/// columns as network parameters.

Matrix<double> SumSquaredError::calculate_Jacobian_terms(void) const
{
   #ifdef _DEBUG

   check();

   #endif

   // Neural network stuff

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
   const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
   const unsigned int layers_number = multilayer_perceptron_pointer->count_layers_number();

   const unsigned int network_parameters_number = multilayer_perceptron_pointer->count_parameters_number();

   // [0] holds the layers activations, [1] the activation derivatives

   Vector< Vector< Vector<double> > > first_order_forward_propagation(2);

   Vector< Vector<double> > layers_inputs(layers_number);
   Vector< Matrix<double> > layers_combination_parameters_Jacobian(layers_number);

   Vector<double> particular_solution;
   Vector<double> homogeneous_solution;

   const bool conditions_layer_flag = neural_network_pointer->get_conditions_layer_flag();

   // Data set stuff

   const InstancesInformation& instances_information = data_set_pointer->get_instances_information();

   const unsigned int training_instances_number = instances_information.count_training_instances_number();

   Vector<double> inputs(inputs_number);
   Vector<double> targets(outputs_number);

   // Sum squared error stuff

   Vector<double> term(outputs_number);
   double term_norm;

   Vector<double> output_objective_gradient(outputs_number);

   Vector< Vector<double> > layers_delta(layers_number);
   Vector<double> point_gradient(network_parameters_number);

   Matrix<double> Jacobian_terms(training_instances_number, network_parameters_number);

   // Main loop: one Jacobian row per training instance

   for(unsigned int i = 0; i < training_instances_number; i++)
   {
      inputs = data_set_pointer->get_training_input_instance(i);

      targets = data_set_pointer->get_training_target_instance(i);

      first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);

      const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
      const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

      // Each layer is fed the activations of the previous layer

      layers_inputs[0] = inputs;

      for(unsigned int j = 1; j < layers_number; j++)
      {
         layers_inputs[j] = layers_activation[j-1];
      }

      layers_combination_parameters_Jacobian = multilayer_perceptron_pointer->calculate_layers_combination_parameters_Jacobian(layers_inputs);

      if(!conditions_layer_flag)
      {
         const Vector<double>& outputs = first_order_forward_propagation[0][layers_number-1];

         // The term is the output-target distance, so its gradient with
         // respect to the outputs is (outputs-targets)/norm; guard the
         // norm == 0 case (outputs equal targets) to avoid division by zero

         term = outputs-targets;
         term_norm = term.calculate_norm();

         if(term_norm == 0.0)
         {
            output_objective_gradient.initialize(0.0);
         }
         else
         {
            output_objective_gradient = term/term_norm;
         }

         layers_delta = calculate_layers_delta(layers_activation_derivative, output_objective_gradient);
      }
      else
      {
         // With a conditions layer the network outputs are
         // particular_solution + homogeneous_solution*activations

         ConditionsLayer* conditions_layer_pointer = neural_network_pointer->get_conditions_layer_pointer();

         particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
         homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

         const Vector<double>& output_layer_activation = first_order_forward_propagation[0][layers_number-1];

         term = (particular_solution+homogeneous_solution*output_layer_activation - targets);
         term_norm = term.calculate_norm();

         if(term_norm == 0.0)
         {
            output_objective_gradient.initialize(0.0);
         }
         else
         {
            output_objective_gradient = term/term_norm;
         }

         layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_objective_gradient);
      }

      // The gradient of this instance's term is one row of the Jacobian

      point_gradient = calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);

      Jacobian_terms.set_row(i, point_gradient);
   }

   return(Jacobian_terms);
}
00793
00794
00795
00796
00799
00800 PerformanceTerm::FirstOrderEvaluationTerms SumSquaredError::calculate_first_order_evaluation_terms(void) const
00801 {
00802 FirstOrderEvaluationTerms first_order_evaluation_terms;
00803
00804 first_order_evaluation_terms.evaluation_terms = calculate_evaluation_terms();
00805 first_order_evaluation_terms.Jacobian_terms = calculate_Jacobian_terms();
00806
00807 return(first_order_evaluation_terms);
00808 }
00809
00810
00811
00812
00814
00815 Vector<double> SumSquaredError::calculate_squared_errors(void) const
00816 {
00817
00818
00819 #ifdef _DEBUG
00820
00821 check();
00822
00823 #endif
00824
00825
00826
00827 const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
00828
00829 const unsigned int inputs_number = multilayer_perceptron_pointer->count_inputs_number();
00830 const unsigned int outputs_number = multilayer_perceptron_pointer->count_outputs_number();
00831
00832
00833
00834 const InstancesInformation& instances_information = data_set_pointer->get_instances_information();
00835
00836 const unsigned int training_instances_number = instances_information.count_training_instances_number();
00837
00838 Vector<double> squared_errors(training_instances_number);
00839
00840 Vector<double> inputs(inputs_number);
00841 Vector<double> outputs(outputs_number);
00842 Vector<double> targets(outputs_number);
00843
00844 for(unsigned int i = 0; i < training_instances_number; i++)
00845 {
00846
00847
00848 inputs = data_set_pointer->get_training_input_instance(i);
00849
00850
00851
00852 outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
00853
00854
00855
00856 targets = data_set_pointer->get_training_target_instance(i);
00857
00858
00859
00860 squared_errors[i] = outputs.calculate_sum_squared_error(targets);
00861 }
00862
00863 return(squared_errors);
00864 }
00865
00866
00867
00868
00870
00871 std::string SumSquaredError::write_performance_term_type(void) const
00872 {
00873 return("SUM_SQUARED_ERROR");
00874 }
00875
00876
00877
00878
00880
/// Serializes this sum squared error object into a TinyXML element.
/// The caller takes ownership of the returned element; children linked
/// with LinkEndChild are owned by their parent element, per TinyXML
/// conventions.
/// @return Pointer to a newly created "SumSquaredError" XML element.

TiXmlElement* SumSquaredError::to_XML(void) const
{
   std::ostringstream buffer;

   // Sum squared error root element

   TiXmlElement* sum_squared_error_element = new TiXmlElement("SumSquaredError");
   sum_squared_error_element->SetAttribute("Version", 4);

   // Display flag ("0" or "1")

   {
      TiXmlElement* display_element = new TiXmlElement("Display");
      sum_squared_error_element->LinkEndChild(display_element);

      buffer.str("");
      buffer << display;

      TiXmlText* display_text = new TiXmlText(buffer.str().c_str());
      display_element->LinkEndChild(display_text);
   }

   return(sum_squared_error_element);
}
00905
00906
00907
00908
00910
00911
00912 void SumSquaredError::from_XML(TiXmlElement* sum_squared_error_element)
00913 {
00914 if(sum_squared_error_element)
00915 {
00916
00917 {
00918 TiXmlElement* display_element = sum_squared_error_element->FirstChildElement("Display");
00919
00920 if(display_element)
00921 {
00922 std::string new_display_string = display_element->GetText();
00923
00924 try
00925 {
00926 set_display(new_display_string != "0");
00927 }
00928 catch(std::exception& e)
00929 {
00930 std::cout << e.what() << std::endl;
00931 }
00932 }
00933 }
00934 }
00935 }
00936
00937 }
00938
00939
00940
00941
00942
00943
00944
00945
00946
00947
00948
00949
00950
00951
00952
00953