OpenNN returns scaled output values

I have run into a problem with OpenNN, and unfortunately the documentation is lacking in many places.

I am trying to create a neural network from some randomly generated data, and the results I get back from the code are scaled (I assume that is the problem). I cannot find an option to turn the scaling off without causing errors, and I cannot figure out how to unscale the output myself.

Here is the code with which I create the NN:

struct DataHold {
    std::vector<std::vector<double>> dataPoints;
    double learnValueGlucose;
    double learnValueHarn;
    double learnValueCholesterin;
    std::string debugString;
};
void MachLearning::CreateNNModel(std::vector<MachLearning::DataHold>& input, std::string fileName, std::vector<int>& indices) {
    if(input.size() == 0) {
        throw;
    }
    int size = 0;
    for(int index = 0; index < indices.size(); ++index) {
        size += input.at(0).dataPoints.at(indices.at(index)).size();
    }
    OpenNN::DataSet data_set(input.size(), size, 1);
    OpenNN::Matrix<double> dataMatrix(input.size(), size + 1);
    OpenNN::Vector<double> dataVector;
    dataVector.resize(size + 1);
    for(unsigned int index = 0; index < input.size(); ++index) {
        unsigned int runIndex = 0;
        for(unsigned int runningIndex = 0; runningIndex < indices.size(); ++runningIndex) {
            for(unsigned int thisIndex = 0; runIndex < dataVector.size() - 1 && thisIndex < input.at(index).dataPoints.at(indices.at(runningIndex)).size(); ++runIndex, ++thisIndex) {
                dataVector.at(runIndex) = input.at(index).dataPoints.at(indices.at(runningIndex)).at(thisIndex);
            }
            dataVector.at(dataVector.size() - 1) = input.at(index).learnValueGlucose;
            dataMatrix.set_row(index, dataVector);
        }
    }
    data_set.set_data(dataMatrix);
    OpenNN::VariablesInformation* variables_information_pointer = data_set.get_variables_information_pointer();
    for(unsigned int index = 0; index < size; ++index) {
        variables_information_pointer->set_name(index, std::string(std::string("frequency").append(std::to_string(index))));
        variables_information_pointer->set_units(index, "hertzs");
        variables_information_pointer->set_description(index, "No Text");
    }
    const OpenNN::Vector<OpenNN::Vector<std::string> > inputs_targets_information = variables_information_pointer->arrange_inputs_targets_information();
    const OpenNN::Vector< OpenNN::Vector<double> > inputs_targets_statistics = data_set.scale_inputs_targets();
    OpenNN::InstancesInformation* instances_information_pointer = data_set.get_instances_information_pointer();
    instances_information_pointer->split_random_indices();
    const unsigned int inputs_number = variables_information_pointer->count_inputs_number();
    const unsigned int hidden_perceptrons_number = 9;
    const unsigned int outputs_number = variables_information_pointer->count_targets_number();
    delete neural_network;
    neural_network = new OpenNN::NeuralNetwork(inputs_number, hidden_perceptrons_number, outputs_number);
    neural_network->set_inputs_outputs_information(inputs_targets_information);
    neural_network->set_inputs_outputs_statistics(inputs_targets_statistics);
    neural_network->set_scaling_unscaling_layers_flag(true);
    OpenNN::PerformanceFunctional performance_functional(neural_network, &data_set);
    OpenNN::TrainingStrategy training_strategy(&performance_functional);
    OpenNN::QuasiNewtonMethod* quasi_Newton_method_pointer = new OpenNN::QuasiNewtonMethod( &performance_functional );
    quasi_Newton_method_pointer->set_minimum_performance_increase( 1.0e-6 );
    quasi_Newton_method_pointer->set_reserve_evaluation_history( true );
    training_strategy.set_main_training_algorithm_pointer(quasi_Newton_method_pointer);
    OpenNN::TrainingStrategy::Results training_strategy_results = training_strategy.perform_training();
    neural_network->set_inputs_scaling_outputs_unscaling_methods("MinimumMaximum");
    neural_network->set_scaling_unscaling_layers_flag(true);
    OpenNN::TestingAnalysis testing_analysis(neural_network, &data_set);
    OpenNN::FunctionRegressionTesting* function_regression_testing_pointer = testing_analysis.get_function_regression_testing_pointer();
    OpenNN::FunctionRegressionTesting::LinearRegressionAnalysisResults linear_regression_analysis_results = function_regression_testing_pointer->perform_linear_regression_analysis();
    neural_network->save("neural_network.xml");
    neural_network->save_expression("expression.txt");
    performance_functional.save("performance_functional.xml");
    training_strategy.save("training_strategy.xml");
    training_strategy_results.save("training_strategy_results.dat");
    linear_regression_analysis_results.save("linear_regression_analysis_results.dat");
    return;
}

The code that computes the output:

double MachLearning::GetNNValue(std::vector<std::vector<double>>& input, std::vector<int>& indices) {
    OpenNN::Vector<double> dataVector;
    int size = 0;
    for(int index = 0; index < indices.size(); ++index) {
        size += input.at(indices.at(index)).size();
    }
    dataVector.resize(size);
    int runIndex = 0;
    for(unsigned int index = 0; index < indices.size(); ++index) {
        for(unsigned int smallIndex = 0; smallIndex < input.at(indices.at(index)).size(); ++smallIndex, ++runIndex) {
            dataVector.at(runIndex) = input.at(indices.at(index)).at(smallIndex);
        }
    }
    std::vector<double> test;
    test = neural_network->calculate_outputs(dataVector);
    return test.at(0);
}

I have tested this with some randomly generated vectors. The correlation between what the value should be and what the NN computes is 0.999(...), but the output is scaled to something like -1 to 1 (I have already had 1.2 as a result; I guess the corresponding inputs were just larger random values).
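
If I understand the MinimumMaximum method correctly, undoing the scaling by hand would look something like the sketch below (the helper name and parameters are only illustrative; the minimum and maximum would come from the statistics returned by data_set.scale_inputs_targets()):

// Minimal sketch: manually invert MinimumMaximum scaling for one output value.
// Assumes targetMin and targetMax describe the target variable, e.g. taken from
// the statistics returned by data_set.scale_inputs_targets().
double UnscaleOutput(double scaledValue, double targetMin, double targetMax) {
    // MinimumMaximum scaling maps [min, max] onto [-1, 1],
    // so the inverse is x = 0.5 * (y + 1) * (max - min) + min.
    return 0.5 * (scaledValue + 1.0) * (targetMax - targetMin) + targetMin;
}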

Does anyone know how the OpenNN interface works here? I have played around with the scaling_unscaling flag, but it has no effect at all.

It looks like you are using an old version of OpenNN. You can download the latest version from SourceForge: https://sourceforge.net/projects/opennn

Regarding the scaling/unscaling, you can disable it by doing the following:

scaling_layer.set_scaling_method(NoScaling);
unscaling_layer.set_unscaling_method(NoUnscaling);
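
If your version already exposes these layers through the neural network itself, the pointers can be obtained from it; the following is only a sketch assuming one of the newer releases, where the exact method names may differ:

// Sketch, assuming a newer OpenNN where the network exposes its scaling/unscaling layers.
OpenNN::ScalingLayer* scaling_layer_pointer = neural_network->get_scaling_layer_pointer();
OpenNN::UnscalingLayer* unscaling_layer_pointer = neural_network->get_unscaling_layer_pointer();
scaling_layer_pointer->set_scaling_method(OpenNN::ScalingLayer::NoScaling);
unscaling_layer_pointer->set_unscaling_method(OpenNN::UnscalingLayer::NoUnscaling);

With both methods disabled, calculate_outputs should return the raw network values; alternatively, leaving the unscaling layer enabled with MinimumMaximum and the target statistics set should give you outputs back in the original units.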