Converting a Matlab Neural Network into a C++ Neural Network

I created a neural network with newff in Matlab for handwritten digit recognition.

I trained it to recognize only the 0 and 1 values in the images.

There are 3 layers: the input layer has 9 neurons, the hidden layer has 5 neurons, and the output layer has 1 neuron, with 9 inputs.

My outputs are 0.1 and 0.2, and the transfer function of every layer is "tansig".

I tested it in Matlab and the network works fine. Now I want to build this network in C++. I wrote the code and copied all the weights and biases (146 weights in total), but when I feed the same input data into the network, the output values are not correct.

Can anyone guide me?

Here's my network's code:

public class Neuron
{
    public Neuron()
    { }
    public Neuron(int SumOfInputs)
    {
        m_SumOfInputs = SumOfInputs;
    }
    public double act(double[] Input, double[] weight, double bias)
    {
        double tmp = bias;
        for (int i = 0; i < m_SumOfInputs; i++)
            tmp += (Input[i] * weight[i]);
        m_output = 1.0 / (1.0 + Math.Exp(-tmp));
        return m_output;
    }
    public double m_output;
    private int m_SumOfInputs;
};
public class Net
{
    public Net()
    {
        int i;
        //net1 , net2
        //initializing inputLayer Neurons
        for (i = 0; i < 9; i++)
            InputLayer[i] = new Neuron(9);
        //initializing HiddenLayer Neurons
        for (i = 0; i < 5; i++)
            HiddenLayer[i] = new Neuron(9);
        //initializing OutputLayer
        OutputLayer = new Neuron(5);
    }
    public double Calculate(double[] inputs)
    {
        double[] ILay_Outputs = new double[9];
        double[] HLay_Outputs = new double[5];
        //inputLayer acting
        ILay_Outputs[0] = InputLayer[0].act(inputs, IW1, Ib[0]);
        ILay_Outputs[1] = InputLayer[1].act(inputs, IW2, Ib[1]);
        ILay_Outputs[2] = InputLayer[2].act(inputs, IW3, Ib[2]);
        ILay_Outputs[3] = InputLayer[3].act(inputs, IW4, Ib[3]);
        ILay_Outputs[4] = InputLayer[4].act(inputs, IW5, Ib[4]);
        ILay_Outputs[5] = InputLayer[5].act(inputs, IW6, Ib[5]);
        ILay_Outputs[6] = InputLayer[6].act(inputs, IW7, Ib[6]);
        ILay_Outputs[7] = InputLayer[7].act(inputs, IW8, Ib[7]);
        ILay_Outputs[8] = InputLayer[8].act(inputs, IW9, Ib[8]);
        //HiddenLayer acting
        HLay_Outputs[0] = HiddenLayer[0].act(ILay_Outputs, HW1, Hb[0]);
        HLay_Outputs[1] = HiddenLayer[1].act(ILay_Outputs, HW2, Hb[1]);
        HLay_Outputs[2] = HiddenLayer[2].act(ILay_Outputs, HW3, Hb[2]);
        HLay_Outputs[3] = HiddenLayer[3].act(ILay_Outputs, HW4, Hb[3]);
        HLay_Outputs[4] = HiddenLayer[4].act(ILay_Outputs, HW5, Hb[4]);
        //OutputLayer acting
        OutputLayer.act(HLay_Outputs, OW, Ob);
        return OutputLayer.m_output;
    }
    //variables
    Neuron[] InputLayer = new Neuron[9];
    Neuron[] HiddenLayer = new Neuron[5];
    Neuron OutputLayer;
    //net2 tansig tansig tansig
    double[] IW1 = { 0.726312035124743, 1.01034015912570, 0.507178716484559, -0.254689455765290, 0.475299816659036, 0.0336358919735363, -0.715890843015230, 0.466632424349648, 0.565406467159982 };
    double[] IW2 = { 0.866482591050076, -0.672473224929341, 0.915599891389326, 0.310163265280920, -0.373812653648686, -0.0859927887021936, 0.0100063635393257, 0.816638798257382, -0.540771172965867 };
    double[] IW3 = { 0.138868216294952, 1.93121321568871, -0.564704445249800, 0.834275586326333, 3.08348295981989, 0.899715248285303, -0.661916798988641, 6.00562393127300, 6.11939776912678 };
    double[] IW4 = { 0.578089791487308, 0.885170493965113, -0.992514702569606, 0.415980526304333, -0.706140252063166, 0.442017877881589, -0.449053823645690, -0.0894051386719344, -0.348622179369911 };
    double[] IW5 = { -0.407756482945129, 0.0786764402198765, 0.972408690276837, -0.959955597431701, -0.977769442966978, 1.52121267506016, 0.503296357838885, -3.31593633455649, -3.47834004737816 };
    double[] IW6 = { -1.17474983226852, 0.870140308892922, 1.50545637070446, 0.369712493398677, -0.569857993006262, -0.732502911495791, -0.668984976457441, -1.48023312055586, -0.893472571240467 };
    double[] IW7 = { -0.860518592120001, -1.48432158859269, 0.957060799463945, -0.680797771869510, -0.270752283410268, -0.218766920514208, 0.168091770241510, -2.50326075864844, -0.800988078966455 };
    double[] IW8 = { 0.436492138260917, 0.280081066366966, 0.484813099857825, -0.310693876078844, 1.60359045377467, 1.57343220231689, -1.21552190886612, 2.03276547165735, 1.27245062411707 };
    double[] IW9 = { 1.66853306274827, -1.59142022586958, 0.862315766588855, 0.676048095028997, -2.22623540036057, -1.48036066273542, -0.0386781503608105, -5.18214728910353, -5.21258509200432 };
    double[] HW1 = { 0.577543862468449, 0.452264642610010, -0.869014797322399, 0.122435296258077, 0.507631314535324, 0.0386430216115630, -0.398222802253669, -0.614601040619812, 1.43324133164016 };
    double[] HW2 = { 0.163344332215885, 0.434728230081814, -3.04877964757120, -0.118300732191499, -2.63220585865390, 0.443163977179405, -2.11883915836372, 2.07955461474729, -3.94441429060856 };
    double[] HW3 = { -0.156103043064606, -0.482049683802527, 1.24788068138172, -1.05731056687422, -0.615321348655331, 0.214815967784408, 0.375762477817552, -0.728649292060764, -0.212151944122515 };
    double[] HW4 = { 1.78276088127139, 1.15086535250306, 1.25967219208841, -0.446026243031773, -3.94742837475153, -1.33311929047378, -2.09356929069216, 0.0736879745054291, 1.51472991137144 };
    double[] HW5 = { 0.744372844550077, 0.400815326319268, -4.94686055701529, 0.444773365537176, 2.65351865321717, 1.87143709824455, 1.74346707204902, -3.28220218001754, 5.78321274609173 };
    double[] OW = { -1.09112204235009, -7.13508015318964, -1.02533926874837, 3.80439015418632, -4.16711367340349 };
    double[] Ib =  {-1.77988445077976,
                -1.37323967952292,
                -0.547465218997906,
                0.331535304175263,
                -0.0167810612906040,
                0.734128501831859,
                -0.543321122358485,
                -1.13525462762255,
                1.82870615182942};
    double[] Hb =  {1.68321697741393,
                -0.862080862212137,
                -0.536310792063381,
                -0.772019935790668,
                1.51470472867250};
    double Ob = -0.156343477742835;
};

In your description you mention that you want to use the tansig activation function, but in your code you have an implementation of the logsig activation function. The tansig approximation is:

2/(1+Math.Exp(-2*tmp))-1
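As a minimal sketch, the act method from the question's Neuron class would look like this with tansig instead of logsig (only the activation line changes, everything else is kept from the posted code):

public double act(double[] Input, double[] weight, double bias)
{
    double tmp = bias;
    for (int i = 0; i < m_SumOfInputs; i++)
        tmp += (Input[i] * weight[i]);
    // tansig: tanh(x) = 2 / (1 + exp(-2x)) - 1, instead of logsig 1 / (1 + exp(-x))
    m_output = 2.0 / (1.0 + Math.Exp(-2.0 * tmp)) - 1.0;
    return m_output;
}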

I'm also not sure how you obtained the weights for the input layer; those are probably the weights of the hidden layer. Matlab does not generate weights for the input layer, because the inputs are connected directly to the hidden layer. net.IW holds the weights of the first (hidden) layer, and the weights of the subsequent layers (including the output layer) are given by net.LW.

Other than the above, I don't see any obvious bugs/errors in your code. Maybe try a simpler network first and train it to learn the good old XOR relation.

Finally, I'd like to mention that if you are writing this code for a microcontroller, it is easier to do in C and without objects. Your code will be smaller and faster. A step-by-step example is given here.

I found the problem.

In MATLAB, before the inputs go into the network, they all pass through a function called applyminmax in the .m file named mapminmax.m, and the output of that function is what actually enters the network.

After the simulation on the network is done, the output goes through a function called reverse in the same .m file. The output of that function is the final output of the neural network.
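To reproduce that preprocessing and postprocessing outside of MATLAB: by default mapminmax rescales each value to the range [-1, 1]. Below is a minimal C# sketch of that default mapping; the class and parameter names are just for illustration, and the xMin/xMax values must be taken from the trained network's process settings (they are not shown in the question):

public static class MapMinMax
{
    // Forward mapping applied to each raw input before it enters the network
    // (MATLAB default range: ymin = -1, ymax = 1).
    public static double Apply(double x, double xMin, double xMax)
    {
        return 2.0 * (x - xMin) / (xMax - xMin) - 1.0;
    }

    // Reverse mapping applied to the network output to recover the final value.
    public static double Reverse(double y, double xMin, double xMax)
    {
        return (y + 1.0) * (xMax - xMin) / 2.0 + xMin;
    }
}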