How to use a pointer in nested functions in C++


I am trying to fix this program. So far I have only made changes to the function "dotprod", but something goes wrong when the "activity" function is used: I cannot get that part to run and produce the output, and the error I get is "Program received signal SIGSEGV, Segmentation fault". It seems to be a pointer problem, so I have tried several changes, but I always end up with an error. If anyone could give me a suggestion, please. Thank you.

#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>

double dotprod( int n, double *vec1, double *vec2)
// n Length of vectors
// vec1  One of the vectors to be dotted
// vec2  The other vector
{
 int k, m;
 double sum;
 sum=0.0;   //Will cumulate dot product here
 k=n/4;     //Divide into this many groups of 4
 m=n%4;     //This is the remainder of the division

 while(k--){       //Do each group of 4
  sum += *(vec1) * *(vec2);
  for (int j=1; j<4; j++){
  sum += *(vec1+j) * *(vec2+j);
  }
  *(vec1) += 4;
  *(vec2) += 4;
  }
 while (m--){   //Do the remainder
  sum += *(vec1++) * *(vec2++);
 }
 return sum;
}

void activity(          //Implement Equation
  double *input,        //This neuron's input vector, ninputs long
  double *coefs,        //Weight vector, ninputs+1 long (bias is at end)
  double *output,       //Achieved activation of this neuron
  int ninputs,          //Number of inputs
  int outlin            //Activation function is identity if nonzero, else logistic
  )
{
  double sum;

  double sumtotal;
  sum=dotprod(ninputs,(double *)input,(double *)coefs);
  sumtotal=sum+coefs[ninputs];   //Bias term
    double param, resultado;
    param = sumtotal;
    resultado = exp (-param);
    printf ("The exponential value of %f is %f.n", param, resultado );
  if(outlin)
    *(output)=sum;
   else
    *(output)=1.0/(1.0+param);
}

static void trial_thr(
  double *input,        //input vector n_model_inputs long
  int n_all,            //Number of layers, including output, not including input
  int n_model_inputs,   //Number of inputs to the model
  double *outputs,      //Output vector of the model, ntarg long
  int ntarg,            //Number of outputs
  int *nhid_all,        //nhid_all[i] is the number of hidden neurons in hidden layer i
  double *weigths_opt[],//Weigths_opt[i] points to the weight vector for hidden layer i
  double *hid_act[],    //hid_act[i] points to the vector of activations of hidden layer i
  double *final_layer_weigths,  //Weigths of final layer
  int classifier        //If nonzero use SoftMax output; else use linear output
  )
{
  int i, ilayer;
  double sum;
    for(ilayer=0;ilayer<n_all;ilayer++){
     if(ilayer==0 && n_all==1){        //Direct input to output? (No hidden)
       for(i=0;i<ntarg;i++)
         activity((double *)input,final_layer_weigths+i*(n_model_inputs+1),
                  outputs+i,n_model_inputs,1);
       }
     else if(ilayer==0){               //First hidden layer?
        for(i=0;i<nhid_all[ilayer];i++){
            std::cout<< "Contador : " << i << std::endl;
            activity((double *)input,weigths_opt[ilayer]+i*(n_model_inputs+1),
                    hid_act[ilayer]+i,n_model_inputs,0);
        }
     }
     else if(ilayer<n_all-1){          //Subsequent hiden layer?
       for(i=0;i<nhid_all[ilayer];i++)
         activity(hid_act[ilayer-1],weigths_opt[ilayer]+i*(nhid_all[ilayer-1]+1),
                  hid_act[ilayer]+i,nhid_all[ilayer-1],0);
       }
     else{
       for(i=0;i<ntarg;i++)
         activity(hid_act[ilayer-1],final_layer_weigths+i*(nhid_all[ilayer-1]+1),
                  outputs+i,nhid_all[ilayer-1],1);
       }
     }
    if(classifier){ //Classifier is always SoftMax (Equation (2.12) on page 22)
       sum=0.0;
       for(i=0;i<ntarg;i++){     //For all outputs
         if(outputs[i]<300.0)    //SoftMax can occasionally produce huge outputs
            outputs[i]=exp(outputs[i]);
         else
            outputs[i]=exp(300.0);
         sum+=outputs[i];
         }
       for(i=0;i<ntarg;i++)
         outputs[i]/=sum;
       }
    }

    int main(void)  {
    double b[4][1] = { 1, 2, 5, 7 };
    double c[4][1] = { 1, 4, 8, 2 };
    double result;
    double a[2][19];
    int i;
    i=0;
    a[0][0] = -2;
    a[0][1] = -2;
    a[0][2] = -1;
    a[0][3] = 0;
    a[0][4] = 1;
    a[0][5] = 1;
    a[0][6] = 2;
    a[0][7] = 2;
    a[0][8] = 2;
    a[0][9] = 3;
    a[1][0] = 2;
    a[1][1] = 4;
    a[1][2] = -4;
    a[1][3] = 3;
    a[1][4] = -2;
    a[1][5] = 0;
    a[1][6] = -3;
    a[1][7] = 0;
    a[1][8] = 2;
    a[1][9] = 0;
    int n_all=2;
    int n_model_inputs=10;
    double *outputs;
    int ntarg=1;
    int nhid_all=3;
    double *weigths_op;
    double *hid_act;
    double final_layer_weigths;
    int classifier=1;

    for(i;i<=9;i++){
        printf("%4.5f, %4.5fn",a[0][i],a[1][i]);
    }
    result=dotprod(4,*c,*b);
    printf("n This is the result : %4.5f n ", result);
    trial_thr( (double *)a, n_all, n_model_inputs, outputs, ntarg, &nhid_all, &weigths_op, &hid_act,
             &final_layer_weigths, classifier);
    system("PAUSE");
}
The variable double *weigths_opt is never initialized, but you pass it first to trial_thr and then, in the first-hidden-layer branch, on to activity, which calls dotprod and crashes the first time it tries to read through the pointer at the line

 sum += *(vec1) * *(vec2);
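
One way to get past the crash is to give trial_thr real storage to work with before calling it. This is only a minimal sketch under the sizes already set in main (n_all=2, one hidden layer of 3 neurons, 10 model inputs, 1 output); the buffer names are invented and would replace the uninitialized pointers declared in main:

// Assumed sizes: nhid_all[0]=3, n_model_inputs=10, ntarg=1, following the
// indexing that trial_thr itself uses for each layer.
double hid_act0[3];                         // activations of hidden layer 0
double hid_weights0[3 * (10 + 1)] = {0};    // 3 neurons x (10 inputs + bias)
double final_weights[1 * (3 + 1)] = {0};    // 1 output x (3 hidden units + bias)
double out[1];                              // model output
double *weigths_opt[1] = { hid_weights0 };  // weigths_opt[0] -> weights of hidden layer 0
double *hid_act[1]     = { hid_act0 };      // hid_act[0]     -> activations of hidden layer 0
int nhid_all[1] = { 3 };
trial_thr((double *)a, 2, 10, out, 1, nhid_all, weigths_opt, hid_act, final_weights, 1);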

There is also a bug in dotprod. It should be

double dotprod( int n, double *vec1, double *vec2)
  // n Length of vectors
  // vec1  One of the vectors to be dotted
  // vec2  The other vector
{
  int k, m;
  double sum;
  sum=0.0;   //Will cumulate dot product here
  k=n/4;     //Divide into this many groups of 4
  m=n%4;     //This is the remainder of the division

  while(k--){       //Do each group of 4
    sum += *(vec1) * *(vec2);
    for (int j=1; j<4; j++){
      sum += *(vec1+j) * *(vec2+j);
    }
    vec1 += 4; //HERE: you need to increase the address not the value.
    vec2 += 4;
  }
  while (m--){   //Do the remainder
    sum += *(vec1++) * *(vec2++);
  }
  return sum; 
}

Although I would rather write the first while loop as

while(k--){       //Do each group of 4
  for (int j=0;j<4;++j){
    sum += *(vec1++) * *(vec2++);
  }
}
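
As a quick sanity check, calling the fixed dotprod on the values of b and c from main (flattened to plain 1-D arrays here, which is my own simplification) should print 63 and leave both arrays untouched, since only the local pointer copies are advanced:

double b[4] = { 1, 2, 5, 7 };
double c[4] = { 1, 4, 8, 2 };
printf("%4.5f\n", dotprod(4, c, b));   // 1*1 + 4*2 + 8*5 + 2*7 = 63.00000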