LNK1169 Error I can't find

LNK1169: one or more multiply defined symbols found

My project is a parallel random number generator that uses MPI and a parallel prefix algorithm. I have looked up many ways to resolve the LNK1169 error. To prevent it I made many variables static, and I searched for multiply defined variables but could not find any. I have no variables in header files that could be defined more than once. I would appreciate it if someone could help me track down the error. I'm fairly sure it occurs somewhere in functions.cpp, because everything worked before I tried to implement the parallel_prefix function.

Here is the LNK2005 output:

LNK2005 "类std::vector>,类std::allocator>>> __cdecl parallel_prefix(类std::vector>,类std::allocator>>>,类std::allocator>,类std::allocator>,类std::allocator>>>>>,int,int)"(parallel_prefix@@YA AV ? vector@V ?美元$ vector@HV ? allocator@H@std@@@std@@V美元? allocator@V美元? vector@HV美元? allocator@H@std@@@std@@@2@@std@@V美元? vector@V美元? vector@V美元? vector@HV美元? allocator@H@std@@@std@@V美元? allocator@V美元? vector@HV美元? allocator@H@std@@@std@@@2@@std@@V美元? allocator@V美元? vector@V美元? vector@HV美元? allocator@H@std@@@std@@V美元? allocator@V美元? vector@HV美元? allocator@H@std@@@std@@@2@@std@@@2@@2@HH@Z美元)已经定义在函数。obj RandomNumberGenerator

Here is my code.

RandomNumberGenerator.cpp

#include "functions.cpp"
int main(int argc, char *argv[])
{
    // Establishes what rank it is, and how many processes are running.
    static int rank, p, n, per_Process;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    static vector<int> Broadcast_data;
    n = 100;
    per_Process = n / p;
    // The first and second arguments are constants for number generation, the third is a large prime to mod by, and the fourth is a random seed. x1 is calculated based off x0.
    // All provided by the user except x1.
    // Rank 0 broadcasts the data to all processes.
    if (rank == 0)
    {
        for (static int i = 1; i < 5; i++)
        {
            Broadcast_data.push_back(std::atoi(argv[i]));
        }
        Broadcast_data.push_back(std::atoi(argv[1]) * std::atoi(argv[4]) % std::atoi(argv[3]));
        // NOTE: THIS PUSH BACK IS HOW MANY RANDOM NUMBERS WILL BE GENERATED
        Broadcast_data.push_back(n);
        cout << "Rank " << rank << " Broadcast Data: ";
        for (static int i = 0; i < 6; i++)
        {
            cout << Broadcast_data[i] << " ";
        }
        cout << endl;
    }
    else
    {
        Broadcast_data.resize(6);
    }
    MPI_Bcast(Broadcast_data.data(), 6, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    // Initialize an array of n/p values at every process.  Each of the n/p values is the matrix M.
    // M is this 2 dimensional array:
    // [ a 1 ]
    // [ b 0 ]
    static vector<vector<int>> M;
    M.resize(2);
    M[0].resize(2);
    M[1].resize(2);
    M[0][0] = Broadcast_data[0];
    M[0][1] = Broadcast_data[1];
    M[1][0] = 1;
    M[1][1] = 0;
    // Now we must initialize the array of these M values.  Notation might get complex here
    // as we are dealing with 3D arrays.
    static vector<vector<vector<int>>> M_values;
    M_values.resize(per_Process);
    for (static int i = 0; i < per_Process; i++)
    {
        M_values.push_back(M);
    }
    // Now we are ready for the parallel prefix operation.  Note that the operator here
    // is matrix multiplication.
    static vector<vector<int>> prefix;
    prefix = parallel_prefix(M_values, rank, p);

    MPI_Finalize();
    return 0;
}

functions.cpp

#include <mpi.h>
#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <time.h>
using namespace std;
// This is parallel prefix with the operator being matrix multiplication
vector<vector<int>> parallel_prefix(vector<vector<vector<int>>> Matrices, int rank, int p)
{
    // The first step is a local multiplication of all M values.
    // In a matrix represented by:
    // [ a b ]
    // [ c d ]
    // The new matrix will be this:
    // [ a^2+bc ab+bd ]
    // [ ca+dc cb+d^2 ]
    // So the first step will be to complete this operation once for every matrix M in M_values
    static vector<vector<int>> local_sum;
    local_sum = Matrices[0];
    for (static int i = 1; i < Matrices.size(); i++)
    {
        vector<vector<int>> temp_vector;
        temp_vector = local_sum;
        temp_vector[0][0] = local_sum[0][0] * Matrices[i][0][0] + local_sum[1][0] * Matrices[i][0][1];
        temp_vector[0][1] = local_sum[0][1] * Matrices[i][0][0] + local_sum[1][1] * Matrices[i][0][1];
        temp_vector[1][0] = local_sum[0][0] * Matrices[i][1][0] + local_sum[0][1] * Matrices[i][1][1];
        temp_vector[1][1] = local_sum[0][1] * Matrices[i][1][0] + local_sum[1][1] * Matrices[i][1][1];
        local_sum = temp_vector;
    }
    // Now that all the local sums have been computed we can start step 2: communication.
    // Determine how many steps it will take
    int steps = 0;
    while (int j = 1 < p)
    {
        j *= 2;
        steps++;
    }
    while (int k = 0 < steps)
    {
        // First determine the rank's mate.
        static int mate;
        mate = rank | (1u << steps);
        // Now we send the local sum to mate, and receive our mate's local sum.
        // First modify the local sum vector to a vector that can be sent.
        // Send vector syntax is [ a c b d ]
        static vector<int> send_vector, recv_vector;
        send_vector.resize(4);
        recv_vector.resize(4);
        send_vector[0] = local_sum[0][0];
        send_vector[1] = local_sum[0][1];
        send_vector[2] = local_sum[1][0];
        send_vector[3] = local_sum[1][1];
        // Send the vector to your mate, and receive a vector from your mate.
        static MPI_Status status;
        MPI_Send(send_vector.data(), 4, MPI_INT, mate, 0, MPI_COMM_WORLD);
        MPI_Recv(recv_vector.data(), 4, MPI_INT, mate, 1, MPI_COMM_WORLD, &status);
        // Update the local sum if your mate rank is lower than your rank.
        if (mate < rank)
        {
            static vector<vector<int>> temp_vector;
            temp_vector = local_sum;
            temp_vector[0][0] = local_sum[0][0] * recv_vector[0] + local_sum[1][0] * recv_vector[1];
            temp_vector[0][1] = local_sum[0][1] * recv_vector[0] + local_sum[1][1] * recv_vector[1];
            temp_vector[1][0] = local_sum[0][0] * recv_vector[2] + local_sum[0][1] * recv_vector[3];
            temp_vector[1][1] = local_sum[0][1] * recv_vector[2] + local_sum[1][1] * recv_vector[3];
            local_sum = temp_vector;
        }
        MPI_Barrier(MPI_COMM_WORLD);
        k++;
        // After completion of this loop the local sum is the parallel prefix output for each process.
    }

    return local_sum;
}

You #include functions.cpp in your main .cpp file, and functions.cpp is most likely also compiled as part of your project. This compiles the contents of functions.cpp twice, so the linker sees two definitions of parallel_prefix.

Don't include functions.cpp in main. Use a functions.h that declares the function instead.
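
As a rough sketch of that fix (the header name functions.h is just a placeholder I chose, not something from the original project), the declaration moves into a small header and both source files include only that header:

functions.h

#pragma once
#include <vector>

// Declaration only; the definition stays in functions.cpp.
std::vector<std::vector<int>> parallel_prefix(std::vector<std::vector<std::vector<int>>> Matrices, int rank, int p);

functions.cpp then adds #include "functions.h" at the top (keeping its existing #include <mpi.h> and the other headers), and RandomNumberGenerator.cpp replaces #include "functions.cpp" with #include "functions.h". With both .cpp files compiled separately by the project, parallel_prefix is defined exactly once, and the LNK2005/LNK1169 errors should go away.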