CUDA and gcc compatibility problem

I got this error:

/usr/local/cuda-5.0/bin/../include/host_config.h:82:2: error: #error -- unsupported GNU version! gcc 4.7 and up are not supported!
make: *** [src/Throughput.o] Error 1

In host_config.h, compatibility is only guaranteed up to gcc 4.6:

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 6)

#error -- unsupported GNU version! gcc 4.7 and up are not supported!
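
As a sanity check (my own addition, not part of the original project or build logs), a minimal program like the one below prints the version macros that host_config.h tests, which shows which side of that check the host compiler falls on:

#include <stdio.h>

int main(void) {
#ifdef __GNUC__
    /* With gcc 4.7 this prints 4 and 7, so the second clause of the check
     * above (__GNUC__ == 4 && __GNUC_MINOR__ > 6) is true and the #error
     * fires. With gcc 4.6 it prints 4 and 6 and the check passes. */
    printf("__GNUC__ = %d, __GNUC_MINOR__ = %d\n", __GNUC__, __GNUC_MINOR__);
#else
    printf("not a GNU compiler\n");
#endif
    return 0;
}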

I have both 4.6 and 4.7 installed:

elect@elect-desktop:/usr/local/cuda-5.0/bin$ gcc
gcc             gcc-4.7         gcc-nm-4.7
gcc-4.6         gcc-ar-4.7      gcc-ranlib-4.7

Looking around the internet, the suggestion is to add a link to gcc-4.6 in the CUDA bin directory.

So I did:

elect@elect-desktop:/usr/local/cuda-5.0/bin$ sudo ln -s /usr/bin/gcc-4.6 gcc

And now I get a different error:

**** Build of configuration Debug for project Throughput ****

make all 
Building file: ../src/Throughput.cu
Invoking: NVCC Compiler
nvcc -G -g -O0 -gencode arch=compute_20,code=sm_20 -odir "src" -M -o "src/Throughput.d"     "../src/Throughput.cu"
gcc: error trying to exec 'cc1plus': execvp: No such file or directory
make: *** [src/Throughput.o] Error 1

**** Build Finished ****

Googling again didn't turn up any clear solution (downgrading gcc, etc.).

So I'm asking here: what is the problem now, given that CUDA is supposed to be compatible with gcc-4.6?

My system:

Ubuntu 12.10 64b
cuda_5.0.35_linux_64_ubuntu11.10-1

Here is the tutorial code I am currently trying to compile:

/**
 * Copyright 1993-2012 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>

static const int WORK_SIZE = 256;

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) {                                          \
    cudaError_t _m_cudaStat = value;                                        \
    if (_m_cudaStat != cudaSuccess) {                                       \
        fprintf(stderr, "Error %s at line %d in file %s\n",                 \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);       \
        exit(1);                                                            \
    } }

__device__ unsigned int bitreverse(unsigned int number) {
    number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
    number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
    number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
    return number;
}

/**
 * CUDA kernel function that reverses the order of bits in each element of the array.
 */
__global__ void bitreverse(void *data) {
    unsigned int *idata = (unsigned int*) data;
    idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
}

/**
 * Host function that prepares data array and passes it to the CUDA kernel.
 */
int main(void) {
    void *d = NULL;
    int i;
    unsigned int idata[WORK_SIZE], odata[WORK_SIZE];

    for (i = 0; i < WORK_SIZE; i++)
        idata[i] = (unsigned int) i;

    CUDA_CHECK_RETURN(cudaMalloc((void**) &d, sizeof(int) * WORK_SIZE));

    CUDA_CHECK_RETURN(cudaMemcpy(d, idata, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice));

    bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);

    CUDA_CHECK_RETURN(cudaThreadSynchronize());
    // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(cudaGetLastError());
    CUDA_CHECK_RETURN(cudaMemcpy(odata, d, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost));

    for (i = 0; i < WORK_SIZE; i++)
        printf("Input value: %u, device output: %u\n", idata[i], odata[i]);

    CUDA_CHECK_RETURN(cudaFree((void*) d));
    CUDA_CHECK_RETURN(cudaDeviceReset());

    return 0;
}
