• CUDA vector addition (WebGPU course MP1)


    http://webgpu.hwu.crhc.illinois.edu/

    // MP 1
    #include    <cstdio>
    #include    <cstdlib>
    #include    <wb.h>
    
    // Elementwise vector addition kernel: out[idx] = in1[idx] + in2[idx].
    // Launch layout: 1-D grid of 1-D blocks, one thread per element.
    // Threads whose flat index falls past `len` do nothing (grid tail guard).
    __global__ void vecAdd(float * in1, float * in2, float * out, int len) {
        int idx = threadIdx.x + blockDim.x * blockIdx.x;
        if (idx >= len)
            return;
        out[idx] = in1[idx] + in2[idx];
    }
    
    // Abort with a readable message if a CUDA runtime call failed.
    // `what` names the failing operation for the error report.
    static void checkCuda(cudaError_t err, const char * what) {
        if (err != cudaSuccess) {
            fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }

    // Host driver: import two input vectors, add them on the GPU, and hand
    // the result to the wb grading harness. Timing sections and wb* calls
    // follow the MP1 template.
    int main(int argc, char ** argv) {
        wbArg_t args;
        int inputLength;
        float * hostInput1;
        float * hostInput2;
        float * hostOutput;
        float * deviceInput1;
        float * deviceInput2;
        float * deviceOutput;

        args = wbArg_read(argc, argv);

        wbTime_start(Generic, "Importing data and creating memory on host");
        hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
        hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
        hostOutput = (float *) malloc(inputLength * sizeof(float));
        wbTime_stop(Generic, "Importing data and creating memory on host");

        wbLog(TRACE, "The input length is ", inputLength);

        size_t bytes = inputLength * sizeof(float);

        wbTime_start(GPU, "Allocating GPU memory.");
        //@@ Allocate GPU memory here
        checkCuda(cudaMalloc((void**)&deviceInput1, bytes), "cudaMalloc deviceInput1");
        checkCuda(cudaMalloc((void**)&deviceInput2, bytes), "cudaMalloc deviceInput2");
        checkCuda(cudaMalloc((void**)&deviceOutput, bytes), "cudaMalloc deviceOutput");
        wbTime_stop(GPU, "Allocating GPU memory.");

        wbTime_start(GPU, "Copying input memory to the GPU.");
        //@@ Copy memory to the GPU here
        checkCuda(cudaMemcpy(deviceInput1, hostInput1, bytes, cudaMemcpyHostToDevice),
                  "cudaMemcpy H2D input1");
        checkCuda(cudaMemcpy(deviceInput2, hostInput2, bytes, cudaMemcpyHostToDevice),
                  "cudaMemcpy H2D input2");
        wbTime_stop(GPU, "Copying input memory to the GPU.");

        //@@ Initialize the grid and block dimensions here
        // 256 threads per block; ceil-divide so the grid covers every element.
        dim3 DimGrid((inputLength - 1) / 256 + 1, 1, 1);
        dim3 DimBlock(256, 1, 1);

        wbTime_start(Compute, "Performing CUDA computation");
        //@@ Launch the GPU Kernel here
        vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
        // Launch-configuration errors surface via cudaGetLastError();
        // in-kernel faults surface at the synchronize.
        checkCuda(cudaGetLastError(), "vecAdd launch");
        // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the supported replacement.
        checkCuda(cudaDeviceSynchronize(), "cudaDeviceSynchronize");
        wbTime_stop(Compute, "Performing CUDA computation");

        wbTime_start(Copy, "Copying output memory to the CPU");
        //@@ Copy the GPU memory back to the CPU here
        checkCuda(cudaMemcpy(hostOutput, deviceOutput, bytes, cudaMemcpyDeviceToHost),
                  "cudaMemcpy D2H output");
        wbTime_stop(Copy, "Copying output memory to the CPU");

        wbTime_start(GPU, "Freeing GPU Memory");
        //@@ Free the GPU memory here
        checkCuda(cudaFree(deviceInput1), "cudaFree deviceInput1");
        checkCuda(cudaFree(deviceInput2), "cudaFree deviceInput2");
        checkCuda(cudaFree(deviceOutput), "cudaFree deviceOutput");
        wbTime_stop(GPU, "Freeing GPU Memory");

        wbSolution(args, hostOutput, inputLength);

        free(hostInput1);
        free(hostInput2);
        free(hostOutput);

        return 0;
    }
    View Code
  • 相关阅读:
    【响应式Web设计实践 #BOOK#】
    【JS】(+﹏+)~
    -_-#【邮件】qq邮箱不显示图片
    -_-#【Markdown】
    51Nod——N1284 2 3 5 7的倍数
    51Nod——N1118 机器人走方格
    洛谷——P1014 Cantor表
    洛谷—— P1434 滑雪
    洛谷——P1443 马的遍历
    python(24)- 面向对象进阶
  • 原文地址:https://www.cnblogs.com/jh818012/p/3533972.html
Copyright © 2020-2023  润新知