• Matrix multiplication using shared memory (in practice it is not much faster; two short sketches for device-side timing and for verifying the result follow the listing)


    #include "cuda_runtime.h"
    #include "device_launch_parameters.h"
    #include "device_functions.h"
    
    
    #include <stdio.h>
    #include <windows.h>
    
    #include <m_tools.h>   // author's helper header: provides the TIME_INIT / TIME_MARK / TIME_PRINT macros used below
    
    
    
    cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
    
    
    #define TILE_WIDTH 16  
    
    __global__ void MatrixMulKernle(int m, int n, int k, int *A, int *B, int *C)
    {
    	// Shared-memory tiles; one copy lives in each block
    	__shared__ int ds_A[TILE_WIDTH][TILE_WIDTH];
    	__shared__ int ds_B[TILE_WIDTH][TILE_WIDTH];
    
    	// Shorthand notation: the six indices below (bx, by, tx, ty and iy, ix) are what differ between parallel threads
    	int bx = blockIdx.x;
    	int by = blockIdx.y;
    	int tx = threadIdx.x;
    	int ty = threadIdx.y;
    
    	// Row and column of the element of C this thread computes
    	int iy = by * TILE_WIDTH + ty;
    	int ix = bx * TILE_WIDTH + tx;
    
    	// Accumulator for C[iy][ix]
    	int Cvalue = 0;
    
    	// Load tiles of A and B and accumulate the result phase by phase.
    	// Do not return early for out-of-range threads: every thread in the block
    	// must reach __syncthreads(), so out-of-range loads are zero-padded and
    	// the final store is guarded instead.
    	for (int t = 0; t < (n + TILE_WIDTH - 1) / TILE_WIDTH; ++t)
    	{
    		int aCol = t * TILE_WIDTH + tx;
    		int bRow = t * TILE_WIDTH + ty;
    		ds_A[tx][ty] = (iy < m && aCol < n) ? A[iy * n + aCol] : 0;
    		ds_B[tx][ty] = (bRow < n && ix < k) ? B[bRow * k + ix] : 0;
    		__syncthreads();
    
    		for (int i = 0; i < TILE_WIDTH; ++i)
    			Cvalue += ds_A[i][ty] * ds_B[tx][i];    // read both operands from shared memory
    		__syncthreads();    // wait before the next phase overwrites the tiles
    	}
    
    	if (iy < m && ix < k)
    		C[iy * k + ix] = Cvalue;    // write the result once, after all phases
    }
    
    // Matrix multiplication without shared memory; the 100 x 200 x 100 sizes are hard-coded
    __global__ void addKernel(int *c, const int *a, const int *b)
    {
    	int ix = blockIdx.x * blockDim.x + threadIdx.x,
    		iy = blockIdx.y * blockDim.y + threadIdx.y;
    	if (ix >= 100 || iy >= 100) {
    		return;
    	}
    
    	int sum = 0;
    
    	for (int i = 0; i != 200; ++i) {
    
    		int ta = a[iy * 200 + i];    // A[iy][i]; A has 200 columns, so the row stride is 200
    
    		int tb = b[i * 100 + ix];    // B[i][ix]; B has 100 columns
    
    		sum += ta * tb;
    	}
    	c[iy * 100 + ix] = sum;
    }
    
    int main()
    {
    	const int arow = 100;
    	const int acol = 200;
    	const int brow = 200;
    	const int bcol = 100;
    
    	const int arraySize = arow*acol;    // 20000; brow*bcol happens to be the same
    	
    	int * a = new int[arraySize];       // A: 100 x 200
    	int * b = new int[arraySize];       // B: 200 x 100 (also 20000 elements)
    	int * c = new int[arraySize/2];     // C: 100 x 100
    
    
    	for (int j = 0; j != arow; ++j) {
    		for (int i = 0; i != acol; ++i) {
    			a[j*acol + i] = i;
    		}
    	}
    
    	for (int j = 0; j != brow; ++j) {
    		for (int i = 0; i != bcol; ++i) {
    			b[j*bcol + i] = i;
    		}
    	}
        addWithCuda(c, a, b, arraySize);
    
    	
        cudaDeviceReset();
    
    
    	printf("c0=%d c1=%d c[3,50]=%d 
    ", c[0], c[1],c[3*100+50]);
    	delete[] a;
    	delete[] b;
    	delete[] c;
    
    	system("pause");
        return 0;
    }
    
    // Helper that allocates device memory, copies the inputs, launches both matrix-multiply kernels, and copies the 100 x 100 result back.
    cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
    {
        int *dev_a = 0;
        int *dev_b = 0;
        int *dev_c = 0;
        cudaError_t cudaStatus;
    
        // Choose which GPU to run on, change this on a multi-GPU system.
        cudaStatus = cudaSetDevice(0);
        cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
        cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
        cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    
        cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    
    	int thread_x = 100;
    	int thread_y = 100;
    	dim3 block(TILE_WIDTH, TILE_WIDTH);
    	int grid_w = (thread_x + block.x - 1) / block.x;
    	int grid_h = (thread_y + block.y - 1) / block.y;
    	dim3 grid(grid_w, grid_h);
        // Launch the kernels: one thread per element of the 100 x 100 result matrix.
        // Kernel launches are asynchronous, so the host-side TIME_MARK timestamps only
        // approximate the kernel time; see the cudaEvent sketch after the listing.
    
    	
    	TIME_INIT;
    	TIME_MARK("t1");
    	for (int i = 0; i != 10000; ++i)
    		addKernel << < grid, block >> > (dev_c, dev_a, dev_b);    // ~486 ms for the 10000 launches (author's measurement)
    	TIME_MARK("t2");
    	for (int i = 0; i != 10000; ++i)
    		MatrixMulKernle << < grid, block >> >(100, 200, 100, dev_a, dev_b, dev_c);    // ~1069 ms for the 10000 launches (author's measurement)
    	TIME_MARK("t3");
    	TIME_PRINT;
        cudaStatus = cudaGetLastError();
        cudaStatus = cudaDeviceSynchronize();
        cudaStatus = cudaMemcpy(c, dev_c, size/2 * sizeof(int), cudaMemcpyDeviceToHost);
    
    Error:
        cudaFree(dev_c);
        cudaFree(dev_a);
        cudaFree(dev_b);
        
        return cudaStatus;
    }
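
The TIME_INIT / TIME_MARK / TIME_PRINT macros above come from the author's m_tools.h and take their timestamps on the host, while kernel launches are asynchronous. Below is a minimal sketch of measuring the same loop with CUDA events instead, which records the elapsed time on the device; timeTiledKernelMs is a hypothetical helper, and dev_a, dev_b, dev_c are assumed to be allocated and filled exactly as in addWithCuda above.

    // Sketch: device-side timing of the tiled kernel with CUDA events (hypothetical helper).
    float timeTiledKernelMs(dim3 grid, dim3 block, int *dev_a, int *dev_b, int *dev_c, int iters)
    {
    	cudaEvent_t start, stop;
    	cudaEventCreate(&start);
    	cudaEventCreate(&stop);
    
    	cudaEventRecord(start);                    // recorded in the same stream as the launches
    	for (int i = 0; i < iters; ++i)
    		MatrixMulKernle<<<grid, block>>>(100, 200, 100, dev_a, dev_b, dev_c);
    	cudaEventRecord(stop);
    	cudaEventSynchronize(stop);                // wait for all queued launches to finish
    
    	float ms = 0.0f;
    	cudaEventElapsedTime(&ms, start, stop);    // elapsed GPU time in milliseconds
    
    	cudaEventDestroy(start);
    	cudaEventDestroy(stop);
    	return ms;
    }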
    

      
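A quick way to confirm that both kernels compute the same thing is to compare the copied-back c against a plain CPU reference multiply. A minimal sketch, assuming the 100 x 200 x 100 sizes and the host arrays a, b, c from main; verifyResult is a hypothetical helper that is not part of the original code.

    // Sketch: CPU reference multiplication and element-wise comparison (hypothetical helper).
    bool verifyResult(const int *a, const int *b, const int *c)
    {
    	const int m = 100, n = 200, k = 100;       // A is m x n, B is n x k, C is m x k
    	for (int row = 0; row < m; ++row) {
    		for (int col = 0; col < k; ++col) {
    			int ref = 0;
    			for (int i = 0; i < n; ++i)
    				ref += a[row * n + i] * b[i * k + col];
    			if (c[row * k + col] != ref)
    				return false;                  // mismatch against the GPU result
    		}
    	}
    	return true;
    }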

  • Original post: https://www.cnblogs.com/luoyinjie/p/10846113.html