TensorRT & CUDA Self-Study Notes 007: The Runtime Library and a Matrix Addition Demo
The Runtime Library
To be filled in tomorrow; I'm off to prepare for an interview first.
Matrix Addition Demo
cudaMalloc and cudaMemcpy
They mirror C's malloc and memcpy, except that the memory they operate on is not host memory but the device-side GPU memory: cudaMalloc allocates a buffer on the device, and cudaMemcpy copies data between host and device, with the direction selected by its last argument.
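A minimal sketch of the allocate/copy/free pattern (the buffer name and size here are just for illustration; error checking is elided):

    // Hypothetical buffer, only to illustrate the pattern.
    float hostBuf[16] = {0};
    float* deviceBuf = nullptr;
    cudaMalloc((void**)&deviceBuf, sizeof(hostBuf));   // allocate on the device
    cudaMemcpy(deviceBuf, hostBuf, sizeof(hostBuf),
               cudaMemcpyHostToDevice);                // host -> device
    // ... launch a kernel that reads/writes deviceBuf ...
    cudaMemcpy(hostBuf, deviceBuf, sizeof(hostBuf),
               cudaMemcpyDeviceToHost);                // device -> host
    cudaFree(deviceBuf);                               // release device memory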
cudaSetDevice
cudaSetDevice is the CUDA API function that binds a device to the calling host thread; every subsequent CUDA operation issued from that thread then targets the selected device.
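A minimal usage sketch (device index 0 is assumed here; on a multi-GPU machine any index below the count returned by cudaGetDeviceCount is valid):

    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);   // how many CUDA devices are visible
    if (deviceCount > 0)
    {
        cudaSetDevice(0);               // bind this host thread to device 0
        // all CUDA calls on this thread now target device 0
    }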
error_check
error_check is a helper function I wrote that checks whether a CUDA API call failed; on failure it prints the reason, the source file path, and the line number.
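It is used by wrapping each CUDA call and passing __FILE__ and __LINE__, so the report points at the call site:

    error_check(cudaSetDevice(0), __FILE__, __LINE__);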
main.cu
#include"common/common.h"void data_inital(float* data,int N){time_t t;srand((unsigned)time(&t));std::cout<<"data: ";//初始化数据for(int i=0;i<N;i++){data[i] = (float)(rand()%0xff)/10.0f;std::cout<<data[i]<<" ";}std::cout<<std::endl;return;
};__global__ void add(float* a, float* b,float* c,int N){int threadID = threadIdx.y*blockDim.x+threadIdx.x;if(threadID<N){c[threadID] = a[threadID]+b[threadID];}
}int main(int argc, char** argv){int deviceCount {0};cudaDeviceProp deviceProp;int driverVersion {0};int runtimeVersion {0};device_information(&deviceCount,&deviceProp,&driverVersion,&runtimeVersion);std::cout<<std::endl;cudaError_t error = error_check(cudaSetDevice(0),__FILE__,__LINE__);//针对主机线程指定Device,接下来主机中这个线程的后续的cuda平台的所有操作都是针对于这个设备的。if(error == cudaSuccess){std::cout<<"cudaSetDevice success!"<<std::endl;std::cout<<"set on device:"<< deviceProp.name << std::endl;}else{std::cout<<"cudaSetDevice failed!"<<std::endl;return -1;}int numElem = 16;size_t nBytes = numElem * sizeof(float);// 初始化主机端数据缓冲区float *hostDataA, *hostDataB, *gpuRef;hostDataA = (float*)malloc(nBytes);hostDataB = (float*)malloc(nBytes);gpuRef = (float*)malloc(nBytes);if (hostDataA == NULL || hostDataB == NULL || gpuRef == NULL){std::cout<<"malloc failed!"<<std::endl;return -1;}data_inital(hostDataA,numElem); //初始化数据data_inital(hostDataB,numElem); //初始化数据memset(gpuRef, 0, nBytes);// 初始化设备端数据缓冲区float *deviceDataA, *deviceDataB, *deviceDataC;cudaMalloc((float**)&deviceDataA, nBytes);//注意,cudaMalloc的修饰符为__host____device___,也就是说host和device都可以使用这个cudaAPI函数cudaMalloc((float**)&deviceDataB, nBytes);cudaMalloc((float**)&deviceDataC, nBytes);if (deviceDataA == NULL || deviceDataB == NULL || deviceDataC == NULL){std::cout<<"cudaMalloc failed!"<<std::endl;free(hostDataA);free(hostDataB);free(gpuRef);return -1;}if(cudaSuccess == cudaMemcpy(deviceDataA,hostDataA,nBytes,cudaMemcpyHostToDevice) &&cudaSuccess == cudaMemcpy(deviceDataB,hostDataB,nBytes,cudaMemcpyHostToDevice) && cudaSuccess == cudaMemcpy(deviceDataC,gpuRef,nBytes,cudaMemcpyHostToDevice)) ///注意,cudaMemcpy的修饰符为__host__,也就是说只有host可以使用这个cudaAPI函数{std::cout<<"successfully copy data from host to device "<< deviceProp.name <<std::endl;}else{std::cout<<"copy data from host to device"<< deviceProp.name <<" failed!" <<std::endl;free(hostDataA);free(hostDataB);free(gpuRef);return -1;}//加载核函数dim3 block (4,4);dim3 grid (1,1);add<<<grid,block>>>(deviceDataA,deviceDataB,deviceDataC,numElem);//将数据从设备端拷贝回主机端cudaMemcpy(gpuRef,deviceDataC,nBytes,cudaMemcpyDeviceToHost);//打印运算结果std::cout<<"result: ";for(size_t i = 0; i < numElem; i++)std::cout<<gpuRef[i] << " ";std::cout<<std::endl;//释放资源free(hostDataA);free(hostDataB);free(gpuRef);cudaFree(deviceDataA);cudaFree(deviceDataB);cudaFree(deviceDataC);cudaDeviceReset();return 0;
}
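One thing worth noting: the add kernel computes its index from threadIdx alone, so it is only correct for a single-block launch like the 4x4 block used above. A hedged sketch of the more general pattern, in case you want to scale numElem beyond one block (the kernel name and the launch sizes below are illustrative, not part of the original demo):

    __global__ void add_general(float* a, float* b, float* c, int N)
    {
        // include the block offset so the index is unique across the whole grid
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < N)
        {
            c[i] = a[i] + b[i];
        }
    }

    // launch with enough blocks to cover N elements:
    // int threadsPerBlock = 256;
    // int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    // add_general<<<blocks, threadsPerBlock>>>(deviceDataA, deviceDataB, deviceDataC, N);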
common.h
#pragma once

#include <sys/time.h>
#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cstdlib>   // srand(), rand()
#include <ctime>     // time(), time_t
#include <cstring>   // memset()

// Checks whether a CUDA API call failed; on failure, prints the error name,
// its description, and the file path and line number of the call site.
cudaError_t error_check(cudaError_t status, const char* filePathName, int lineNumber)
{
    if (status != cudaSuccess)
    {
        std::cout << "CUDA API error " << cudaGetErrorName(status)
                  << " at " << filePathName << " in line " << lineNumber << std::endl;
        std::cout << "description: " << cudaGetErrorString(status) << std::endl;
    }
    return status;
}

// Enumerates the visible CUDA devices and prints their key properties.
bool device_information(int* ptr_deviceCount, cudaDeviceProp* ptr_deviceProp,
                        int* ptr_driverVersion, int* ptr_runtimeVersion)
{
    cudaGetDeviceCount(ptr_deviceCount);
    if (*ptr_deviceCount == 0)
    {
        std::cerr << "error: no devices supporting CUDA.\n";
        return false;
    }
    std::cout << "Detected " << *ptr_deviceCount << " CUDA Capable device(s)\n";

    for (int i {0}; i < *ptr_deviceCount; i++)
    {
        cudaSetDevice(i);
        error_check(cudaGetDeviceProperties(ptr_deviceProp, i), __FILE__, __LINE__);
        std::cout << "Device " << i << " name: " << ptr_deviceProp->name << std::endl;
        error_check(cudaDriverGetVersion(ptr_driverVersion), __FILE__, __LINE__);
        error_check(cudaRuntimeGetVersion(ptr_runtimeVersion), __FILE__, __LINE__);
        std::cout << "CUDA Driver Version / Runtime Version: "
                  << *ptr_driverVersion / 1000 << "." << (*ptr_driverVersion % 100) / 10 << "." << *ptr_driverVersion % 10
                  << " / "
                  << *ptr_runtimeVersion / 1000 << "." << (*ptr_runtimeVersion % 100) / 10 << "." << *ptr_runtimeVersion % 10
                  << std::endl;
        std::cout << "CUDA Capability Major/Minor version number: " << ptr_deviceProp->major << "." << ptr_deviceProp->minor << std::endl;
        std::cout << "Total amount of global memory (bytes): " << ptr_deviceProp->totalGlobalMem << std::endl;
        std::cout << "Total amount of constant memory (bytes): " << ptr_deviceProp->totalConstMem << std::endl;
        std::cout << "Total amount of shared memory per block (bytes): " << ptr_deviceProp->sharedMemPerBlock << std::endl;
        std::cout << "Total number of registers available per block: " << ptr_deviceProp->regsPerBlock << std::endl;
        std::cout << "Warp size: " << ptr_deviceProp->warpSize << std::endl;
        std::cout << "Maximum number of threads per block: " << ptr_deviceProp->maxThreadsPerBlock << std::endl;
        std::cout << "Maximum sizes of each dimension of a block: "
                  << ptr_deviceProp->maxThreadsDim[0] << " x " << ptr_deviceProp->maxThreadsDim[1] << " x " << ptr_deviceProp->maxThreadsDim[2] << std::endl;
        std::cout << "Maximum sizes of each dimension of a grid: "
                  << ptr_deviceProp->maxGridSize[0] << " x " << ptr_deviceProp->maxGridSize[1] << " x " << ptr_deviceProp->maxGridSize[2] << std::endl;
        std::cout << "Maximum memory pitch (bytes): " << ptr_deviceProp->memPitch << std::endl;
        std::cout << "Texture alignment (bytes): " << ptr_deviceProp->textureAlignment << std::endl;
        std::cout << "Concurrent copy and execution: " << ptr_deviceProp->deviceOverlap << std::endl;
        std::cout << "Run time limit on kernels: " << ptr_deviceProp->kernelExecTimeoutEnabled << std::endl;
        std::cout << "Integrated: " << ptr_deviceProp->integrated << std::endl;
        std::cout << "Support host page-locked memory mapping: " << ptr_deviceProp->canMapHostMemory << std::endl;
        std::cout << "Alignment requirement for Surfaces (bytes): " << ptr_deviceProp->surfaceAlignment << std::endl;
        std::cout << "Device has ECC support: " << ptr_deviceProp->ECCEnabled << std::endl;
        std::cout << "Device is using TCC driver model: " << ptr_deviceProp->tccDriver << std::endl;
        std::cout << "Device supports Unified Addressing (UVA): " << ptr_deviceProp->unifiedAddressing << std::endl;
        std::cout << "Device supports Compute Preemption: " << ptr_deviceProp->computePreemptionSupported << std::endl;
    }
    return true;
}
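One possible refinement (my suggestion, not part of the original notes): wrap error_check in a macro so that __FILE__ and __LINE__ are filled in automatically at every call site:

    // Hypothetical convenience macro; expands to the same error_check call.
    #define CUDA_CHECK(call) error_check((call), __FILE__, __LINE__)

    // usage: CUDA_CHECK(cudaSetDevice(0));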