TensorRT and CUDA Self-Study Notes 007: The Runtime Library and a Matrix Addition Demo

2024-02-26 21:28


The Runtime library

To be filled in tomorrow; I'm off to prepare for an interview first.

Matrix addition demo

cudaMalloc and cudaMemcpy

These are functionally equivalent to C's malloc and memcpy; the difference is that they operate not on host-side memory but on the device-side memory space (GPU "video memory"). cudaMemcpy additionally takes an explicit direction flag, such as cudaMemcpyHostToDevice, because the copy crosses the host/device boundary.
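A minimal sketch of the pattern, separate from the full demo below (the names h_buf, d_buf and N here are placeholders of my own, not part of the demo):

// allocate device memory, copy a host buffer over, and copy results back
const int N = 16;
size_t nBytes = N * sizeof(float);
float h_buf[N] = {0};                                      // host-side buffer
float* d_buf = NULL;                                       // device-side buffer
cudaMalloc((void**)&d_buf, nBytes);                        // counterpart of malloc, allocates on the device
cudaMemcpy(d_buf, h_buf, nBytes, cudaMemcpyHostToDevice);  // counterpart of memcpy, direction given explicitly
// ... launch kernels that read and write d_buf ...
cudaMemcpy(h_buf, d_buf, nBytes, cudaMemcpyDeviceToHost);  // copy results back to the host
cudaFree(d_buf);                                           // counterpart of free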

cudaSetDevice

cudaSetDevice is the CUDA API function that selects a device for the calling host thread; from that point on, all CUDA operations issued by that host thread target the selected device.
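A minimal sketch of the typical call sequence (error handling is omitted here; the demo below wraps the call in error_check):

int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);  // how many CUDA devices are visible
if (deviceCount > 0) {
    cudaSetDevice(0);              // bind this host thread to device 0;
                                   // later CUDA calls from this thread target device 0
}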

error_check

error_check is a helper function I wrote to check whether a CUDA API call you made failed or reported an error. If it did, error_check prints the reason for the failure together with the file path and the line number.
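Its intended usage is simply to wrap a CUDA API call, passing the call's return value together with the current file and line, for example (d_buf and nBytes are placeholder names):

error_check(cudaSetDevice(0), __FILE__, __LINE__);
error_check(cudaMalloc((void**)&d_buf, nBytes), __FILE__, __LINE__);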

main.cu

#include"common/common.h"void data_inital(float* data,int N){time_t t;srand((unsigned)time(&t));std::cout<<"data: ";//初始化数据for(int i=0;i<N;i++){data[i] = (float)(rand()%0xff)/10.0f;std::cout<<data[i]<<" ";}std::cout<<std::endl;return;
};__global__ void add(float* a, float* b,float* c,int N){int threadID = threadIdx.y*blockDim.x+threadIdx.x;if(threadID<N){c[threadID] = a[threadID]+b[threadID];}
}int main(int argc, char** argv){int deviceCount {0};cudaDeviceProp deviceProp;int driverVersion {0};int runtimeVersion {0};device_information(&deviceCount,&deviceProp,&driverVersion,&runtimeVersion);std::cout<<std::endl;cudaError_t error = error_check(cudaSetDevice(0),__FILE__,__LINE__);//针对主机线程指定Device,接下来主机中这个线程的后续的cuda平台的所有操作都是针对于这个设备的。if(error == cudaSuccess){std::cout<<"cudaSetDevice success!"<<std::endl;std::cout<<"set on device:"<< deviceProp.name << std::endl;}else{std::cout<<"cudaSetDevice failed!"<<std::endl;return -1;}int numElem = 16;size_t nBytes = numElem * sizeof(float);// 初始化主机端数据缓冲区float *hostDataA, *hostDataB, *gpuRef;hostDataA = (float*)malloc(nBytes);hostDataB = (float*)malloc(nBytes);gpuRef = (float*)malloc(nBytes);if (hostDataA == NULL || hostDataB == NULL || gpuRef == NULL){std::cout<<"malloc failed!"<<std::endl;return -1;}data_inital(hostDataA,numElem);    //初始化数据data_inital(hostDataB,numElem);    //初始化数据memset(gpuRef, 0, nBytes);// 初始化设备端数据缓冲区float *deviceDataA, *deviceDataB, *deviceDataC;cudaMalloc((float**)&deviceDataA, nBytes);//注意,cudaMalloc的修饰符为__host____device___,也就是说host和device都可以使用这个cudaAPI函数cudaMalloc((float**)&deviceDataB, nBytes);cudaMalloc((float**)&deviceDataC, nBytes);if (deviceDataA == NULL || deviceDataB == NULL || deviceDataC == NULL){std::cout<<"cudaMalloc failed!"<<std::endl;free(hostDataA);free(hostDataB);free(gpuRef);return -1;}if(cudaSuccess ==  cudaMemcpy(deviceDataA,hostDataA,nBytes,cudaMemcpyHostToDevice) &&cudaSuccess ==  cudaMemcpy(deviceDataB,hostDataB,nBytes,cudaMemcpyHostToDevice) && cudaSuccess ==  cudaMemcpy(deviceDataC,gpuRef,nBytes,cudaMemcpyHostToDevice)) ///注意,cudaMemcpy的修饰符为__host__,也就是说只有host可以使用这个cudaAPI函数{std::cout<<"successfully copy data from host to device "<< deviceProp.name <<std::endl;}else{std::cout<<"copy data from host to device"<< deviceProp.name <<" failed!" <<std::endl;free(hostDataA);free(hostDataB);free(gpuRef);return -1;}//加载核函数dim3 block (4,4);dim3 grid (1,1);add<<<grid,block>>>(deviceDataA,deviceDataB,deviceDataC,numElem);//将数据从设备端拷贝回主机端cudaMemcpy(gpuRef,deviceDataC,nBytes,cudaMemcpyDeviceToHost);//打印运算结果std::cout<<"result: ";for(size_t i = 0; i < numElem; i++)std::cout<<gpuRef[i] << " ";std::cout<<std::endl;//释放资源free(hostDataA);free(hostDataB);free(gpuRef);cudaFree(deviceDataA);cudaFree(deviceDataB);cudaFree(deviceDataC);cudaDeviceReset();return 0;
}
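Assuming the CUDA toolkit is installed and common.h lives in a common/ directory next to main.cu, the demo should build with something like nvcc main.cu -o matrix_add. With a 4x4 block and a 1x1 grid there are exactly 16 threads, so each of the 16 elements is handled by one thread.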

common.h

#include<sys/time.h>
#include<iostream>
#include<cuda_runtime.h>
#include<stdio.h>//用于检查你的cuda函数是否调用失败
cudaError_t error_check(cudaError_t status,const char *filePathName,int lineNumber){if(status !=cudaSuccess){std::cout << "CUDA API error " << cudaGetErrorName(status) << " at " << filePathName << " in line " << lineNumber << std::endl;std::cout << "description :" << cudaGetErrorString(status) << std::endl;return status;}return status;
}bool device_information(int* ptr_devicCount,cudaDeviceProp* ptr_deviceProp,int* ptr_driverVersion,int* ptr_runtimeVersion){cudaGetDeviceCount(ptr_devicCount);if(*ptr_devicCount == 0){std::cerr << "error: no devices supporting CUDA.\n";return false;}else{std::cout << "Detected " << *ptr_devicCount << " CUDA Capable device(s)\n";}for(int i {0}; i < *ptr_devicCount; i++){cudaSetDevice(i);error_check(cudaGetDeviceProperties(ptr_deviceProp,i),__FILE__,__LINE__);std::cout << "Device " << i << " name: " << ptr_deviceProp->name << std::endl;error_check(cudaDriverGetVersion(ptr_driverVersion),__FILE__,__LINE__);error_check(cudaRuntimeGetVersion(ptr_runtimeVersion),__FILE__,__LINE__);std::cout << "CUDA Driver Version / Runtime Version: " << *ptr_driverVersion/1000 << "." << (*ptr_driverVersion%100)/10 << "." << *ptr_driverVersion%10 << "/" << *ptr_runtimeVersion/1000 << "."<< (*ptr_runtimeVersion%100)/10 << "." << *ptr_runtimeVersion%10 << std::endl;std::cout << "CUDA Capability Major/Minor version number: " << ptr_deviceProp->major << "." << ptr_deviceProp->minor << std::endl;std::cout << "Total amount of global memory: " << ptr_deviceProp->totalGlobalMem << std::endl;std::cout << "Total amount of constant memory: " << ptr_deviceProp->totalConstMem << std::endl;std::cout << "Total amount of shared memory per block: " << ptr_deviceProp->sharedMemPerBlock << std::endl;std::cout << "Total number of registers available per block: " << ptr_deviceProp->regsPerBlock << std::endl;std::cout << "Warp size: " << ptr_deviceProp->warpSize << std::endl;std::cout << "Maximum number of threads per block: " << ptr_deviceProp->maxThreadsPerBlock << std::endl;std::cout << "Maximum sizes of each dimension of a block: " << ptr_deviceProp->maxThreadsDim[0] << " x " << ptr_deviceProp->maxThreadsDim[1] << " x " << ptr_deviceProp->maxThreadsDim[2] << std::endl;std::cout << "Maximum sizes of each dimension of a grid: " << ptr_deviceProp->maxGridSize[0] << " x "<< ptr_deviceProp->maxGridSize[1] << " x " << ptr_deviceProp->maxGridSize[2] << std::endl;std::cout << "Maximum memory pitch: " << ptr_deviceProp->memPitch << std::endl;std::cout << "Texture alignment: " << ptr_deviceProp->textureAlignment << std::endl;std::cout << "Concurrent copy and execution: " << ptr_deviceProp->deviceOverlap << std::endl;std::cout << "Run time limit on kernels: " << ptr_deviceProp->kernelExecTimeoutEnabled << std::endl;std::cout << "Integrated: " << ptr_deviceProp->integrated << std::endl;std::cout << "Support host page-locked memory mapping: " << ptr_deviceProp->canMapHostMemory << std::endl;std::cout << "Alignment requirement for Surfaces: " << ptr_deviceProp->surfaceAlignment << std::endl;std::cout << "Device has ECC support: " << ptr_deviceProp->ECCEnabled << std::endl;std::cout << "Device is using TCC driver model: " << ptr_deviceProp->tccDriver << std::endl;std::cout << "Device supports Unified Addressing (UVA): " << ptr_deviceProp->unifiedAddressing << std::endl;std::cout << "Device supports Compute Preemption: " << ptr_deviceProp->computePreemptionSupported << std::endl;}return true;
}
