WSL 2 的 cudaMallocHost() 不能正常申请到 VM 的内存。也许是 WSL 2 上的 cuda 是 ubuntu20.04 的版本,不是 WSL 2 特供版。WSL 2 的 cuda 也有一些限制,详细见 WSL2 User guide 。
1. 什么是 Nsight System
我们先看下 Nsight System 官网对该工具的描述:
NVIDIA Nsight™ Systems is a system-wide performance analysis tool designed to visualize an application’s algorithms, help you identify the largest opportunities to optimize, and tune to scale efficiently across any quantity or size of CPUs and GPUs, from large servers to our smallest system on a chip (SoC).
如 gperftools 一样,这是个性能调优工具,聚焦在 N 家的 GPU 上,当然,CPU 也是在其性能分析的范围内。Nsight system 更多的时候是查看 Memory Stream(Host2Device, Device2Host) 和 计算 Kernel 之间的关系,查看有无合理的填充满流水线,更好的利用 GPU 的并行性。
Nsight system 主要是通过采样和追踪来做抓取系统信息:
- sampling 是硬件层面的实现 ,利用了Linux OS’ perf subsystem,跟Linux perf工具用的一样,周期性地停止目标程序(比如每100w个cycle),收集每个线程的 CPU Instruction Pointers(IP, 指令指针),便于了解某一时刻系统的执行状态。
- tracing 是精确地采集各个活动开始和结束的时间,便于了解系统各个环节的时间开销和运转情况。
2. 如何使用 Nsight System
我以 BGR 2 YUV 的例子(代码来自于[1])来展示了 Nsight system 的信息。该示例使用了两种方式:1. 单 Stream 执行;2. 16 Stream 执行(Stream 的数量没有明确的限制,但是貌似在我的机器上,性能最优的结果就是 16,应该 stream 多了以后就会被加入调度队列了)。
一般在调优的时候先使用 Nsight system 来大体的看一下同步,overlap 数据搬运和计算等是不是合理。对于 Kernel 的调优一般是在 Nsight compute 中。当然,该工具实际上也可以来监测 Graphic 相关的东西,不仅仅是只有 CUDA。
为了便利,直接使用 GUI 界面来操作,个人也推荐 GUI 启动,毕竟最终还是要看时间轴图来的直观。对于没装 GUI 的机器,也可以使用 SSH 远程连接它,便于操作。
对于 BGR 2 YUV 的例子,我们通过在 GUI 中设置程序的位置和程序的启动命令即可配置完成一个 Nsight system Project。(可以看到,这个perf工具还支持很多的信息统计,如 Vulkan 和 OpenGL)
在配置完采样信息,需要追踪的信息后,点击 Start 就愉快的开始程序的分析了。在分析后的 Timeline View 中,我们可以清晰的看到每个阶段的时间消耗。
比如在 BGR 2 YUV 的第一个例子中(上图,只使用单个Stream),从时间轴上可以看到并行性并没有起来,我们可以多开几个 Stream 让数据传输和计算并行起来。通过让每个 Stream 做不同的工作(数据搬运,计算),来最大化并行。如下图所示:
code:
[Click to expand]
主项目 CMake 设置
cmake_minimum_required(VERSION 3.18)
# CUDA is declared as a first-class language directly in project(),
# so no separate enable_language(CUDA) call is needed.
project(
cmake_learn
LANGUAGES CXX CUDA
)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON CACHE BOOL "")
# The legacy CUDA_VERSION_STRING / CUDA_INCLUDE_DIRS variables were never
# defined here (FindCUDA was never invoked), so the old message printed an
# empty version and include_directories() added nothing.  CUDAToolkit
# (CMake >= 3.17) provides the equivalent, supported variables.
find_package(CUDAToolkit REQUIRED)
message(STATUS "cuda version: " ${CUDAToolkit_VERSION})
include_directories(${CUDAToolkit_INCLUDE_DIRS})
add_subdirectory("./stream")
# Sub-project CMake configuration for the stream demo.
project(cuda_stream)
add_executable(cuda_stream main.cu)
# The previous `add_compile_options(--cuda-gpu-arch=sm_20)` was broken in
# three ways: --cuda-gpu-arch is a Clang flag that nvcc rejects; sm_20
# (Fermi) has been unsupported since CUDA 9; and add_compile_options()
# only affects targets created *after* the call, so it never applied to
# cuda_stream anyway.  CUDA_ARCHITECTURES (CMake >= 3.18) is the
# supported way to select GPU targets — adjust the list to your hardware.
set_target_properties(cuda_stream PROPERTIES CUDA_ARCHITECTURES "52;70;75")
main.cu
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <random>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef DEBUG
// CUDA_CALL(F): run a CUDA runtime call and abort with file/line on failure.
// Wrapped in do { } while (0) so the macro behaves as a single statement
// (the previous bare `if` form was a dangling-else hazard).  The error code
// returned by F itself is reported, instead of calling cudaGetLastError()
// (which also clears the sticky error as a side effect).
#define CUDA_CALL(F) do { \
    cudaError_t err_ = (F); \
    if (err_ != cudaSuccess) { \
        printf("Error %s at %s:%d\n", cudaGetErrorString(err_), \
               __FILE__, __LINE__); \
        exit(-1); \
    } \
} while (0)
// CUDA_CHECK(): probe for an asynchronous error (e.g. a failed kernel
// launch on the previous line).  cudaPeekAtLastError() reads the sticky
// error without clearing it; __LINE__-1 points at the launch site.
#define CUDA_CHECK() do { \
    cudaError_t err_ = cudaPeekAtLastError(); \
    if (err_ != cudaSuccess) { \
        printf("Error %s at %s:%d\n", cudaGetErrorString(err_), \
               __FILE__, __LINE__-1); \
        exit(-1); \
    } \
} while (0)
#else
// Release builds: pass the call through unchecked.
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
// Print basic properties (memory, warp size, limits) for every CUDA device.
void PrintDeviceInfo();
// Fill `buffer` with random BGRA pixels (B, G, R random; alpha fixed at 255).
// `dataSize` is in bytes, i.e. 4x the number of pixels.
void GenerateBgra8K(uint8_t* buffer, int dataSize);
// CPU reference: convert packed 8-bit BGRA to packed 8-bit YUV444.
void convertPixelFormatCpu(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels);
// GPU kernel: same BGRA -> YUV conversion, one pixel per thread (grid-stride).
__global__ void convertPixelFormat(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels);
int main() {
    PrintDeviceInfo();
    uint8_t* bgraBuffer;
    uint8_t* yuvBuffer;
    uint8_t* deviceBgraBuffer;
    uint8_t* deviceYuvBuffer;
    // 8K frame: 7680 x 4320 pixels, BGRA = 4 bytes/pixel, YUV444 = 3 bytes/pixel.
    const int dataSizeBgra = 7680 * 4320 * 4;
    const int dataSizeYuv = 7680 * 4320 * 3;
    // Pinned (page-locked) host memory: required for truly asynchronous
    // cudaMemcpyAsync and gives higher PCIe bandwidth.
    CUDA_CALL(cudaMallocHost(&bgraBuffer, dataSizeBgra));
    CUDA_CALL(cudaMallocHost(&yuvBuffer, dataSizeYuv));
    CUDA_CALL(cudaMalloc(&deviceBgraBuffer, dataSizeBgra));
    CUDA_CALL(cudaMalloc(&deviceYuvBuffer, dataSizeYuv));
    std::vector<uint8_t> yuvCpuBuffer(dataSizeYuv);
    cudaEvent_t start, stop;
    float elapsedTime;
    float elapsedTimeTotal;
    float dataRate;
    CUDA_CALL(cudaEventCreate(&start));
    CUDA_CALL(cudaEventCreate(&stop));
    std::cout << " " << std::endl;
    std::cout << "Generating 7680 x 4320 BRGA8888 image, data size: " << dataSizeBgra << std::endl;
    GenerateBgra8K(bgraBuffer, dataSizeBgra);

    // --- Phase 1: CPU reference implementation (timed with CUDA events on
    // the empty default stream, which approximates wall-clock time here). ---
    std::cout << " " << std::endl;
    std::cout << "Computing results using CPU." << std::endl;
    std::cout << " " << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    convertPixelFormatCpu(bgraBuffer, yuvCpuBuffer.data(), 7680*4320);
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    std::cout << " Whole process took " << elapsedTime << "ms." << std::endl;

    // --- Phase 2: GPU on the default stream — copy in, convert, copy out,
    // each step timed separately. ---
    std::cout << " " << std::endl;
    std::cout << "Computing results using GPU, default stream." << std::endl;
    std::cout << " " << std::endl;
    std::cout << " Move data to GPU." << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    CUDA_CALL(cudaMemcpy(deviceBgraBuffer, bgraBuffer, dataSizeBgra, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    dataRate = dataSizeBgra/(elapsedTime/1000.0)/1.0e9;
    elapsedTimeTotal = elapsedTime;
    std::cout << " Data transfer took " << elapsedTime << "ms." << std::endl;
    std::cout << " Performance is " << dataRate << "GB/s." << std::endl;
    std::cout << " Convert 8-bit BGRA to 8-bit YUV." << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    // 32400 blocks x 1024 threads == 33177600 == 7680*4320 pixels exactly.
    convertPixelFormat<<<32400, 1024>>>(deviceBgraBuffer, deviceYuvBuffer, 7680*4320);
    CUDA_CHECK();
    CUDA_CALL(cudaDeviceSynchronize());
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    dataRate = dataSizeBgra/(elapsedTime/1000.0)/1.0e9;
    elapsedTimeTotal += elapsedTime;
    std::cout << " Processing of 8K image took " << elapsedTime << "ms." << std::endl;
    std::cout << " Performance is " << dataRate << "GB/s." << std::endl;
    std::cout << " Move data to CPU." << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    CUDA_CALL(cudaMemcpy(yuvBuffer, deviceYuvBuffer, dataSizeYuv, cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    dataRate = dataSizeYuv/(elapsedTime/1000.0)/1.0e9;
    elapsedTimeTotal += elapsedTime;
    std::cout << " Data transfer took " << elapsedTime << "ms." << std::endl;
    std::cout << " Performance is " << dataRate << "GB/s." << std::endl;
    std::cout << " Whole process took " << elapsedTimeTotal << "ms." <<std::endl;
    std::cout << " Compare CPU and GPU results ..." << std::endl;
    bool foundMistake = false;
    for(int i=0; i<dataSizeYuv; i++){
        if(yuvCpuBuffer[i]!=yuvBuffer[i]){
            foundMistake = true;
            break;
        }
    }
    if(foundMistake){
        std::cout << " Results are NOT the same." << std::endl;
    } else {
        std::cout << " Results are the same." << std::endl;
    }

    // --- Phase 3: split the frame into nStreams chunks; each stream does its
    // own H2D copy, kernel, and D2H copy so transfers overlap with compute. ---
    const int nStreams = 16;
    std::cout << " " << std::endl;
    std::cout << "Computing results using GPU, using "<< nStreams <<" streams." << std::endl;
    std::cout << " " << std::endl;
    cudaStream_t streams[nStreams];
    std::cout << " Creating " << nStreams << " CUDA streams." << std::endl;
    for (int i = 0; i < nStreams; i++) {
        CUDA_CALL(cudaStreamCreate(&streams[i]));
    }
    int brgaOffset = 0;
    int yuvOffset = 0;
    const int brgaChunkSize = dataSizeBgra / nStreams;
    const int yuvChunkSize = dataSizeYuv / nStreams;
    CUDA_CALL(cudaEventRecord(start, 0));
    for(int i=0; i<nStreams; i++)
    {
        std::cout << " Launching stream " << i << "." << std::endl;
        brgaOffset = brgaChunkSize*i;
        yuvOffset = yuvChunkSize*i;
        CUDA_CALL(cudaMemcpyAsync( deviceBgraBuffer+brgaOffset,
                                   bgraBuffer+brgaOffset,
                                   brgaChunkSize,
                                   cudaMemcpyHostToDevice,
                                   streams[i] ));
        // brgaChunkSize/4 = pixels per chunk (4 bytes per BGRA pixel).
        convertPixelFormat<<<4096, 1024, 0, streams[i]>>>(deviceBgraBuffer+brgaOffset, deviceYuvBuffer+yuvOffset, brgaChunkSize/4);
        CUDA_CALL(cudaMemcpyAsync( yuvBuffer+yuvOffset,
                                   deviceYuvBuffer+yuvOffset,
                                   yuvChunkSize,
                                   cudaMemcpyDeviceToHost,
                                   streams[i] ));
    }
    CUDA_CHECK();
    CUDA_CALL(cudaDeviceSynchronize());
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    std::cout << " Whole process took " << elapsedTime << "ms." << std::endl;
    std::cout << " Compare CPU and GPU results ..." << std::endl;
    // BUGFIX: reset the flag — previously a mismatch in the first comparison
    // would incorrectly taint this second, independent comparison.
    foundMistake = false;
    for(int i=0; i<dataSizeYuv; i++){
        if(yuvCpuBuffer[i]!=yuvBuffer[i]){
            foundMistake = true;
            break;
        }
    }
    if(foundMistake){
        std::cout << " Results are NOT the same." << std::endl;
    } else {
        std::cout << " Results are the same." << std::endl;
    }
    // Release all CUDA resources (streams and events were previously leaked).
    for (int i = 0; i < nStreams; i++) {
        CUDA_CALL(cudaStreamDestroy(streams[i]));
    }
    CUDA_CALL(cudaEventDestroy(start));
    CUDA_CALL(cudaEventDestroy(stop));
    CUDA_CALL(cudaFreeHost(bgraBuffer));
    CUDA_CALL(cudaFreeHost(yuvBuffer));
    CUDA_CALL(cudaFree(deviceBgraBuffer));
    CUDA_CALL(cudaFree(deviceYuvBuffer));
    return 0;
}
// Print basic properties for every CUDA device visible to the runtime:
// global/shared memory, warp size, thread/grid limits and compute capability.
void PrintDeviceInfo(){
    int deviceCount = 0;
    // Check the return code: on a machine without a usable driver this call
    // fails and deviceCount would otherwise be read uninitialized-by-contract.
    if (cudaGetDeviceCount(&deviceCount) != cudaSuccess) {
        deviceCount = 0;
    }
    std::cout << "Number of device(s): " << deviceCount << std::endl;
    if (deviceCount == 0) {
        std::cout << "There is no device supporting CUDA" << std::endl;
        return;
    }
    cudaDeviceProp info;
    for(int i=0; i<deviceCount; i++){
        if (cudaGetDeviceProperties(&info, i) != cudaSuccess) {
            continue;  // skip devices whose properties cannot be queried
        }
        std::cout << "Device " << i << std::endl;
        std::cout << " Name: " << std::string(info.name) << std::endl;
        // typo fix: "Glocbal" -> "Global"
        std::cout << " Global memory: " << info.totalGlobalMem/1024.0/1024.0 << " MB"<< std::endl;
        std::cout << " Shared memory per block: " << info.sharedMemPerBlock/1024.0 << " KB"<< std::endl;
        std::cout << " Warp size: " << info.warpSize<< std::endl;
        std::cout << " Max thread per block: " << info.maxThreadsPerBlock<< std::endl;
        std::cout << " Thread dimension limits: " << info.maxThreadsDim[0]<< " x "
                  << info.maxThreadsDim[1]<< " x "
                  << info.maxThreadsDim[2]<< std::endl;
        std::cout << " Max grid size: " << info.maxGridSize[0]<< " x "
                  << info.maxGridSize[1]<< " x "
                  << info.maxGridSize[2]<< std::endl;
        std::cout << " Compute capability: " << info.major << "." << info.minor << std::endl;
    }
}
// Fill `buffer` with random BGRA pixels: B, G and R are uniform in [0, 255],
// the alpha byte is fixed at 255 (fully opaque).  `dataSize` is the buffer
// length in bytes, i.e. 4x the pixel count; a dataSize of 0 is a no-op.
// Seeded from std::random_device, so output differs between runs.
void GenerateBgra8K(uint8_t* buffer, int dataSize){
    std::random_device seedSource;
    std::mt19937 engine(seedSource());
    std::uniform_int_distribution<> channel(0, 255);
    const int numPixels = dataSize / 4;
    for (int pixel = 0; pixel < numPixels; ++pixel) {
        uint8_t* px = buffer + pixel * 4;
        px[0] = channel(engine);  // B
        px[1] = channel(engine);  // G
        px[2] = channel(engine);  // R
        px[3] = 255;              // A — always opaque
    }
}
// CPU reference: convert packed 8-bit BGRA (4 bytes/pixel) to packed 8-bit
// YUV444 (3 bytes/pixel) using BT.601-style integer coefficients.
// Intermediates are computed in `int`: the Y accumulator can reach
// 220*255 = 56100, which overflows int16 — the original short3 version
// relied on implementation-defined narrowing plus an arithmetic right
// shift of a negative value (the low byte happened to come out the same).
// This version is portable and produces byte-identical output.
void convertPixelFormatCpu(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels){
    for (int idx = 0; idx < numPixels; idx++) {
        const int b = inputBgra[idx*4];
        const int g = inputBgra[idx*4+1];
        const int r = inputBgra[idx*4+2];
        const int y = 66*r + 129*g + 25*b;
        const int u = -38*r + -74*g + 112*b;
        const int v = 112*r + -94*g + -18*b;
        outputYuv[idx*3]   = static_cast<uint8_t>((y >> 8) + 16);
        outputYuv[idx*3+1] = static_cast<uint8_t>((u >> 8) + 128);
        outputYuv[idx*3+2] = static_cast<uint8_t>((v >> 8) + 128);
    }
}
// GPU kernel: packed 8-bit BGRA -> packed 8-bit YUV444, one pixel per thread.
// Uses a canonical grid-stride loop, so correctness does not depend on the
// launch configuration covering numPixels exactly.  (The previous
// `while (idx <= numPixels)` plus inner `if (idx < numPixels)` guard did the
// same work with a redundant, confusing double bounds check.)
// Expects a 1D grid of 1D blocks; numPixels*4 input bytes, numPixels*3 output.
__global__ void convertPixelFormat(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels){
    const int stride = gridDim.x * blockDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < numPixels; idx += stride) {
        short3 yuv16;
        yuv16.x = 66*inputBgra[idx*4+2] + 129*inputBgra[idx*4+1] + 25*inputBgra[idx*4];
        yuv16.y = -38*inputBgra[idx*4+2] + -74*inputBgra[idx*4+1] + 112*inputBgra[idx*4];
        yuv16.z = 112*inputBgra[idx*4+2] + -94*inputBgra[idx*4+1] + -18*inputBgra[idx*4];
        char3 yuv8;
        yuv8.x = (yuv16.x>>8)+16;
        yuv8.y = (yuv16.y>>8)+128;
        yuv8.z = (yuv16.z>>8)+128;
        // 3-byte vector store of one YUV pixel.
        *(reinterpret_cast<char3*>(&outputYuv[idx*3])) = yuv8;
    }
}
Reference:
[1] CUDA随笔之Stream的使用