I used cudnn to test sgemm for C[stride x stride] = A[stride x stride] x B[stride x stride] below,
Configuration
- GPU: T1000/SM_75
- cuda-12.0.1/driver-535 installed (via the multiverse repos on ubuntu-24.04)
- cudnn-9.2.1 tarball installed
sgemm_cudnn_test.cu
#include <assert.h>
#include <cuda_runtime.h>
#include <cudnn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define ONES(mat, stride) for (int i = 0; i < stride * stride; mat[i++] = 1)
#define ZEROS(mat, stride) for (int i = 0; i < stride * stride; mat[i++] = 0)
#define ASSERT(mat, stride) for (int i = 0; i < stride * stride; assert(mat[i++] == stride))
void checked_finalize(cudnnBackendDescriptor_t desc, const char *dname) {
cudnnStatus_t status;
if ((status = cudnnBackendFinalize(desc)) != CUDNN_STATUS_SUCCESS) {
printf("Finalizing %s error: %s, exit!n", dname, cudnnGetErrorString(status));
exit(1);
}
}
void run(int stride, bool manual, cudnnBackendHeurMode_t heurMode) {
float *A; cudaMallocManaged(&A, sizeof(float) * stride * stride); ONES(A, stride);
float *B; cudaMallocManaged(&B, sizeof(float) * stride * stride); ONES(B, stride);
float *C; cudaMallocManaged(&C, sizeof(float) * stride * stride); ZEROS(C, stride);
cudnnStatus_t status;
cudnnHandle_t handle;
if ((status = cudnnCreate(&handle)) != CUDNN_STATUS_SUCCESS) {
printf("Creating handle error: %s, exit!n", cudnnGetErrorString(status));
exit(1);
}
cudnnDataType_t dtype = CUDNN_DATA_FLOAT;
int64_t dim[] = {1, stride, stride}, strides[] = {stride * stride, stride, 1};
int64_t alignment = 4;
cudnnBackendDescriptor_t aDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &aDesc);
cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 3, dim);
cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 3, strides);
int64_t aId = 'A'; cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &aId);
cudnnBackendSetAttribute(aDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
checked_finalize(aDesc, "aDesc");
cudnnBackendDescriptor_t bDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &bDesc);
cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 3, dim);
cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 3, strides);
int64_t bId = 'B'; cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &bId);
cudnnBackendSetAttribute(bDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
checked_finalize(bDesc, "bDesc");
cudnnBackendDescriptor_t cDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &cDesc);
cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 3, dim);
cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 3, strides);
int64_t cId = 'C'; cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &cId);
cudnnBackendSetAttribute(cDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);
checked_finalize(cDesc, "cDesc");
cudnnBackendDescriptor_t matmulDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_MATMUL_DESCRIPTOR, &matmulDesc);
cudnnBackendSetAttribute(matmulDesc, CUDNN_ATTR_MATMUL_COMP_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
checked_finalize(matmulDesc, "matmulDesc");
cudnnBackendDescriptor_t matmulOptDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, &matmulOptDesc);
cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_DESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &matmulDesc);
cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_ADESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &aDesc);
cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_BDESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &bDesc);
cudnnBackendSetAttribute(matmulOptDesc, CUDNN_ATTR_OPERATION_MATMUL_CDESC, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &cDesc);
checked_finalize(matmulOptDesc, "matmulOptDesc");
cudnnBackendDescriptor_t optGraphDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, &optGraphDesc);
cudnnBackendSetAttribute(optGraphDesc, CUDNN_ATTR_OPERATIONGRAPH_HANDLE, CUDNN_TYPE_HANDLE, 1, &handle);
cudnnBackendSetAttribute(optGraphDesc, CUDNN_ATTR_OPERATIONGRAPH_OPS, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &matmulOptDesc);
checked_finalize(optGraphDesc, "optGraphDesc");
cudnnBackendDescriptor_t engineDesc, engineHeurDesc, engineCfgDesc;
int64_t idx = 0, engineCfgDescCount;
if (manual) {
cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINE_DESCRIPTOR, &engineDesc);
cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_GLOBAL_INDEX, CUDNN_TYPE_INT64, 1, &idx);
checked_finalize(engineDesc, "engineDesc");
cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, &engineCfgDesc);
cudnnBackendSetAttribute(engineCfgDesc, CUDNN_ATTR_ENGINECFG_ENGINE, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineDesc);
checked_finalize(engineCfgDesc, "engineCfgDesc");
} else {
cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, &engineHeurDesc);
cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_MODE, CUDNN_TYPE_HEUR_MODE, 1, &heurMode);
checked_finalize(engineHeurDesc, "engineHeurDesc");
/* Line 93 */ status = cudnnBackendGetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_RESULTS, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineCfgDescCount, &engineCfgDesc);
if (status != CUDNN_STATUS_SUCCESS) {
printf("Getting engineCfgDesc error: %s, exit!n", cudnnGetErrorString(status));
exit(1);
}
if (!engineCfgDescCount) {
printf("0 engineCfgDesc found, exit!");
exit(1);
}
}
cudnnBackendDescriptor_t executionPlanDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, &executionPlanDesc);
cudnnBackendSetAttribute(executionPlanDesc, CUDNN_ATTR_EXECUTION_PLAN_HANDLE, CUDNN_TYPE_HANDLE, 1, &handle);
cudnnBackendSetAttribute(executionPlanDesc, CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineCfgDesc);
checked_finalize(executionPlanDesc, "executionPlanDesc");
cudnnBackendDescriptor_t varianPackDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, &varianPackDesc);
void *dataPtrs[] = {A, B, C}; cudnnBackendSetAttribute(varianPackDesc, CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS, CUDNN_TYPE_VOID_PTR, 3, dataPtrs);
int64_t ids[] = {'A', 'B', 'C'}; cudnnBackendSetAttribute(varianPackDesc, CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS, CUDNN_TYPE_INT64, 3, ids);
void *workspace; cudnnBackendSetAttribute(varianPackDesc, CUDNN_ATTR_VARIANT_PACK_WORKSPACE, CUDNN_TYPE_VOID_PTR, 1, &workspace);
checked_finalize(varianPackDesc, "varianPackDesc");
/* Line 115 */ if ((status = cudnnBackendExecute(handle, executionPlanDesc, varianPackDesc)) != CUDNN_STATUS_SUCCESS) {
printf("Executing stride %d error: %s, exit!n", stride, cudnnGetErrorString(status));
exit(1);
}
cudaDeviceSynchronize();
/* Line 121 */ ASSERT(C, stride);
// printf("Executing stride %d OK.n", stride);
cudnnDestroy(handle);
cudaFree(A);
cudaFree(B);
cudaFree(C);
}
int main(int argc, char **argv) {
int stride;
if (argc >= 2 && (stride = atoi(argv[argc - 1]))) {
char *engine = argv[argc - 2];
if (!strcmp(engine, "manual")) {
run(stride, true, CUDNN_HEUR_MODE_FALLBACK);
return 0;
}
if (!strcmp(engine, "heurA")) {
run(stride, false, CUDNN_HEUR_MODE_A);
return 0;
}
if (!strcmp(engine, "heurB")) {
run(stride, false, CUDNN_HEUR_MODE_B);
return 0;
}
if (!strcmp(engine, "fallback")) {
run(stride, false, CUDNN_HEUR_MODE_FALLBACK);
return 0;
}
}
printf("Usage: ./sgemm_cudnn_test manual|heurA|heurB|fallback striden");
}
Tests
$ nvcc sgemm_cudnn_test.cu
-o sgemm_cudnn_test
-I $CUDNN_ROOT/include
-L $CUDNN_ROOT/lib
-l cudnn
$ echo "stride time kernel";
for stride in 1024 2048 4096 8192 16384; do
nvprof ./sgemm_cudnn_test manual $stride 2>
>(awk -v stride=$stride '/GPU/{printf "%-6s %s %sn", stride, $7, $9}');
done
stride time kernel
1024 5.5512ms volta_sgemm_128x64_nn
2048 24.040ms volta_sgemm_128x64_nn
4096 218.24ms volta_sgemm_32x128_nn
8192 1.40703s volta_sgemm_64x64_nn
16384 6.24338s volta_sgemm_128x64_nn
$ ./sgemm_cudnn_test manual 256
Executing stride 256 error: CUDNN_STATUS_EXECUTION_FAILED_CUBLAS, exit!
$ ./sgemm_cudnn_test manual 512
Assertion failed.
Aborted (core dumped)
$ for heur in heurA heurB fallback; do
./sgemm_cudnn_test $heur 1024;
done
Getting engineCfgDesc error: CUDNN_STATUS_BAD_PARAM, exit!
Getting engineCfgDesc error: CUDNN_STATUS_BAD_PARAM, exit!
Getting engineCfgDesc error: CUDNN_STATUS_BAD_PARAM, exit!
Problems (as shown above)
-
When the tests were manually configured with the engine descriptor, they worked for strides 1024 2048 4096 8192 16384 as expected, but failed for stride 256 calling the API cudnnBackendExecute on Line 115 and failed for stride 512 because of assertion error on Line 121. I guess that the engine knob should be manually adjusted too but wasn’t done due to lack of concrete guidelines. Right?
-
When the tests were configured with the heuristics engine descriptor, they failed on all the heuristics modes when calling the API cudnnBackendGetAttribute on Line 93. I haven’t figured out how to solve it so far. Could you help? Thanks!
References
- use case example
- matrix multiplication operator
- cudnn graph API
Finally I’ve found and retested the solution on the second problem successfully. Because cudnnBackendGetAttribute(…, void *arrayOfElements) accepts an opaque pointer argument to the retrieved elements, we have to firstly instantiate the concrete type before pass its reference to the call. Therefore, the relevant part is updated below,
cudnnBackendDescriptor_t engineDesc, engineHeurDesc;
/* Line 77, instantiate engineCfgDesc before pass its reference to cudnnBackendGetAttribute */ cudnnBackendDescriptor_t engineCfgDesc; cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, &engineCfgDesc);
int64_t idx = 0, engineCfgDescCount;
if (manual) {
cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINE_DESCRIPTOR, &engineDesc);
cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
cudnnBackendSetAttribute(engineDesc, CUDNN_ATTR_ENGINE_GLOBAL_INDEX, CUDNN_TYPE_INT64, 1, &idx);
checked_finalize(engineDesc, "engineDesc");
cudnnBackendSetAttribute(engineCfgDesc, CUDNN_ATTR_ENGINECFG_ENGINE, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineDesc);
checked_finalize(engineCfgDesc, "engineCfgDesc");
} else {
cudnnBackendCreateDescriptor(CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, &engineHeurDesc);
cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &optGraphDesc);
cudnnBackendSetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_MODE, CUDNN_TYPE_HEUR_MODE, 1, &heurMode);
checked_finalize(engineHeurDesc, "engineHeurDesc");
status = cudnnBackendGetAttribute(engineHeurDesc, CUDNN_ATTR_ENGINEHEUR_RESULTS, CUDNN_TYPE_BACKEND_DESCRIPTOR, 1, &engineCfgDescCount, &engineCfgDesc);
if (status != CUDNN_STATUS_SUCCESS) {
printf("Getting engineCfgDesc error: %s, exit!n", cudnnGetErrorString(status));
exit(1);
}
if (!engineCfgDescCount) {
printf("0 engineCfgDesc found, exit!");
exit(1);
}
}
However, the first problem persists on any of the engine modes (manual|heurA|heurB|fallback). I guess that without being further tuned CUDNN couldn’t correctly handle matrix multiplication for strides 256 512 in my test cases aforementioned. How to deal with the tuning? Or could be a bug?