I am working on parallelising [this file][1] on the GPU using a [PTX file with Matlab's parallel.gpu.CUDAKernel][2]. My problem with the [kron tensor product][3] is the following. My code should compute kron(a,b) by multiplying each element of the first vector a=<32x1>
by all the elements of the other vector b=<1x32>
, so that the output is k=<32x32> with k(i,j) = a(i)*b(j)
. I wrote it in C++ and it worked, since all I actually need is the sum of all the elements of the 2D array; I thought I could treat it as a 1D array, because m=sum(sum(kron(a,b)))
. This is the serial code I am working on:
for (i = 0; i < 32; i++)
    for (j = 0; j < 32; j++)
        k[i * 32 + j] = a[i] * b[j];
It is meant to multiply the a[i]
-th element by each element of b
, and I thought to go with 32
blocks, each block having 32
threads, so the kernel should be
__global__ void myKrom(int *c, int *a, int *b) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    while (i < 32) {
        c[i] = a[blockIdx.x] + b[blockDim.x * blockIdx.x + threadIdx.x];
    }
}
That should do the trick, since blockIdx.x
plays the role of the outer loop, but it didn't. Could anybody tell me where it goes wrong? And may I also ask for a parallel way to do the sum?
You may actually mean something like this:
__global__ void myKrom(int *c, int *a, int *b)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < 32 * 32) {
        // --- Element (blockIdx.x, threadIdx.x) of the outer product a*b
        c[i] = a[blockIdx.x] * b[threadIdx.x];
    }
}
provided you launch the kernel as myKrom<<<32, 32>>>(c, a, b);. Note that the Kronecker product requires multiplying, not adding, a[blockIdx.x] and b[threadIdx.x].
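As for the sum m = sum(sum(kron(a,b))), below is a minimal sketch of a parallel reduction (the kernel name myKronSum and the device accumulator d_sum are illustrative names; d_sum must be zeroed before the launch, and the launch configuration is the same <<<32, 32>>>). Each block reduces its 32 products in shared memory, and one atomicAdd per block accumulates the result:
__global__ void myKronSum(int *d_sum, int *a, int *b)
{
    __shared__ int partial[32];
    // --- Each thread computes one element of the outer product
    partial[threadIdx.x] = a[blockIdx.x] * b[threadIdx.x];
    __syncthreads();
    // --- Tree reduction of the 32 products held by this block
    for (int s = 16; s > 0; s >>= 1) {
        if (threadIdx.x < s) partial[threadIdx.x] += partial[threadIdx.x + s];
        __syncthreads();
    }
    // --- Thread 0 adds this block's partial sum to the global total
    if (threadIdx.x == 0) atomicAdd(d_sum, partial[0]);
}
Alternatively, once c has been filled by myKrom, a library call such as thrust::reduce on c gives the same sum.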
When the first operand is the identity matrix, the result of the Kronecker product can be represented directly in cuSPARSE's BSR (block compressed sparse row) format.
Below, a simple example implementing the following Matlab instructions
m = 5;
I = speye(m);                               % --- One block per diagonal entry
e = ones(3, 1);
T = spdiags([-e 4 * e -e], [-1 0 1], 3, 3); % --- 3 x 3 block tridiag(-1, 4, -1)
kron(I, T)
KRON(I, T)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#define blockMatrixSize 3 // --- Each block of the sparse block matrix is blockMatrixSize x blockMatrixSize
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line)
{
if (CUSPARSE_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSPARSE error in file '%s', line %d, error %s\nterminating!\n", __FILE__, __LINE__, \
_cusparseGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/********/
/* MAIN */
/********/
int main() {
// --- Initialize cuSPARSE
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
// --- Initialize matrix descriptors
cusparseMatDescr_t descrA, descrC;
cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSafeCall(cusparseCreateMatDescr(&descrC));
const int Mb = 5; // --- Number of blocks along rows
const int Nb = 5; // --- Number of blocks along columns
const int M = Mb * blockMatrixSize; // --- Number of rows
const int N = Nb * blockMatrixSize; // --- Number of columns
const int nnzb = Mb; // --- Number of non-zero blocks
float h_block[blockMatrixSize * blockMatrixSize] = { 4.f, -1.f, 0.f, -1.f, 4.f, -1.f, 0.f, -1.f, 4.f };   // --- The 3 x 3 block T = tridiag(-1, 4, -1) (symmetric, so storage order is irrelevant)
// --- Host vectors defining the block-sparse matrix
float *h_bsrValA = (float *)malloc(blockMatrixSize * blockMatrixSize * nnzb * sizeof(float));
int *h_bsrRowPtrA = (int *)malloc((Mb + 1) * sizeof(int));
int *h_bsrColIndA = (int *)malloc(nnzb * sizeof(int));
for (int k = 0; k < nnzb; k++) memcpy(h_bsrValA + k * blockMatrixSize * blockMatrixSize, h_block, blockMatrixSize * blockMatrixSize * sizeof(float));
// --- One block per block-row, all on the main block-diagonal
h_bsrRowPtrA[0] = 0;
h_bsrRowPtrA[1] = 1;
h_bsrRowPtrA[2] = 2;
h_bsrRowPtrA[3] = 3;
h_bsrRowPtrA[4] = 4;
h_bsrRowPtrA[5] = 5;
h_bsrColIndA[0] = 0;
h_bsrColIndA[1] = 1;
h_bsrColIndA[2] = 2;
h_bsrColIndA[3] = 3;
h_bsrColIndA[4] = 4;
// --- Device vectors defining the block-sparse matrix
float *d_bsrValA; gpuErrchk(cudaMalloc(&d_bsrValA, blockMatrixSize * blockMatrixSize * nnzb * sizeof(float)));
int *d_bsrRowPtrA; gpuErrchk(cudaMalloc(&d_bsrRowPtrA, (Mb + 1) * sizeof(int)));
int *d_bsrColIndA; gpuErrchk(cudaMalloc(&d_bsrColIndA, nnzb * sizeof(int)));
gpuErrchk(cudaMemcpy(d_bsrValA, h_bsrValA, blockMatrixSize * blockMatrixSize * nnzb * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_bsrRowPtrA, h_bsrRowPtrA, (Mb + 1) * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_bsrColIndA, h_bsrColIndA, nnzb * sizeof(int), cudaMemcpyHostToDevice));
// --- Transforming bsr to csr format
cusparseDirection_t dir = CUSPARSE_DIRECTION_COLUMN;
const int nnz = nnzb * blockMatrixSize * blockMatrixSize; // --- Number of non-zero elements
int *d_csrRowPtrC; gpuErrchk(cudaMalloc(&d_csrRowPtrC, (M + 1) * sizeof(int)));
int *d_csrColIndC; gpuErrchk(cudaMalloc(&d_csrColIndC, nnz * sizeof(int)));
float *d_csrValC; gpuErrchk(cudaMalloc(&d_csrValC, nnz * sizeof(float)));
cusparseSafeCall(cusparseSbsr2csr(handle, dir, Mb, Nb, descrA, d_bsrValA, d_bsrRowPtrA, d_bsrColIndA, blockMatrixSize, descrC, d_csrValC, d_csrRowPtrC, d_csrColIndC));
// --- Transforming csr to dense format
float *d_A; gpuErrchk(cudaMalloc(&d_A, M * N * sizeof(float)));
cusparseSafeCall(cusparseScsr2dense(handle, M, N, descrC, d_csrValC, d_csrRowPtrC, d_csrColIndC, d_A, M));
float *h_A = (float *)malloc(M * N * sizeof(float));
gpuErrchk(cudaMemcpy(h_A, d_A, M * N * sizeof(float), cudaMemcpyDeviceToHost));
// --- m is row index, n column index
for (int m = 0; m < M; m++) {
for (int n = 0; n < N; n++) {
printf("%f ", h_A[m + n * M]);
}
printf("\n");
}
return 0;
}
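If everything went fine, the printout should be the 15 x 15 dense version of kron(I, T): five tridiag(-1, 4, -1) blocks along the main block-diagonal and zeros elsewhere.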
Also, in the simple case when the second operand is the identity matrix, the result of the Kronecker product can be represented in cuSPARSE's BSR format.
Below, a simple example implementing the following Matlab instructions
m = 5;
I = speye(3);
e = ones(m, 1);
S = spdiags([e e], [-1 1], m, m);
kron(S, I)
KRON(S, I)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#define blockMatrixSize 3 // --- Each block of the sparse block matrix is blockMatrixSize x blockMatrixSize
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line)
{
if (CUSPARSE_STATUS_SUCCESS != err) {
fprintf(stderr, "CUSPARSE error in file '%s', line %d, error %s\nterminating!\n", __FILE__, __LINE__, \
_cusparseGetErrorEnum(err)); \
assert(0); \
}
}
extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/********/
/* MAIN */
/********/
int main() {
// --- Initialize cuSPARSE
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
// --- Initialize matrix descriptors
cusparseMatDescr_t descrA, descrC;
cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSafeCall(cusparseCreateMatDescr(&descrC));
const int Mb = 5; // --- Number of blocks along rows
const int Nb = 5; // --- Number of blocks along columns
const int M = Mb * blockMatrixSize; // --- Number of rows
const int N = Nb * blockMatrixSize; // --- Number of columns
const int nnzb = 2 * (Mb - 1); // --- Number of non-zero blocks
float h_block[blockMatrixSize * blockMatrixSize] = { 1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f };   // --- Each non-zero block is the 3 x 3 identity
// --- Host vectors defining the block-sparse matrix
float *h_bsrValA = (float *)malloc(blockMatrixSize * blockMatrixSize * nnzb * sizeof(float));
int *h_bsrRowPtrA = (int *)malloc((Mb + 1) * sizeof(int));
int *h_bsrColIndA = (int *)malloc(nnzb * sizeof(int));
for (int k = 0; k < nnzb; k++) memcpy(h_bsrValA + k * blockMatrixSize * blockMatrixSize, h_block, blockMatrixSize * blockMatrixSize * sizeof(float));
// --- Two blocks per inner block-row, on the sub- and super-block-diagonals of S
h_bsrRowPtrA[0] = 0;
h_bsrRowPtrA[1] = 1;
h_bsrRowPtrA[2] = 3;
h_bsrRowPtrA[3] = 5;
h_bsrRowPtrA[4] = 7;
h_bsrRowPtrA[5] = 2 * (Mb - 1);
h_bsrColIndA[0] = 1;
h_bsrColIndA[1] = 0;
h_bsrColIndA[2] = 2;
h_bsrColIndA[3] = 1;
h_bsrColIndA[4] = 3;
h_bsrColIndA[5] = 2;
h_bsrColIndA[6] = 4;
h_bsrColIndA[7] = 3;
// --- Device vectors defining the block-sparse matrix
float *d_bsrValA; gpuErrchk(cudaMalloc(&d_bsrValA, blockMatrixSize * blockMatrixSize * nnzb * sizeof(float)));
int *d_bsrRowPtrA; gpuErrchk(cudaMalloc(&d_bsrRowPtrA, (Mb + 1) * sizeof(int)));
int *d_bsrColIndA; gpuErrchk(cudaMalloc(&d_bsrColIndA, nnzb * sizeof(int)));
gpuErrchk(cudaMemcpy(d_bsrValA, h_bsrValA, blockMatrixSize * blockMatrixSize * nnzb * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_bsrRowPtrA, h_bsrRowPtrA, (Mb + 1) * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_bsrColIndA, h_bsrColIndA, nnzb * sizeof(int), cudaMemcpyHostToDevice));
// --- Transforming bsr to csr format
cusparseDirection_t dir = CUSPARSE_DIRECTION_COLUMN;
const int nnz = nnzb * blockMatrixSize * blockMatrixSize; // --- Number of non-zero elements
int *d_csrRowPtrC; gpuErrchk(cudaMalloc(&d_csrRowPtrC, (M + 1) * sizeof(int)));
int *d_csrColIndC; gpuErrchk(cudaMalloc(&d_csrColIndC, nnz * sizeof(int)));
float *d_csrValC; gpuErrchk(cudaMalloc(&d_csrValC, nnz * sizeof(float)));
cusparseSafeCall(cusparseSbsr2csr(handle, dir, Mb, Nb, descrA, d_bsrValA, d_bsrRowPtrA, d_bsrColIndA, blockMatrixSize, descrC, d_csrValC, d_csrRowPtrC, d_csrColIndC));
// --- Transforming csr to dense format
float *d_A; gpuErrchk(cudaMalloc(&d_A, M * N * sizeof(float)));
cusparseSafeCall(cusparseScsr2dense(handle, M, N, descrC, d_csrValC, d_csrRowPtrC, d_csrColIndC, d_A, M));
float *h_A = (float *)malloc(M * N * sizeof(float));
gpuErrchk(cudaMemcpy(h_A, d_A, M * N * sizeof(float), cudaMemcpyDeviceToHost));
// --- m is row index, n column index
for (int m = 0; m < M; m++) {
for (int n = 0; n < N; n++) {
printf("%f ", h_A[m + n * M]);
}
printf("\n");
}
return 0;
}
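Here the printout should be the 15 x 15 dense version of kron(S, I): 3 x 3 identity blocks on the first sub- and super-block-diagonals and zeros elsewhere.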