7
1 Basic Example: Matrix Multiplication using CUDA General-purpose Programming of Massively Parallel Graphics Processors Shiraz University, Spring 2010 Instructor: Reza Azimi Some materials/slides are adapted from: Andreas Moshovos’ Course at the University of Toronto UIUC course by Wen-Mei Hwu and David Kirk Adapted From: David Kirk/NVIDIA and Wen-mei W. Hwu, UIUC void MatrixMulOnHost( float* M, float* N, float* P, int Width) { for (int i = 0; i < Width; ++i) for (int j = 0; j < Width; ++j) { float sum = 0; for (int k = 0; k < Width; ++k) { float a = M[i * Width + k]; float b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } 2 M N P WIDTH WIDTH WIDTH WIDTH i k k j

Matrix multiplication using CUDA

Embed Size (px)

DESCRIPTION

 

Citation preview

Page 1: Matrix multiplication using CUDA

1

Basic Example: Matrix Multiplication using CUDA

General-purpose Programming of Massively Parallel Graphics Processors

Shiraz University, Spring 2010 — Instructor: Reza Azimi

Some materials/slides are adapted from: Andreas Moshovos’ course at the University of Toronto

UIUC course by Wen-Mei Hwu and David Kirk

������������� � ��� � ������� ���� ������� ��� ���� ��� �Adapted From: David Kirk/NVIDIA and Wen-mei W. Hwu, UIUC

// Reference (CPU) implementation of square matrix multiply: P = M * N.
// All three matrices are Width x Width, stored row-major in host memory.
void MatrixMulOnHost( float* M, float* N, float* P, int Width)
{
    for (int row = 0; row < Width; ++row) {
        for (int col = 0; col < Width; ++col) {
            // Dot product of row `row` of M with column `col` of N.
            float acc = 0;
            for (int k = 0; k < Width; ++k)
                acc += M[row * Width + k] * N[k * Width + col];
            P[row * Width + col] = acc;
        }
    }
}

2

A Simple Host Version in C

M

N

P

WID

TH

WID

TH

WIDTH WIDTH

i

k

k

j

Page 2: Matrix multiplication using CUDA

2

������������� � ��� � ������� ���� ������� ��� ���� ��� +

// GPU kernel, version 1: ONE thread block computes the whole product.
// Each thread computes a single element d_P[row][col] as the dot product
// of row `row` of d_M with column `col` of d_N.
// Expected launch: <<<dim3(1,1), dim3(Width,Width)>>> (see the host code),
// so row/col come straight from the 2-D thread index. All matrices are
// Width x Width, row-major, in device global memory.
__global__ void MatrixMulKernel(float* d_M,
                                float* d_N, float* d_P, int Width)
{
    int row = threadIdx.y;
    int col = threadIdx.x;

    float P_val = 0;
    for (int k = 0; k < Width; ++k) {
        float M_elem = d_M[row * Width + k];
        float N_elem = d_N[k * Width + col];
        P_val += M_elem * N_elem;
    }
    // BUG FIX: the slide wrote `d_p`, which does not exist — the output
    // parameter is `d_P` (C is case-sensitive).
    d_P[row * Width + col] = P_val;
}

GPU Kernel Function

3

d_M

d_N

d_P

WID

TH

WID

TH

WIDTH WIDTH

row(threadIdx.y)

k

k

col(threadIdx.x)

Adapted From: David Kirk/NVIDIA and Wen-mei W. Hwu, UIUC

������������� � ��� � ������� ���� ������� ��� ���� ��� 3 4

// Host-side driver, fragment 1 of 2 (the function continues after the
// next slide heading with the launch / copy-back / free steps).
// Allocates the three Width x Width device buffers and copies the input
// matrices M and N from host to device.
// NOTE(review): line breaks were lost when the slides were extracted, so
// some cudaMalloc/cudaMemcpy calls below appear fused onto `//` comment
// lines; in the original code they are separate statements.
void MatrixMulOnDevice(float* M, float* N, float* P, int Width)

// matrix_size = bytes needed for one Width x Width float matrix.
{int matrix_size = Width * Width * sizeof(float); float *d_M, *d_N, *d_P;

// Allocate and Load M and N to device memory cudaMalloc(&d_M, matrix_size);cudaMemcpy(d_M, M, matrix_size, cudaMemcpyHostToDevice);

cudaMalloc(&d_N, matrix_size);cudaMemcpy(d_N, N, matrix_size, cudaMemcpyHostToDevice);

// Output buffer: written by the kernel, copied back in fragment 2.
// Allocate P on the devicecudaMalloc(&d_P, matrix_size);

Input Data Allocation and Transfer

Adapted From: David Kirk/NVIDIA and Wen-mei W. Hwu, UIUC

Page 3: Matrix multiplication using CUDA

3

������������� � ��� � ������� ���� ������� ��� ���� ��� : 5

Kernel Invocation and Copy Results

© David Kirk/NVIDIA and Wen-mei W. Hwu, 2007-2009ECE 498AL Spring 2010, University of Illinois, Urbana-Champaign

// Fragment 2 of 2 of MatrixMulOnDevice: configure the launch, run the
// kernel, copy the result back to host memory, and free device buffers.
// NOTE(review): dimBlock(Width, Width) puts the entire matrix in ONE
// thread block, so Width is capped by the per-block thread limit
// (512 on the GTX 280 discussed later in these slides — TODO confirm
// for the target device).
// Setup the execution configurationdim3 dimGrid(1, 1);dim3 dimBlock(Width, Width);

// Launch the device computation threads!MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P,

Width);

// The blocking cudaMemcpy also synchronizes with the async kernel launch.
// Copy back the results from device to hostcudaMemcpy(P, d_P, matrix_size, cudaMemcpyDeviceToHost);

// Free up the device memory matricescudaFree(d_P);cudaFree(d_M);cudaFree(d_N);

������������� � ��� � ������� ���� ������� ��� ���� ��� > 6

Only One Thread Block Used? One block of threads computes matrix d_P. Each thread:
- Loads a row of matrix d_M
- Loads a column of matrix d_N
- Performs one multiply and addition for each pair of d_M and d_N elements
- Computes one element of d_P

Grid 1

Block 1

3 2 5 4

2

4

2

6

48

Thread(2, 2)

WIDTH

d_Md_P

d_N

Adapted From: David Kirk/NVIDIA and Wen-mei W. Hwu, UIUC

Size of matrix limited by the number of threads allowed in a thread block

Page 4: Matrix multiplication using CUDA

4

������������� � ��� � ������� ���� ������� ��� ���� ��� B

Solution 1: Give Each Thread More Work

7

d_P

threadIdx.y

threadIdx.x TILE_WIDTH

TILE_

Each thread is assigned to a tile of TILE_WIDTH × TILE_WIDTH entries

������������� � ��� � ������� ���� ������� ��� ���� ��� J

// GPU kernel, "Solution 1": still one block, but each thread computes a
// TILE_WIDTH x TILE_WIDTH tile of d_P instead of one element, so larger
// matrices fit within the per-block thread limit.
// Expected launch: one block of (Width/TILE_WIDTH, Width/TILE_WIDTH)
// threads; TILE_WIDTH is a compile-time constant defined elsewhere.
// All matrices are Width x Width, row-major, in device global memory.
__global__ void MatrixMulKernel(float* d_M, float* d_N, float* d_P, int Width)
{
    // This thread's output tile: rows [start_row, end_row) x
    // cols [start_col, end_col), derived from its 2-D thread index.
    int start_row = threadIdx.y * TILE_WIDTH;
    int end_row   = start_row + TILE_WIDTH;
    int start_col = threadIdx.x * TILE_WIDTH;
    int end_col   = start_col + TILE_WIDTH;

    for (int row = start_row; row < end_row; row++) {
        for (int col = start_col; col < end_col; col++) {
            float P_val = 0;
            for (int k = 0; k < Width; ++k) {
                float M_elem = d_M[row * Width + k];
                float N_elem = d_N[k * Width + col];
                P_val += M_elem * N_elem;
            }
            // BUG FIX: the slide wrote `d_p`; the parameter is `d_P`.
            d_P[row * Width + col] = P_val;
        }
    }
}

Solution 1: Give Each Thread More Work

With one block we utilize only one multiprocessor!

Page 5: Matrix multiplication using CUDA

5

������������� � ��� � ������� ���� ������� ��� ���� ��� K

Solution 2: Use Multiple Thread Blocks

9

d_P

threadIdx.y

blockIdx.y

blockIdx.x

threadIdx.x

blockDim.x

blockDim.y

assigned to athread block

assigned to athread

������������� � ��� � ������� ���� ������� ��� ���� ��� �N

// GPU kernel, "Solution 2": multiple thread blocks; each thread computes
// exactly one element of d_P, indexed globally via blockIdx/blockDim.
// Expected launch: grid of (Width/block_size)^2 blocks of
// (block_size, block_size) threads (see the host code).
// All matrices are Width x Width, row-major, in device global memory.
__global__ void MatrixMulKernel(float* d_M,
                                float* d_N, float* d_P, int Width)
{
    // Global row/column of the single element this thread owns.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Robustness: guard against out-of-range threads when Width is not a
    // multiple of the block dimensions (no-op for exact-fit launches).
    if (row >= Width || col >= Width) return;

    float P_val = 0;
    for (int k = 0; k < Width; ++k) {
        float M_elem = d_M[row * Width + k];
        float N_elem = d_N[k * Width + col];
        P_val += M_elem * N_elem;
    }
    // BUG FIX: the slide wrote `d_p`; the parameter is `d_P`.
    d_P[row * Width + col] = P_val;
}

Solution 2: Use Multiple Thread Blocks

Page 6: Matrix multiplication using CUDA

6

������������� � ��� � ������� ���� ������� ��� ���� ��� �� 11

Kernel Invocation and Copy Results

int block_size = 64;

// Setup the execution configurationdim3 dimGrid(Width/block_size, Width/block_size);dim3 dimBlock(block_size, block_size);

// Launch the device computation threads!MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P,

Width);

…Size of matrix limited by the number of threads allowed on a device

������������� � ��� � ������� ���� ������� ��� ���� ��� ��

GTX 280 Thread Limitations
- Max Number of Threads per Block: 512
- Max Number of Blocks per Streaming Multiprocessor: 8
- Number of Streaming Multiprocessors: 30
- Total Number of Threads Available =

30 x 8 x 512 = 122880

Let me double-check this!

Page 7: Matrix multiplication using CUDA

7

������������� � ��� � ������� ���� ������� ��� ���� ��� �+

Combining the Two Solutions

13

d_P

threadIdx.y

blockIdx.y

blockIdx.x

threadIdx.xblockDim.x

blockDim.y

TILE_WIDTH

TILE_WIDT

������������� � ��� � ������� ���� ������� ��� ���� ��� �3

// GPU kernel, "Combining the Two Solutions": multiple blocks AND each
// thread computes a TILE_WIDTH x TILE_WIDTH tile of d_P.
// TILE_WIDTH is a compile-time constant defined elsewhere. All matrices
// are Width x Width, row-major, in device global memory.
__global__ void MatrixMulKernel(float* d_M, float* d_N, float* d_P, int Width)
{
    // BUG FIX (indexing): the slide computed
    //     blockDim.y * blockIdx.y + threadIdx.y * TILE_WIDTH
    // which scales only the thread offset by TILE_WIDTH, so tiles from
    // adjacent blocks overlap/leave gaps. The GLOBAL tile index
    // (blockIdx * blockDim + threadIdx) must be scaled as a whole.
    int start_row = (blockIdx.y * blockDim.y + threadIdx.y) * TILE_WIDTH;
    int end_row   = start_row + TILE_WIDTH;
    int start_col = (blockIdx.x * blockDim.x + threadIdx.x) * TILE_WIDTH;
    int end_col   = start_col + TILE_WIDTH;

    // Robustness: also clamp to Width so partial edge tiles stay in bounds
    // (no-op when Width is a multiple of the tile/block geometry).
    for (int row = start_row; row < end_row && row < Width; row++) {
        for (int col = start_col; col < end_col && col < Width; col++) {
            float P_val = 0;
            for (int k = 0; k < Width; ++k) {
                float M_elem = d_M[row * Width + k];
                float N_elem = d_N[k * Width + col];
                P_val += M_elem * N_elem;
            }
            // BUG FIX: the slide wrote `d_p`; the parameter is `d_P`.
            d_P[row * Width + col] = P_val;
        }
    }
}

Combining the Two Solutions