日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程资源 > 编程问答 > 内容正文

编程问答

CUDA编程--实现并行矩阵乘法【80行代码】

發(fā)布時間:2025/4/16 编程问答 30 豆豆
生活随笔 收集整理的這篇文章主要介紹了 CUDA编程--实现并行矩阵乘法【80行代码】 小編覺得挺不錯的，現在分享給大家，幫大家做個參考。

簡述

這里只寫了方陣之間的乘法,但是本質(zhì)上都是一樣的。

  • 我測試過100規(guī)模的方陣之間的乘法,沒有問題。

代碼

  • 讀取文件data.txt
  • 數(shù)據(jù)格式就是一個數(shù)值N,然后來連續(xù)的兩個N*N的矩陣。用空格隔開。
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <stdio.h>

// Kernel: each thread computes one element of the N x N product c = a * b.
// Mapping the fast-varying x index to the COLUMN keeps the loads of `b` and
// the stores of `c` coalesced within a warp (the original mapped x to the row,
// producing stride-N access). Out-of-range threads exit via the bounds check,
// so any grid covering at least N x N threads is valid.
__global__ void MatrixMultiply(int *a, int *b, int *c, int N)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row < N && col < N) {
        int sum = 0;
        for (int k = 0; k < N; ++k)
            sum += a[row * N + k] * b[k * N + col];
        c[row * N + col] = sum;
    }
}

// Multiplies two row-major N x N host matrices on the GPU: c = a * b.
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t matrixMultiplyWithCuda(int *a, int *b, int *c, size_t N);

// Reads N followed by two whitespace-separated N x N integer matrices from
// "data.txt", multiplies them on the GPU, and prints the result to stdout.
int main()
{
    std::ifstream in("data.txt");
    int N;
    in >> N;
    if (in.fail() || N <= 0) {
        // The original fell through and used an uninitialized N here.
        printf("Something wrong\n");
        return 1;
    }
    printf("Success read\n");

    // Host buffers, row-major.
    int *a = new int[N * N];
    int *b = new int[N * N];
    int *c = new int[N * N];

    // Row-major fill: identical element order to the original nested loops.
    for (int i = 0; i < N * N; ++i) in >> a[i];
    for (int i = 0; i < N * N; ++i) in >> b[i];
    if (in.fail()) {
        printf("Something wrong\n");
        delete[] a; delete[] b; delete[] c;
        return 1;
    }

    cudaError_t cudaStatus = matrixMultiplyWithCuda(a, b, c, (size_t)N);
    if (cudaStatus != cudaSuccess) {
        // Do not print c: it is uninitialized when the GPU path failed.
        printf("Calculate wrong\n");
    } else {
        for (int i = 0; i < N; ++i) {
            for (int j = 0; j < N; ++j) std::cout << c[i * N + j] << " ";
            std::cout << std::endl;
        }
    }

    cudaDeviceReset();  // cudaThreadExit() is deprecated in favor of this

    delete[] a;
    delete[] b;
    delete[] c;
    return cudaStatus == cudaSuccess ? 0 : 1;
}

cudaError_t matrixMultiplyWithCuda(int *a, int *b, int *c, size_t N)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    size_t bytes = N * N * sizeof(int);
    cudaError_t cudaStatus;

    // Check every call individually: the original overwrote cudaStatus five
    // times and only tested the final value, hiding allocation failures.
    cudaStatus = cudaMalloc((void**)&dev_a, bytes);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_b, bytes);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_c, bytes);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }

    // Kernel invocation: ceil-div grid (the original's N/32 + 1 launched an
    // extra, fully-idle block row/column whenever N was a multiple of 32).
    {
        dim3 threadPerBlock(32, 32);
        dim3 numBlocks((unsigned)((N + threadPerBlock.x - 1) / threadPerBlock.x),
                       (unsigned)((N + threadPerBlock.y - 1) / threadPerBlock.y));
        MatrixMultiply<<<numBlocks, threadPerBlock>>>(dev_a, dev_b, dev_c, (int)N);
    }

    // A <<<>>> launch returns no status itself; launch-config errors surface
    // via cudaGetLastError() and in-kernel faults at the next sync. The
    // original tested a stale cudaStatus here, so failures went unnoticed.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) { printf("Calculate wrong\n"); goto Error; }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) { printf("Calculate wrong\n"); goto Error; }

    cudaStatus = cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);

Error:
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return cudaStatus;
}

寫入文件的 版本

(也改成了浮點(diǎn)數(shù)運(yùn)算了)

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <stdio.h>

// Kernel: each thread computes one element of the N x N product c = a * b
// (float version). Mapping the fast-varying x index to the COLUMN keeps the
// loads of `b` and the stores of `c` coalesced within a warp (the original
// mapped x to the row, producing stride-N access). Out-of-range threads exit
// via the bounds check, so any grid covering at least N x N threads is valid.
__global__ void MatrixMultiply(float *a, float *b, float *c, int N)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row < N && col < N) {
        float sum = 0.0f;
        for (int k = 0; k < N; ++k)
            sum += a[row * N + k] * b[k * N + col];
        c[row * N + col] = sum;
    }
}

// Multiplies two row-major N x N host matrices on the GPU: c = a * b.
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t matrixMultiplyWithCuda(float *a, float *b, float *c, size_t N);

// Reads N followed by two whitespace-separated N x N float matrices from
// "data.txt", multiplies them on the GPU, and writes the result to
// "output.txt".
int main()
{
    std::ifstream in("data.txt");
    int N;
    in >> N;
    if (in.fail() || N <= 0) {
        // The original fell through and used an uninitialized N here.
        printf("Something wrong\n");
        return 1;
    }
    printf("Success read\n");

    // Host buffers, row-major.
    float *a = new float[N * N];
    float *b = new float[N * N];
    float *c = new float[N * N];

    // Row-major fill: identical element order to the original nested loops.
    for (int i = 0; i < N * N; ++i) in >> a[i];
    for (int i = 0; i < N * N; ++i) in >> b[i];
    if (in.fail()) {
        printf("Something wrong\n");
        delete[] a; delete[] b; delete[] c;
        return 1;
    }

    cudaError_t cudaStatus = matrixMultiplyWithCuda(a, b, c, (size_t)N);
    if (cudaStatus != cudaSuccess) {
        // Do not write c: it is uninitialized when the GPU path failed.
        printf("Calculate wrong\n");
    } else {
        std::ofstream out("output.txt");
        for (int i = 0; i < N; ++i) {
            for (int j = 0; j < N; ++j) out << c[i * N + j] << " ";
            out << std::endl;
        }
    }

    cudaDeviceReset();  // cudaThreadExit() is deprecated in favor of this

    delete[] a;
    delete[] b;
    delete[] c;
    return cudaStatus == cudaSuccess ? 0 : 1;
}

cudaError_t matrixMultiplyWithCuda(float *a, float *b, float *c, size_t N)
{
    float *dev_a = 0;
    float *dev_b = 0;
    float *dev_c = 0;
    // sizeof(float), not sizeof(int): the original only worked because the
    // two types happen to be the same size on this platform.
    size_t bytes = N * N * sizeof(float);
    cudaError_t cudaStatus;

    // Check every call individually: the original overwrote cudaStatus five
    // times and only tested the final value, hiding allocation failures.
    cudaStatus = cudaMalloc((void**)&dev_a, bytes);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_b, bytes);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_c, bytes);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }
    cudaStatus = cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { printf("Something wrong\n"); goto Error; }

    // Kernel invocation: ceil-div grid (the original's N/32 + 1 launched an
    // extra, fully-idle block row/column whenever N was a multiple of 32).
    {
        dim3 threadPerBlock(32, 32);
        dim3 numBlocks((unsigned)((N + threadPerBlock.x - 1) / threadPerBlock.x),
                       (unsigned)((N + threadPerBlock.y - 1) / threadPerBlock.y));
        MatrixMultiply<<<numBlocks, threadPerBlock>>>(dev_a, dev_b, dev_c, (int)N);
    }

    // A <<<>>> launch returns no status itself; launch-config errors surface
    // via cudaGetLastError() and in-kernel faults at the next sync. The
    // original tested a stale cudaStatus here, so failures went unnoticed.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) { printf("Calculate wrong\n"); goto Error; }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) { printf("Calculate wrong\n"); goto Error; }

    cudaStatus = cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);

Error:
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return cudaStatus;
}

總結(jié)

以上是生活随笔為你收集整理的CUDA编程--实现并行矩阵乘法【80行代码】的全部內容，希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯，歡迎將生活随笔推薦給好友。