1. __host__ cudaError_t cudaMalloc(void **devPtr, size_t size)
该函数主要用来分配设备上的内存(即显存中的内存)。该函数被声明为了__host__,即表示被host所调用,即在cpu中执行的代码所调用。
返回值:为cudaError_t类型,实质为cudaError的枚举类型,其中定义了一系列的错误代码。如果函数调用成功,则返回cudaSuccess。
第一个参数,void ** 类型,devPtr:用于接受该函数所分配的内存地址
第二个参数,size_t类型,size:指定分配内存的大小,单位为字节(byte)
/*
 * Direction of a cudaMemcpy transfer (excerpt from the CUDA toolkit
 * headers, shown here for reference).
 */
enum __device_builtin__ cudaMemcpyKind
{
    cudaMemcpyHostToHost     = 0,  /* host   -> host   */
    cudaMemcpyHostToDevice   = 1,  /* host   -> device */
    cudaMemcpyDeviceToHost   = 2,  /* device -> host   */
    cudaMemcpyDeviceToDevice = 3,  /* device -> device */
    cudaMemcpyDefault        = 4   /* direction inferred from the pointer values */
};
cudaMemcpyKind决定了拷贝的方向,即是从主机的内存拷贝至设备内存,还是将设备内存拷贝至主机内存等。cudaMemcpy内部根据拷贝的类型(kind)来决定调用以下的某个函数:
4. __host__ cudaError_t cudaDeviceReset(void)
该函数销毁当前进程中当前设备上所有的内存分配和重置所有状态,调用该函数达到重新初始该设备的作用。应该注意,在调用该函数时,应该确保该进程中其他host线程不能访问该设备!
下面是一个简单的向量相加的程序
/*
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

/*
 * Vector addition: C = A + B.
 *
 * This sample is a very basic sample that implements element by element
 * vector addition. It is the same as the sample illustrating Chapter 2
 * of the programming guide with some additions like error checking.
 */
#include <stdio.h>
// For the CUDA runtime routines (prefixed
with
"cuda_"
)
#include <cuda_runtime.h>
#include <helper_cuda.h>
/*
 * CUDA Kernel Device code
 *
 * Computes the element-wise vector addition of A and B into C.  The three
 * vectors each hold numElements floats in device memory.
 *
 * Expects a 1-D grid of 1-D blocks; each thread handles exactly one
 * element.  The pointers are declared __restrict__ (they never alias
 * here), which lets the compiler route the read-only inputs through the
 * read-only data cache.
 */
__global__ void
vectorAdd(const float *__restrict__ A, const float *__restrict__ B,
          float *__restrict__ C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    // The grid is rounded up to a whole number of blocks, so threads in
    // the tail past numElements must do nothing.
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}
/**
 * Host main routine.
 *
 * Allocates host and device buffers for three vectors of 50000 floats,
 * fills the inputs with random values, copies them to the device, launches
 * the vectorAdd kernel, copies the result back, verifies it against a host
 * computation, and frees all resources.  Every CUDA runtime call is
 * checked; any failure prints a diagnostic and exits with EXIT_FAILURE.
 *
 * Returns 0 on success.
 */
int
main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size in bytes
    int numElements = 50000;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);

    // Allocate the host input vectors A and B and the host output vector C
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors with values in [0, 1]
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B in host memory to the device
    // input vectors in device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the Vector Add CUDA Kernel: 256 threads per block, and
    // ceil(numElements / 256) blocks so every element is covered.
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    // Kernel launches do not return an error directly; query it explicitly.
    err = cudaGetLastError();

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result vector in device memory to the host result
    // vector in host memory.  cudaMemcpy is blocking, so it also
    // synchronizes with the kernel above.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Verify that the result vector is correct (float tolerance, not
    // bit-exact equality)
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }

    printf("Test PASSED\n");

    // Free device global memory
    err = cudaFree(d_A);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_B);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_C);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    printf("Done\n");
    return 0;
}
目前CUDA和OpenCL是最主流的两个GPU编程库
,CUDA和OpenCL都是原生支持C/C++的,其它语言想要访问还有些麻烦,比如Java,需要通过JNI来访问CUDA或者OpenCL。基于JNI,现今有各种Java版本的GPU编程库,比如JCUDA等。另一种思路就是语言还是由java来编写,通过一种工具将java转换成C。
图2 GPU编程库
检查电脑是否有c++编译环境,没有则需要安装
. 检查电脑NVIDIA的
Cuda
驱动版本:控制面板->NVIDIA控制面板,驱动下载地址
https://www.nvidia.com/download/index.aspx?lang=en-us
下载
Cuda
:https://developer.nvidia.com/
cuda
-toolkit-archive
下载J
cuda
:10....
J
Cuda
可以将
CUDA
runtime 和driver api与java相连接,从而实现java程序调用
GPU
资源,进行并行加速的目的
具体介绍可参考http://www.j
cuda
.org/j
cuda
/J
Cuda
.html
一。 安装J
CUDA
1. 下载J
CUDA
libraries(注意此前电脑上应该已经安装
CUDA
的相应文件,并
本文是看小破站某
cuda
入门教程留下来的笔记,多上PPT上内容,夹杂一点自己的理解,和代码注释
教程地址:https://www.bilibili.com/video/av74148375
git地址(PPT和源码):https://github.com/huiscliu/tutorials
主要目的是为Gstreamer打点基础,不然基本抓瞎
什么是
GPU
计算
为什么要使用
GPU
计算
CPU与
GPU
分工与协作
GPU
计算架构...