Commit a73f8e42 by ltb

merge into xiao: clip/scaleandshift (float16/int/int8), logsoftmax/hardtanh (float16); modify XGlobal __int8
parent 3501c0fa
......@@ -32,6 +32,8 @@
#ifndef WIN32
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>
typedef int8_t __int8;
#endif
// the CUDA stuff
......@@ -43,6 +45,10 @@
/* the nts (NiuTrans.Tensor) namespace */
namespace nts {
#if (__cplusplus >= 201103L || _MSC_VER >= 1700)
#define USE_CPP11
#endif
#define _XINLINE_
//#define DOUBELPRICSION
......
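Note on the XGlobal hunk above: __int8 is an MSVC built-in type, so the non-Windows branch maps it onto the standard int8_t from <stdint.h>. Below is a minimal, self-contained sketch of that shim (the WIN32 guard follows the project's existing convention; the main() is only for illustration and is not part of the commit).

#ifndef WIN32
#include <stdint.h>
typedef int8_t __int8;           /* map MSVC's built-in __int8 onto int8_t */
#endif

#include <stdio.h>

int main()
{
    __int8 v = (__int8)(-5);     /* an 8-bit signed value on both platforms */
    printf("sizeof(__int8) = %u, v = %d\n", (unsigned)sizeof(__int8), (int)v);
    return 0;
}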
......@@ -17,6 +17,7 @@
/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-08-03
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-06 float16/int/int8 added
*/
#include "../../XDevice.h"
......@@ -35,34 +36,20 @@ set each entry to its clip value (CUDA Kernel)
>> upper - the upper border
>> size - size of the data array
*/
template <class T>
__global__
void KernelClip(DTYPE * a, DTYPE * b, DTYPE lower, DTYPE upper, int size)
void KernelClip(T * a, T * b, T lower, T upper, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (a[i] > upper)
b[i] = upper;
else if (a[i] < lower)
b[i] = lower;
else
b[i] = a[i];
}
}
int i = blockDim.x * blockIdx.x + threadIdx.x;
/*
set each entry to its clip value with float16 data type value (CUDA Kernel)
This is for float16 computation
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
__global__
void KernelClip(__half * a, __half * b, DTYPE lower, DTYPE upper, int size)
{
return;
if (i < size) {
if (a[i] > upper)
b[i] = upper;
else if (a[i] < lower)
b[i] = lower;
else
b[i] = a[i];
}
}
/*
......@@ -88,12 +75,27 @@ void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE) {
KernelClip << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, lower, upper, a->unitNum);
}
else if (a->dataType == X_FLOAT16) {
KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower, upper, a->unitNum);
}
if (a->dataType == DEFAULT_DTYPE) {
KernelClip << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, lower, upper, a->unitNum);
}
else if (a->dataType == X_FLOAT16) {
half lower1 = __float2half(lower);
half upper1 = __float2half(upper);
KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower1, upper1, a->unitNum);
}
else if (a->dataType == X_INT) {
int lower1 = (int)lower;
int upper1 = (int)upper;
KernelClip << <blocks, threads >> >((int *)a->data, (int *)b->data, lower1, upper1, a->unitNum);
}
else if (a->dataType == X_INT8) {
__int8 lower1 = (__int8)lower;
__int8 upper1 = (__int8)upper;
KernelClip << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, lower1, upper1, a->unitNum);
}
else {
ShowNTErrors("TODO!");
}
......
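For reference, a minimal, self-contained sketch (not the project's build) of the pattern the Clip changes above introduce: one templated kernel shared by all element types, with the host converting the float borders to the element type before the launch. XTensor/XDevice management is omitted and the names here are illustrative only.

#include <cuda_fp16.h>
#include <stdio.h>

template <class T>
__global__ void ClipSketch(T * a, T * b, T lower, T upper, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size)
        b[i] = a[i] > upper ? upper : (a[i] < lower ? lower : a[i]);
}

int main()
{
    const int n = 8;
    float h[8] = {-3, -2, -1, 0, 1, 2, 3, 4};
    float *da, *db;
    cudaMalloc(&da, n * sizeof(float));
    cudaMalloc(&db, n * sizeof(float));
    cudaMemcpy(da, h, n * sizeof(float), cudaMemcpyHostToDevice);

    /* float path: the borders are used as-is */
    ClipSketch<float><<<1, n>>>(da, db, -1.0f, 1.0f, n);

    /* float16 path (sm_53+): convert the borders on the host first,
       as _CudaClip does above:
         __half lo = __float2half(-1.0f), up = __float2half(1.0f);
         ClipSketch<__half><<<1, n>>>((__half*)..., (__half*)..., lo, up, n);
       the int and int8 paths cast the borders the same way. */

    cudaMemcpy(h, db, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
        printf("%g ", h[i]);     /* -1 -1 -1 0 1 1 1 1 */
    printf("\n");
    cudaFree(da);
    cudaFree(db);
    return 0;
}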
......@@ -29,12 +29,9 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* set each entry to its clip value (CUDA Kernel) */
template <class T>
__global__
void KernelClip(DTYPE * a, DTYPE * b, DTYPE lower, DTYPE upper, int size);
/* set each entry to its clip value (CUDA Kernel) with float16 data type*/
__global__
void KernelClip(__half * a, __half * b, DTYPE lower, DTYPE upper, int size);
void KernelClip(T * a, T * b, T lower, T upper, int size);
/* set each entry to its clip value */
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper);
......
......@@ -17,6 +17,7 @@
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-06 float16/int added
*/
#include "ScaleAndShift.cuh"
......@@ -34,9 +35,9 @@ scale and shift all tensor entries b = a * scale + shift (CUDA Kernel)
>> scale - how much we want to scale it
>> shift - how much we want to shift it
*/
template<bool isUnitScale, bool isZeroShift>
template<class T, bool isUnitScale, bool isZeroShift>
__global__
void KernelScaleAndShift(DTYPE * a, DTYPE * b, int size, DTYPE scale, DTYPE shift)
void KernelScaleAndShift(T * a, T * b, int size, T scale, T shift)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -56,28 +57,6 @@ void KernelScaleAndShift(DTYPE * a, DTYPE * b, int size, DTYPE scale, DTYPE shift)
}
}
/*
scale and shift all tensor entries p = p * scale + shift (CUDA Kernel)
This is for float16 computation
>> a - the input data array
>> b - the output data array
>> size - the size of d
>> scale - how much we want to scale it
>> shift - how much we want to shift it
*/
__global__
void KernelScaleAndShift(__half * a, __half * b, int size, __half scale, __half shift)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
if(i < size)
b[i] = __hadd(__hmul(a[i], scale), shift);
#else
if (i < size)
b[i] = __float2half(__half2float(a[i]) * __half2float(scale) + __half2float(shift));
#endif
}
/*
scale and shift all tensor entries
......@@ -108,20 +87,52 @@ void _CudaScaleAndShift(const XTensor * a, XTensor * b, DTYPE scale, DTYPE shift
if(a->dataType == DEFAULT_DTYPE){
if(scale == 1.0F && shift == 0)
KernelScaleAndShift<true, true> <<<blocks, threads>>>((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, true, true> <<<blocks, threads>>>((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<true, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, true, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
else if(scale != 1.0F && shift == 0)
KernelScaleAndShift<false, true> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, false, true> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
else
KernelScaleAndShift<false, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, false, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
}
else if(a->dataType == X_FLOAT16){
unsigned short scale2 = FloatToFloat16(scale);
unsigned short shift2 = FloatToFloat16(shift);
__half * scaleft16p = (__half*)&scale2;
__half * shiftft16p = (__half*)&shift2;
KernelScaleAndShift<<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, *scaleft16p, *shiftft16p);
half scale1 = __float2half(scale);
half shift1 = __float2half(shift);
if (scale == 1.0F && shift == 0)
KernelScaleAndShift<__half, true, true><<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<__half, true, false><<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
else if (scale != 1.0F && shift == 0)
KernelScaleAndShift<__half, false, true><<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
else
KernelScaleAndShift<__half, false, false> << <blocks, threads >> >((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
}
else if (a->dataType == X_INT){
int scale2 = int(scale);
int shift2 = int(shift);
if (scale == 1.0F && shift == 0)
KernelScaleAndShift<int, true, true><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<int, true, false><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
else if (scale != 1.0F && shift == 0)
KernelScaleAndShift<int, false, true><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
else
KernelScaleAndShift<int, false, false><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
}
else if (a->dataType == X_INT8){
__int8 scale2 = __int8(scale);
__int8 shift2 = __int8(shift);
if (scale == 1.0F && shift == 0)
KernelScaleAndShift<__int8, true, true> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<__int8, true, false> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
else if (scale != 1.0F && shift == 0)
KernelScaleAndShift<__int8, false, true> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
else
KernelScaleAndShift<__int8, false, false> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
}
else{
ShowNTErrors("TODO!");
......
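The hunk above shows only the templated signature of KernelScaleAndShift and the host-side dispatch; the kernel body itself is cut off. Below is a hedged sketch (assumed, not the committed body) of the standard pattern those template flags enable: because isUnitScale and isZeroShift are compile-time constants, each of the four instantiations keeps only the arithmetic it needs.

#include <stdio.h>

template <class T, bool isUnitScale, bool isZeroShift>
__global__ void ScaleShiftSketch(T * a, T * b, int size, T scale, T shift)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
    if (isUnitScale && isZeroShift)
        b[i] = a[i];                      /* plain copy           */
    else if (isUnitScale)
        b[i] = a[i] + shift;              /* shift only           */
    else if (isZeroShift)
        b[i] = a[i] * scale;              /* scale only           */
    else
        b[i] = a[i] * scale + shift;      /* full scale-and-shift */
}

int main()
{
    const int n = 4;
    float h[4] = {1, 2, 3, 4};
    float *d;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    /* mirrors the DTYPE branch above for scale != 1 and shift != 0 */
    ScaleShiftSketch<float, false, false><<<1, n>>>(d, d, n, 2.0f, 0.5f);
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
        printf("%g ", h[i]);              /* 2.5 4.5 6.5 8.5 */
    printf("\n");
    cudaFree(d);
    return 0;
}

For T = __half the same body relies on native half arithmetic (or __hmul/__hadd), which is why the removed float16-only kernel above guarded on __CUDA_ARCH__ >= 530.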
......@@ -17,6 +17,7 @@
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-25
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-12 float16 added
*/
#include "HardTanH.h"
......@@ -38,17 +39,18 @@ y = 1 if x > 1
>> y - output data array
>> size - size of input/output
*/
__global__
void KernelHardtanhCompute(DTYPE * x, DTYPE * y, int size)
template <class T>
__global__
void KernelHardtanhCompute(T * x, T * y, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
DTYPE p = x[i];
if(p > (DTYPE)1.0)
p = (DTYPE)1.0;
else if(p < (DTYPE)-1.0)
p = (DTYPE)-1.0;
if (i < size) {
T p = x[i];
if (p >(T)1.0)
p = (T)1.0;
else if (p < (T)-1.0)
p = (T)-1.0;
y[i] = p;
}
}
......@@ -63,25 +65,31 @@ y = 1 if x > 1
*/
void _CudaHardTanH(const XTensor * x, XTensor * y)
{
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
CheckNTErrors(!x->isSparse && !y->isSparse, "The hard tanh activation function does not support sparse tensors.");
CheckNTErrors(x->unitNum && y->unitNum, "The x vectors must be of the same length.");
CheckNTErrors(!x->isSparse && !y->isSparse, "The hard tanh activation function does not support sparse tensors.");
CheckNTErrors(x->unitNum && y->unitNum, "The x vectors must be of the same length.");
CheckNTErrors((x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) ||
(x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16),
"The hard tanh activation function does not support this datatype.");
int gridSize[3], blockSize[3];
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
KernelHardtanhCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((DTYPE*)x->data, (DTYPE*)y->data, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else{
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
KernelHardtanhCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((__half *)x->data, (__half *)y->data, x->unitNum);
}
else {
//TODO!
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
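A side note on the float16 branch above: instantiating KernelHardtanhCompute with T = __half relies on native half comparisons, which exist only on devices of compute capability 5.3 or higher. A hypothetical, portable variant (a sketch, not the committed code) that falls back to float arithmetic on older architectures could look like this; the host launch is omitted.

#include <cuda_fp16.h>

__global__ void HardTanhHalfSketch(__half * x, __half * y, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
#if __CUDA_ARCH__ >= 530
    const __half one = __float2half(1.0f);
    __half p = x[i];
    if (__hgt(p, one))
        p = one;
    else if (__hlt(p, __hneg(one)))
        p = __hneg(one);
    y[i] = p;
#else
    /* older architectures: compute in float, store back as half */
    float p = __half2float(x[i]);
    p = p > 1.0f ? 1.0f : (p < -1.0f ? -1.0f : p);
    y[i] = __float2half(p);
#endif
}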
/*
......@@ -97,14 +105,15 @@ dy/dx = 1 if -1 <= x <= 1
>> x - x of the function
>> size - size of y/x
*/
template <class T>
__global__
void KernelHardtanhBackward(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x, int size)
void KernelHardtanhBackward(T * dedy, T * dedx, T * gold, T * y, T * x, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
DTYPE s = x[i];
if(s > (DTYPE)1.0 || s < (DTYPE)-1.0)
T s = x[i];
if(s > (T)1.0 || s < (T)-1.0)
dedx[i] = 0;
else
dedx[i] = dedy[i];
......@@ -134,21 +143,24 @@ void _CudaHardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName)
{
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
CheckNTErrors(((x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) ||
(x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16)),
"Input vectors are not in default type.");
/* calculate dE/dy */
if(lossName == CROSSENTROPY)
_CudaCrossEntropyBackward(dedy, y, gold);
else if(lossName != NOLOSS)
_CudaLossBackward(dedy, gold, y, lossName);
/* calculate dE/dy */
if (lossName == CROSSENTROPY)
_CudaCrossEntropyBackward(dedy, y, gold);
else if (lossName != NOLOSS)
_CudaLossBackward(dedy, gold, y, lossName);
int gridSize[3], blockSize[3];
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
/* dE/dx = dE/dy * dy/dx */
KernelHardtanhBackward<<<dim3(gridSize[0]),dim3(blockSize[0])>>>
((DTYPE*)dedy->data,
......@@ -156,11 +168,18 @@ void _CudaHardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
gold == NULL ? NULL : (DTYPE*)gold->data,
(DTYPE*)y->data, (DTYPE*)x->data,
x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
/* dE/dx = dE/dy * dy/dx */
KernelHardtanhBackward<<<dim3(gridSize[0]), dim3(blockSize[0])>>>
((half*)dedy->data,
(half*)dedx->data,
gold == NULL ? NULL : (half*)gold->data,
(half*)y->data, (half*)x->data,
x->unitNum);
}
BacktoCudaDev(x->devID, devIDBackup);
}
#endif
......
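Since the backward pass above now runs in float16 as well, a tiny CPU reference of the rule dE/dx = dE/dy for -1 <= x <= 1 and 0 otherwise can be handy for checking the half kernel against full precision. The helper below is hypothetical (a test-side sketch, not part of this commit).

#include <cstddef>
#include <vector>

/* reference gradient of hard tanh: pass dE/dy through where -1 <= x <= 1 */
std::vector<float> HardTanhBackwardRef(const std::vector<float> & dedy,
                                       const std::vector<float> & x)
{
    std::vector<float> dedx(x.size());
    for (size_t i = 0; i < x.size(); i++)
        dedx[i] = (x[i] > 1.0f || x[i] < -1.0f) ? 0.0f : dedy[i];
    return dedx;
}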
......@@ -50,121 +50,136 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
}
int leadDimRDI = x->order - leadDim - 1;
if (!x->isSparse && !y->isSparse &&
x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE)
{
int * dimSize = new int[x->order - 1];
for (int i = 0; i < x->order; i++) {
if (i < leadDim)
dimSize[i] = -x->dimSize[i];
else if (i > leadDim)
dimSize[i - 1] = -x->dimSize[i];
}
int * dimSize = new int[x->order - 1];
for (int i = 0; i < x->order; i++) {
if (i < leadDim)
dimSize[i] = -x->dimSize[i];
else if (i > leadDim)
dimSize[i - 1] = -x->dimSize[i];
}
XMem * mem = x->mem;
XTensor * max = NULL;
XTensor * sum = NULL;
XTensor * blockx = NULL;
XTensor * blocky = NULL;
XTensor * blockMax = NULL;
XTensor * blockSum = NULL;
int dimensionSize = y->dimSizeRDI[leadDimRDI];
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for (int i = 0; i < leadDimRDI; i++)
stride *= y->dimSizeRDI[i];
blockSize = stride * dimensionSize;
blockNum = y->unitNum / blockSize;
max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
_ReduceMax(x, max, leadDim);
_ReduceSum(x, sum, leadDim, max, 1.0F, true);
if (x->devID >= 0) {
if(leadDimRDI == 0){
blockSize = y->unitNum;
blockNum = 1;
blockx = NewTensor2D(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
blocky = NewTensor2D(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
blockMax = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
blockSum = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
}
else{
blockx = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
blocky = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
blockMax = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
blockSum = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
}
XMem * mem = x->mem;
XTensor * max = NULL;
XTensor * sum = NULL;
XTensor * blockx = NULL;
XTensor * blocky = NULL;
XTensor * blockMax = NULL;
XTensor * blockSum = NULL;
int dimensionSize = y->dimSizeRDI[leadDimRDI];
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for (int i = 0; i < leadDimRDI; i++)
stride *= y->dimSizeRDI[i];
blockSize = stride * dimensionSize;
blockNum = y->unitNum / blockSize;
max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
_ReduceMax(x, max, leadDim);
_ReduceSum(x, sum, leadDim, max, 1.0F, true);
if (x->devID >= 0) {
if(leadDimRDI == 0){
blockSize = y->unitNum;
blockNum = 1;
blockx = NewTensor2D(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
blocky = NewTensor2D(blockSize/dimensionSize, -dimensionSize, x->dataType, x->devID, mem);
blockMax = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
blockSum = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
}
else{
blockx = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
blocky = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
blockMax = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
blockSum = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
}
}
for (int k = 0; k < blockNum; k++) {
int m = stride;
int n = dimensionSize;
for (int k = 0; k < blockNum; k++) {
int m = stride;
int n = dimensionSize;
if (x->devID < 0) {
DTYPE * ip = (DTYPE*)x->data + k * blockSize;
DTYPE * op = (DTYPE*)y->data + k * blockSize;
DTYPE * mp = (DTYPE*)max->data + k * blockSize / dimensionSize;
DTYPE * sp = (DTYPE*)sum->data + k * blockSize / dimensionSize;
if (x->devID < 0) {
for (int j = 0; j < m; j++) {
DTYPE sumValue = sp[j];
if (sumValue == 0) {
for (int i = 0; i < n; i++)
op[i * m + j] = 0;
}
else {
for (int i = 0; i < n; i++) {
DTYPE r = (DTYPE)log(exp(ip[i * m + j] - mp[j]) / sp[j]);
if (IsNAN(r))
r = LOGPROB_MIN;
if (IsINF(r))
r = LOGPROB_MIN;
op[i * m + j] = MAX(r, LOGPROB_MIN);
}
for (int j = 0; j < m; j++) {
DTYPE sumValue = sp[j];
if (sumValue == 0) {
for (int i = 0; i < n; i++)
op[i * m + j] = 0;
}
else {
for (int i = 0; i < n; i++) {
DTYPE r = (DTYPE)log(exp(ip[i * m + j] - mp[j]) / sp[j]);
if (IsNAN(r))
r = LOGPROB_MIN;
if (IsINF(r))
r = LOGPROB_MIN;
op[i * m + j] = MAX(r, LOGPROB_MIN);
}
}
}
}
else {
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
DTYPE * ip = (DTYPE*)x->data + k * blockSize;
DTYPE * op = (DTYPE*)y->data + k * blockSize;
DTYPE * mp = (DTYPE*)max->data + k * blockSize / dimensionSize;
DTYPE * sp = (DTYPE*)sum->data + k * blockSize / dimensionSize;
blockx->data = ip;
blocky->data = op;
blockMax->data = mp;
blockSum->data = sp;
}
else {
half * ip = (half*)x->data + k * blockSize;
half * op = (half*)y->data + k * blockSize;
half * mp = (half*)max->data + k * blockSize / dimensionSize;
half * sp = (half*)sum->data + k * blockSize / dimensionSize;
blockx->data = ip;
blocky->data = op;
blockMax->data = mp;
blockSum->data = sp;
}
#ifdef USE_CUDA
if(leadDimRDI == 0)
_CudaLogSoftmaxSumMax(blockx, blocky, 1, blockSum, blockMax);
else
_CudaLogSoftmaxSumMax(blockx, blocky, leadDim, blockSum, blockMax);
if (leadDimRDI == 0)
_CudaLogSoftmaxSumMax(blockx, blocky, 1, blockSum, blockMax);
else
_CudaLogSoftmaxSumMax(blockx, blocky, leadDim, blockSum, blockMax);
#else
ShowNTErrors("Please specify USE_CUDA and recompile the code!");
ShowNTErrors("Please specify USE_CUDA and recompile the code!");
#endif
blockx->data = NULL;
blocky->data = NULL;
blockMax->data = NULL;
blockSum->data = NULL;
}
blockx->data = NULL;
blocky->data = NULL;
blockMax->data = NULL;
blockSum->data = NULL;
}
}
DelTensorBuf(max);
DelTensorBuf(sum);
if (x->devID >= 0) {
delete blockx;
delete blocky;
delete blockMax;
delete blockSum;
}
DelTensorBuf(max);
DelTensorBuf(sum);
delete[] dimSize;
if (x->devID >= 0) {
delete blockx;
delete blocky;
delete blockMax;
delete blockSum;
}
else
ShowNTErrors("TODO!");
delete[] dimSize;
}
/*
......
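For the CPU branch of _LogSoftmax shown above, the per-element value log(exp(x - max) / sum) is algebraically (x - max) - log(sum), since _ReduceSum is called with the max already subtracted. Below is a small CPU sketch of that equivalent form for a single column (a hypothetical helper, not the committed code), useful as a full-precision reference when checking the CUDA path.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

/* log-softmax of one non-empty column:
   y[i] = (x[i] - max) - log(sum_j exp(x[j] - max)) */
std::vector<float> LogSoftmaxColumnRef(const std::vector<float> & x)
{
    float mx = x[0];
    for (size_t i = 1; i < x.size(); i++)
        mx = std::max(mx, x[i]);
    float sum = 0.0f;
    for (size_t i = 0; i < x.size(); i++)
        sum += std::exp(x[i] - mx);
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); i++)
        y[i] = (x[i] - mx) - std::log(sum);
    return y;
}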