Commit a73f8e42 by ltb

Merge into xiao: Clip/ScaleAndShift (float16/int/int8), LogSoftmax/HardTanH (float16); define __int8 in XGlobal
parent 3501c0fa
......@@ -32,6 +32,8 @@
#ifndef WIN32
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>
typedef int8_t __int8;
#endif
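__int8 is an MSVC built-in type, so the non-WIN32 branch above maps it onto the standard int8_t from <stdint.h>. A minimal stand-alone sketch of the same portability guard; the helper name SaturateToInt8 is only for illustration:

#ifndef WIN32
#include <stdint.h>
typedef int8_t __int8;      /* MSVC defines __int8 natively; gcc/clang do not */
#endif

/* an 8-bit quantity then behaves the same on both toolchains */
static __int8 SaturateToInt8(float v)
{
    if (v > 127.0F)
        return (__int8)127;
    if (v < -128.0F)
        return (__int8)(-128);
    return (__int8)v;
}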
// the CUDA stuff
......@@ -43,6 +45,10 @@
/* the nts (NiuTrans.Tensor) namespace */
namespace nts {
#if (__cplusplus >= 201103L || _MSC_VER >= 1700)
#define USE_CPP11
#endif
#define _XINLINE_
//#define DOUBELPRICSION
......
......@@ -17,6 +17,7 @@
/*
* $Created by: Lin Ye (email: linye2015@outlook.com) 2018-08-03
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-06 float16/int/int8 added
*/
#include "../../XDevice.h"
......@@ -35,8 +36,9 @@ set each entry to its clip value (CUDA Kernel)
>> upper - the upper border
>> size - size of the data array
*/
template <class T>
__global__
void KernelClip(DTYPE * a, DTYPE * b, DTYPE lower, DTYPE upper, int size)
void KernelClip(T * a, T * b, T lower, T upper, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -51,21 +53,6 @@ __global__
}
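The loop body of the templated KernelClip is collapsed in the hunk above; under the new signature it presumably reduces to a per-element comparison on T. A minimal sketch of such a kernel, written as an assumption rather than a copy of the diff (for T = __half the relational operators come from cuda_fp16.h and need sm_53 or newer):

#include <cuda_fp16.h>

template <class T>
__global__
void KernelClipSketch(T * a, T * b, T lower, T upper, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size) {
        T v = a[i];
        if (v > upper)
            v = upper;
        else if (v < lower)
            v = lower;
        b[i] = v;
    }
}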
/*
set each entry to its clip value with float16 data type value (CUDA Kernel)
This is for float16 computation
>> a - pointer to input data array
>> b - pointer to output data array
>> lower - the lower border
>> upper - the upper border
>> size - size of the data array
*/
__global__
void KernelClip(__half * a, __half * b, DTYPE lower, DTYPE upper, int size)
{
return;
}
/*
set each entry to its clip value
>> a - input tensor we are processing
>> b - output tensor we are processing
......@@ -92,7 +79,22 @@ void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper)
KernelClip << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, lower, upper, a->unitNum);
}
else if (a->dataType == X_FLOAT16) {
KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower, upper, a->unitNum);
half lower1 = __float2half(lower);
half upper1 = __float2half(upper);
KernelClip << <blocks, threads >> >((__half*)a->data, (__half*)b->data, lower1, upper1, a->unitNum);
}
else if (a->dataType == X_INT) {
int lower1 = (int)lower;
int upper1 = (int)upper;
KernelClip << <blocks, threads >> >((int *)a->data, (int *)b->data, lower1, upper1, a->unitNum);
}
else if (a->dataType == X_INT8) {
__int8 lower1 = (__int8)lower;
__int8 upper1 = (__int8)upper;
KernelClip << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, lower1, upper1, a->unitNum);
}
else {
ShowNTErrors("TODO!");
......
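The dispatch above converts the float bounds once on the host and then launches the kernel instantiated for the tensor's element type. A stand-alone host-side sketch of that pattern for the float16 case (the kernel is repeated here only so the snippet compiles on its own; grid sizing is simplified, and sm_53 or newer is needed for the half comparisons):

#include <cuda_fp16.h>

template <class T>
__global__
void ClipKernelSketch(T * a, T * b, T lower, T upper, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size)
        b[i] = a[i] > upper ? upper : (a[i] < lower ? lower : a[i]);
}

/* host side: convert the float bounds, then launch the half instantiation */
void LaunchClipFP16(half * devIn, half * devOut, int num, float lower, float upper)
{
    half lowerH = __float2half(lower);
    half upperH = __float2half(upper);
    int threads = 256;
    int blocks  = (num + threads - 1) / threads;
    ClipKernelSketch<half><<<blocks, threads>>>(devIn, devOut, lowerH, upperH, num);
}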
......@@ -29,12 +29,9 @@ namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* set each entry to its clip value (CUDA Kernel) */
template <class T>
__global__
void KernelClip(DTYPE * a, DTYPE * b, DTYPE lower, DTYPE upper, int size);
/* set each entry to its clip value (CUDA Kernel) with float16 data type*/
__global__
void KernelClip(__half * a, __half * b, DTYPE lower, DTYPE upper, int size);
void KernelClip(T * a, T * b, T lower, T upper, int size);
/* set each entry to its clip value */
void _CudaClip(const XTensor * a, XTensor * b, DTYPE lower, DTYPE upper);
......
......@@ -17,6 +17,7 @@
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-06 float16/int added
*/
#include "ScaleAndShift.cuh"
......@@ -34,9 +35,9 @@ scale and shift all tensor entries b = a * scale + shift (CUDA Kernel)
>> scale - how much we want to scale it
>> shift - how much we want to shift it
*/
template<bool isUnitScale, bool isZeroShift>
template<class T, bool isUnitScale, bool isZeroShift>
__global__
void KernelScaleAndShift(DTYPE * a, DTYPE * b, int size, DTYPE scale, DTYPE shift)
void KernelScaleAndShift(T * a, T * b, int size, T scale, T shift)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -56,28 +57,6 @@ void KernelScaleAndShift(DTYPE * a, DTYPE * b, int size, DTYPE scale, DTYPE shif
}
}
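The kernel body is collapsed above, but the two bool template parameters it keeps are compile-time switches that let the compiler drop the multiply and/or the add entirely, which is why the launch code below picks one of four instantiations. A minimal sketch of how such a kernel is usually written; the body is an assumption based on the launch sites, not the diff content, and for T = __half the arithmetic needs sm_53 or newer:

template <class T, bool isUnitScale, bool isZeroShift>
__global__
void KernelScaleAndShiftSketch(T * a, T * b, int size, T scale, T shift)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;

    if (isUnitScale && isZeroShift)            /* b = a                 */
        b[i] = a[i];
    else if (isUnitScale && !isZeroShift)      /* b = a + shift         */
        b[i] = a[i] + shift;
    else if (!isUnitScale && isZeroShift)      /* b = a * scale         */
        b[i] = a[i] * scale;
    else                                       /* b = a * scale + shift */
        b[i] = a[i] * scale + shift;
}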
/*
scale and shift all tensor entries p = p * scale + shift (CUDA Kernel)
This is for float16 computation
>> a - the input data array
>> b - the output data array
>> size - the size of d
>> scale - how much we want to scale it
>> shift - how much we want to shift it
*/
__global__
void KernelScaleAndShift(__half * a, __half * b, int size, __half scale, __half shift)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
if(i < size)
b[i] = __hadd(__hmul(a[i], scale), shift);
#else
if (i < size)
b[i] = __float2half(__half2float(a[i]) * __half2float(scale) + __half2float(shift));
#endif
}
/*
scale and shift all tensor entries
......@@ -108,20 +87,52 @@ void _CudaScaleAndShift(const XTensor * a, XTensor * b, DTYPE scale, DTYPE shift
if(a->dataType == DEFAULT_DTYPE){
if(scale == 1.0F && shift == 0)
KernelScaleAndShift<true, true> <<<blocks, threads>>>((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, true, true> <<<blocks, threads>>>((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<true, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, true, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
else if(scale != 1.0F && shift == 0)
KernelScaleAndShift<false, true> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, false, true> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
else
KernelScaleAndShift<false, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
KernelScaleAndShift<DTYPE, false, false> << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum, scale, shift);
}
else if(a->dataType == X_FLOAT16){
unsigned short scale2 = FloatToFloat16(scale);
unsigned short shift2 = FloatToFloat16(shift);
__half * scaleft16p = (__half*)&scale2;
__half * shiftft16p = (__half*)&shift2;
KernelScaleAndShift<<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, *scaleft16p, *shiftft16p);
half scale1 = __float2half(scale);
half shift1 = __float2half(shift);
if (scale == 1.0F && shift == 0)
KernelScaleAndShift<__half, true, true><<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<__half, true, false><<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
else if (scale != 1.0F && shift == 0)
KernelScaleAndShift<__half, false, true><<<blocks, threads>>>((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
else
KernelScaleAndShift<__half, false, false> << <blocks, threads >> >((__half*)a->data, (__half*)b->data, a->unitNum, scale1, shift1);
}
else if (a->dataType == X_INT){
int scale2 = int(scale);
int shift2 = int(shift);
if (scale == 1.0F && shift == 0)
KernelScaleAndShift<int, true, true><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<int, true, false><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
else if (scale != 1.0F && shift == 0)
KernelScaleAndShift<int, false, true><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
else
KernelScaleAndShift<int, false, false><<<blocks, threads>>>((int *)a->data, (int *)b->data, a->unitNum, scale2, shift2);
}
else if (a->dataType == X_INT8){
__int8 scale2 = __int8(scale);
__int8 shift2 = __int8(shift);
if (scale == 1.0F && shift == 0)
KernelScaleAndShift<__int8, true, true> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
else if (scale == 1.0F && shift != 0)
KernelScaleAndShift<__int8, true, false> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
else if (scale != 1.0F && shift == 0)
KernelScaleAndShift<__int8, false, true> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
else
KernelScaleAndShift<__int8, false, false> << <blocks, threads >> >((__int8 *)a->data, (__int8 *)b->data, a->unitNum, scale2, shift2);
}
else{
ShowNTErrors("TODO!");
......
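One caveat in the integer branches above: scale and shift arrive as float and are cast to the element type before the launch, so fractional values are truncated rather than applied. A small host-only illustration of the effect (values are only for illustration):

#include <cstdio>

int main()
{
    float scale = 0.5F, shift = 2.7F;

    /* what the X_INT branch effectively passes to the kernel */
    int scaleI = (int)scale;       /* 0: the fractional scale is lost      */
    int shiftI = (int)shift;       /* 2: the fractional shift is truncated */

    int a = 10;
    int b = a * scaleI + shiftI;   /* 2, not 10 * 0.5 + 2.7 = 7.7 */
    printf("b = %d\n", b);
    return 0;
}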
......@@ -17,6 +17,7 @@
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-25
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-12 float16 added
*/
#include "HardTanH.h"
......@@ -38,17 +39,18 @@ y = 1 if x > 1
>> y - output data array
>> size - size of input/output
*/
template <class T>
__global__
void KernelHardtanhCompute(DTYPE * x, DTYPE * y, int size)
void KernelHardtanhCompute(T * x, T * y, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
DTYPE p = x[i];
if(p > (DTYPE)1.0)
p = (DTYPE)1.0;
else if(p < (DTYPE)-1.0)
p = (DTYPE)-1.0;
if (i < size) {
T p = x[i];
if (p > (T)1.0)
p = (T)1.0;
else if (p < (T)-1.0)
p = (T)-1.0;
y[i] = p;
}
}
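With T = __half, the comparisons p > (T)1.0 rely on the __half relational operators from cuda_fp16.h, which are only provided for sm_53 and above. A sketch of an arch-guarded variant that also runs on older devices; this guard is an assumption of mine, the committed kernel does not carry it:

#include <cuda_fp16.h>

__global__
void KernelHardtanhHalfSketch(__half * x, __half * y, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
#if __CUDA_ARCH__ >= 530
    __half p = x[i];
    if (p > __float2half(1.0F))
        p = __float2half(1.0F);
    else if (p < __float2half(-1.0F))
        p = __float2half(-1.0F);
    y[i] = p;
#else
    /* fallback: do the clamp in float, convert back to half */
    float p = __half2float(x[i]);
    p = p > 1.0F ? 1.0F : (p < -1.0F ? -1.0F : p);
    y[i] = __float2half(p);
#endif
}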
......@@ -63,10 +65,11 @@ y = 1 if x > 1
*/
void _CudaHardTanH(const XTensor * x, XTensor * y)
{
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
CheckNTErrors(!x->isSparse && !y->isSparse, "The hard tanh activation function does not support sparse tensors.");
CheckNTErrors(x->unitNum == y->unitNum, "The input and output tensors must be of the same length.");
CheckNTErrors((x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) ||
(x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16),
"The hard tanh activation function does not support this datatype.");
int gridSize[3], blockSize[3];
......@@ -75,13 +78,18 @@ void _CudaHardTanH(const XTensor * x, XTensor * y)
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
KernelHardtanhCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((DTYPE*)x->data, (DTYPE*)y->data, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else{
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
KernelHardtanhCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((__half *)x->data, (__half *)y->data, x->unitNum);
}
else {
//TODO!
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
/*
......@@ -97,14 +105,15 @@ dy/dx = 1 if -1 <= x <= 1
>> x - x of the function
>> size - size of y/x
*/
template <class T>
__global__
void KernelHardtanhBackward(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x, int size)
void KernelHardtanhBackward(T * dedy, T * dedx, T * gold, T * y, T * x, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
DTYPE s = x[i];
if(s > (DTYPE)1.0 || s < (DTYPE)-1.0)
T s = x[i];
if(s > (T)1.0 || s < (T)-1.0)
dedx[i] = 0;
else
dedx[i] = dedy[i];
......@@ -134,12 +143,14 @@ void _CudaHardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx,
LOSS_FUNCTION_NAME lossName)
{
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
CheckNTErrors(((x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) ||
(x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16)),
"Input vectors are not in default type.");
/* calculate dE/dy */
if(lossName == CROSSENTROPY)
if (lossName == CROSSENTROPY)
_CudaCrossEntropyBackward(dedy, y, gold);
else if(lossName != NOLOSS)
else if (lossName != NOLOSS)
_CudaLossBackward(dedy, gold, y, lossName);
int gridSize[3], blockSize[3];
......@@ -149,6 +160,7 @@ void _CudaHardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
/* dE/dx = dE/dy * dy/dx */
KernelHardtanhBackward<<<dim3(gridSize[0]),dim3(blockSize[0])>>>
((DTYPE*)dedy->data,
......@@ -156,11 +168,18 @@ void _CudaHardTanHBackward(XTensor * gold, XTensor * y, XTensor * x,
gold == NULL ? NULL : (DTYPE*)gold->data,
(DTYPE*)y->data, (DTYPE*)x->data,
x->unitNum);
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
/* dE/dx = dE/dy * dy/dx */
KernelHardtanhBackward<<<dim3(gridSize[0]), dim3(blockSize[0])>>>
((half*)dedy->data,
(half*)dedx->data,
gold == NULL ? NULL : (half*)gold->data,
(half*)y->data, (half*)x->data,
x->unitNum);
}
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
}
#endif
......
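For reference, the backward kernel above implements dy/dx = 1 inside [-1, 1] and 0 outside, so dE/dx is just dE/dy masked by that range. A tiny host-side reference of the same rule, handy for checking the new half path against the float path (the function name is only for illustration):

/* reference for KernelHardtanhBackward:
   dE/dx = dE/dy if -1 <= x <= 1, otherwise 0 */
void HardTanHBackwardRef(const float * dedy, float * dedx,
                         const float * x, int size)
{
    for (int i = 0; i < size; i++)
        dedx[i] = (x[i] > 1.0F || x[i] < -1.0F) ? 0.0F : dedy[i];
}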
......@@ -50,9 +50,7 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
}
int leadDimRDI = x->order - leadDim - 1;
if (!x->isSparse && !y->isSparse &&
x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE)
{
int * dimSize = new int[x->order - 1];
for (int i = 0; i < x->order; i++) {
if (i < leadDim)
......@@ -106,12 +104,12 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
int m = stride;
int n = dimensionSize;
if (x->devID < 0) {
DTYPE * ip = (DTYPE*)x->data + k * blockSize;
DTYPE * op = (DTYPE*)y->data + k * blockSize;
DTYPE * mp = (DTYPE*)max->data + k * blockSize / dimensionSize;
DTYPE * sp = (DTYPE*)sum->data + k * blockSize / dimensionSize;
if (x->devID < 0) {
for (int j = 0; j < m; j++) {
DTYPE sumValue = sp[j];
if (sumValue == 0) {
......@@ -132,12 +130,31 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
}
}
else {
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
DTYPE * ip = (DTYPE*)x->data + k * blockSize;
DTYPE * op = (DTYPE*)y->data + k * blockSize;
DTYPE * mp = (DTYPE*)max->data + k * blockSize / dimensionSize;
DTYPE * sp = (DTYPE*)sum->data + k * blockSize / dimensionSize;
blockx->data = ip;
blocky->data = op;
blockMax->data = mp;
blockSum->data = sp;
}
else {
half * ip = (half*)x->data + k * blockSize;
half * op = (half*)y->data + k * blockSize;
half * mp = (half*)max->data + k * blockSize / dimensionSize;
half * sp = (half*)sum->data + k * blockSize / dimensionSize;
blockx->data = ip;
blocky->data = op;
blockMax->data = mp;
blockSum->data = sp;
}
#ifdef USE_CUDA
if(leadDimRDI == 0)
if (leadDimRDI == 0)
_CudaLogSoftmaxSumMax(blockx, blocky, 1, blockSum, blockMax);
else
_CudaLogSoftmaxSumMax(blockx, blocky, leadDim, blockSum, blockMax);
......@@ -162,9 +179,7 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
}
delete[] dimSize;
}
else
ShowNTErrors("TODO!");
}
/*
......
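The duplicated pointer setup in the GPU branch above exists because the k * blockSize offset has to be applied in units of the element type: advancing a half pointer moves 2 bytes per element, a DTYPE (float) pointer 4 bytes. A tiny stand-alone illustration:

#include <cuda_fp16.h>
#include <cstdio>

int main()
{
    char buffer[64] = {0};
    int blockSize = 8, k = 1;

    float * fp = (float*)buffer + k * blockSize;   /* advances 32 bytes */
    half  * hp = (half*) buffer + k * blockSize;   /* advances 16 bytes */

    printf("float offset: %td bytes\n", (char*)fp - buffer);
    printf("half  offset: %td bytes\n", (char*)hp - buffer);
    return 0;
}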
......@@ -17,6 +17,7 @@
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-26
* $Update by: Lin Ye (email: linye2015@outlook.com) 2019-07-01 float16 added
*/
#include "LogSoftmax.h"
......@@ -26,6 +27,7 @@
#include "../core/reduce/ReduceSum.cuh"
#include "../core/reduce/ReduceMax.cuh"
#include "../XDevice.h"
#include <device_launch_parameters.h>
namespace nts { // namespace nts(NiuTrans.Tensor)
......@@ -57,11 +59,12 @@ y_{i,j} = log(e^x_{i,j} / \sum_{i} e^{x_{i,j})
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T, TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByRow(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
void KernelLogSoftmaxComputeByRow(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
__shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -78,7 +81,9 @@ void KernelLogSoftmaxComputeByRow(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
if (i < rowNum && j < colNum) {
int key = i * colNum + j;
DTYPE r = log(exp(x[key] - inputMax[threadIdx.x]) / inputSum[threadIdx.x]);
if (dataType == X_FLOAT) {
DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.x])) / (DTYPE)inputSum[threadIdx.x]);
if (isnan(r))
r = LOGPROB_MIN;
......@@ -87,6 +92,11 @@ void KernelLogSoftmaxComputeByRow(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y
y[key] = MAX(r, LOGPROB_MIN);
}
else if (dataType == X_FLOAT16) {
half r = hlog((half)hexp(x[key] - inputMax[threadIdx.x]) / (half)inputSum[threadIdx.x]);
y[key] = r;
}
}
}
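The dataType template parameter above is a compile-time constant, so the compiler can fold the dead branch away in each instantiation, but both branches still have to compile for every T, which is why the half expressions must be valid even in the DTYPE instantiation. A minimal stand-alone illustration of the same dispatch pattern; the enum and function here are made up for the example:

#include <cstdio>

enum FAKE_DATA_TYPE { FAKE_FLOAT, FAKE_INT };

template <class T, FAKE_DATA_TYPE dataType>
T Transform(T v)
{
    /* an ordinary branch on a compile-time constant:
       the dead arm can be folded away per instantiation,
       but both arms are compiled for every T */
    if (dataType == FAKE_FLOAT)
        return v * (T)0.5;
    else
        return v + (T)1;
}

int main()
{
    printf("%f\n", Transform<float, FAKE_FLOAT>(2.0F));  /* 1.0 */
    printf("%d\n", Transform<int,   FAKE_INT>(2));       /* 3   */
    return 0;
}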
/*
......@@ -104,11 +114,12 @@ y_{i,j} = log(e^x_{i,j} / \sum_{j} e^{x_{i,j})
>> rowNum - row number of the matrix
>> colNum - column number of the matrix
*/
template <class T, TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxComputeByCol(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y, int rowNum, int colNum)
void KernelLogSoftmaxComputeByCol(T * x, T * max, T * sum, T * y, int rowNum, int colNum)
{
__shared__ DTYPE inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ DTYPE inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputSum[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T inputMax[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
......@@ -125,12 +136,8 @@ void KernelLogSoftmaxComputeByCol(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
if (i < rowNum && j < colNum) {
int key = i * colNum + j;
DTYPE r = log(exp(x[key] - inputMax[threadIdx.y]) / inputSum[threadIdx.y]);
/*if (r < LOGPROB_MIN)
{
printf("min %e %e, %e %e, %e %e\n", r, x[key] - inputMax[threadIdx.y], x[key], inputMax[threadIdx.y], exp(x[key] - inputMax[threadIdx.y]), inputSum[threadIdx.y]);
}*/
if (dataType == X_FLOAT) {
DTYPE r = log((DTYPE)exp((DTYPE)(x[key] - inputMax[threadIdx.y])) / (DTYPE)inputSum[threadIdx.y]);
if (isnan(r))
r = LOGPROB_MIN;
......@@ -139,6 +146,11 @@ void KernelLogSoftmaxComputeByCol(DTYPE * x, DTYPE * max, DTYPE * sum, DTYPE * y
y[key] = MAX(r, LOGPROB_MIN);
}
else if (dataType == X_FLOAT16) {
half r = hlog((half)hexp(x[key] - inputMax[threadIdx.y]) / (half)inputSum[threadIdx.y]);
y[key] = r;
}
}
}
/*
......@@ -173,17 +185,43 @@ void _CudaLogSoftmaxSumMax(XTensor * x, XTensor * y, int leadDim, XTensor * sum,
GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
KernelLogSoftmaxComputeByRow << <dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0]) >> >
KernelLogSoftmaxComputeByRow<DTYPE, DEFAULT_DTYPE> <<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
}
else {
GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
KernelLogSoftmaxComputeByCol << <dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1]) >> >
KernelLogSoftmaxComputeByCol<DTYPE, DEFAULT_DTYPE> <<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
((DTYPE*)x->data, maxData, sumData, (DTYPE*)y->data, n, m);
}
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
int gridSize[3], blockSize[3];
int n = x->dimSize[0];
int m = x->dimSize[1];
/* reuse the tensor's max/sum buffers as half-precision pointers */
__half * maxData = (half*)max->data;
__half * sumData = (half*)sum->data;
if (leadDim == 0) {
GDevs.GetCudaThread2D(x->devID, n, m, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{j}) / \sum_{k} e^{s_{k,j} - max_{j}}) */
KernelLogSoftmaxComputeByRow<half, X_FLOAT16> <<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
((half*)x->data, maxData, sumData, (half *)y->data, n, m);
}
else {
GDevs.GetCudaThread2D(x->devID, m, n, MAX_INT, gridSize, blockSize);
/* y_{i,j} = log(e^(s_{i,j} - max_{i}) / \sum_{k} e^{s_{i,k} - max_{i}}) */
KernelLogSoftmaxComputeByCol<half, X_FLOAT16> <<<dim3(gridSize[0], gridSize[1]), dim3(blockSize[0], blockSize[1])>>>
((half*)x->data, maxData, sumData, (half*)y->data, n, m);
}
}
else {
ShowNTErrors("TODO!");
}
......@@ -200,18 +238,19 @@ set dE/dx = exp(y)
>> size - size of output
>> lossName - name of the loss function
*/
template <class T>
__global__
void KernelExpLoss(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size, LOSS_FUNCTION_NAME lossName)
void KernelExpLoss(T * dedy, T * dedx, T * y, int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
/* dE/dx_j = exp(y_j) */
if (lossName == CROSSENTROPY)
dedx[i] = exp(y[i]);
dedx[i] = exp(((DTYPE)y[i]));
/* dE/dx_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
dedx[i] = exp(y[i]);
dedx[i] = exp(((DTYPE)y[i]));
else if (lossName == ONEHOTERROR)
dedx[i] = 0;
else
......@@ -232,23 +271,25 @@ dE/dx = dE/dy * dy/dx
>> size - size of input/output
>> lossName - name of the loss function
*/
template <class T, TENSOR_DATA_TYPE dataType>
__global__
void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYPE * y, DTYPE * x,
void KernelLogSoftmaxBackwardDEDS(T * dedy, T * dedx, T * gold, T * y, T * x,
int size, LOSS_FUNCTION_NAME lossName)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (dataType == X_FLOAT) {
DTYPE r = 0;
/* dE/ds_j = exp(y_j) */
if (lossName == CROSSENTROPY)
r = -gold[i] + exp(y[i]);
r = -(DTYPE)gold[i] + (DTYPE)exp(((DTYPE)y[i]));
/* dE/ds_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
r = -gold[i] + exp(y[i]);
r = -(DTYPE)gold[i] + (DTYPE)exp(((DTYPE)y[i]));
else if (lossName == ONEHOTERROR) {
if (gold[i] == 1.0F)
r = -gold[i] + exp(y[i]);
if ((DTYPE)gold[i] == 1.0)
r = -(DTYPE)gold[i] + (DTYPE)exp(((DTYPE)y[i]));
else
r = 0;
}
......@@ -263,6 +304,27 @@ void KernelLogSoftmaxBackwardDEDS(DTYPE * dedy, DTYPE * dedx, DTYPE * gold, DTYP
dedx[i] = r;
}
else if (dataType == X_FLOAT16) {
half r = 0;
/* dE/ds_j = exp(y_j) */
if (lossName == CROSSENTROPY)
r = -(half)gold[i] + (half)hexp(y[i]);
/* dE/ds_j = exp(y_j) */
else if (lossName == SQUAREDERROR)
r = -(half)gold[i] + (half)hexp(y[i]);
else if (lossName == ONEHOTERROR) {
if ((half)gold[i] == (half)1.0)
r = -(half)gold[i] + (half)hexp(y[i]);
else
r = 0;
}
else {
r = dedy[i];
}
dedx[i] = r;
}
}
}
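For the cross entropy case the kernel above computes dE/dx_j = -gold_j + exp(y_j), with y_j already a log-probability. A small host-side reference of that formula, useful for comparing the float and half paths numerically (names and values are only for illustration):

#include <cmath>
#include <cstdio>

/* reference: dE/dx_j = -gold_j + exp(y_j) for CROSSENTROPY */
void LogSoftmaxBackwardRef(float * dedx, const float * gold,
                           const float * y, int size)
{
    for (int j = 0; j < size; j++)
        dedx[j] = -gold[j] + std::exp(y[j]);
}

int main()
{
    float y[3]    = { -0.4F, -1.2F, -2.3F };  /* log-probabilities */
    float gold[3] = {  1.0F,  0.0F,  0.0F };  /* one-hot target    */
    float dedx[3];

    LogSoftmaxBackwardRef(dedx, gold, y, 3);
    for (int j = 0; j < 3; j++)
        printf("dedx[%d] = %f\n", j, dedx[j]);
    return 0;
}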
/*
......@@ -282,11 +344,12 @@ dE/dx_j += -gold_j
>> gNonZeroNum - number of non-zero (key, value) items in the sparse gold tensor
>> lossName - name of the loss function
*/
template <class T>
__global__
void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void * gold, DTYPE * y, DTYPE * x,
void KernelLogSoftmaxBackwardDEDSSparseByRow(T * dedy, T * dedx, void * gold, T * y, T * x,
int rowNum, int colNum, int gNonZeroNum, LOSS_FUNCTION_NAME lossName)
{
int tupleSize = sizeof(int) + sizeof(DTYPE);
int tupleSize = sizeof(int) + sizeof(T);
int k = blockDim.x * blockIdx.x + threadIdx.x;
if (k < gNonZeroNum) {
......@@ -294,7 +357,7 @@ void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void *
int key = *(int*)((char*)gold + tupleSize * k);
int ni = key / colNum;
int mi = key % colNum;
int value = *(DTYPE*)((char*)gold + tupleSize * k + sizeof(int));
int value = *(T*)((char*)gold + tupleSize * k + sizeof(int));
if (lossName == CROSSENTROPY)
dedx[colNum * ni + mi] += -value;
......@@ -303,7 +366,7 @@ void KernelLogSoftmaxBackwardDEDSSparseByRow(DTYPE * dedy, DTYPE * dedx, void *
else if (lossName == ONEHOTERROR) {
int offset = colNum * ni + mi;
if (value == 1.0F)
dedx[offset] += (-value + exp(y[offset]));
dedx[offset] += (-value + exp(((DTYPE)y[offset])));
//dedx[offset] += -value * 0.005;
}
}
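The sparse gold tensor is walked as a packed array of (int key, T value) tuples, so tupleSize = sizeof(int) + sizeof(T) shrinks from 8 bytes for float to 6 bytes for half. A small illustration of the addressing the kernel's pointer arithmetic assumes (the layout itself is inferred from that arithmetic):

#include <cuda_fp16.h>
#include <cstdio>

int main()
{
    /* packed (int key, T value) tuples, as the kernel assumes */
    printf("tuple size, T = float: %zu\n", sizeof(int) + sizeof(float)); /* 8 */
    printf("tuple size, T = half : %zu\n", sizeof(int) + sizeof(half));  /* 6 */

    /* byte offset of the k-th value, mirroring the kernel's pointer math */
    int k = 3;
    size_t tupleSize = sizeof(int) + sizeof(half);
    printf("value %d starts at byte %zu\n", k, tupleSize * k + sizeof(int));
    return 0;
}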
......@@ -383,6 +446,8 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
CheckNTErrors((x->devID == y->devID && gold->devID == y->devID),
"Tensors used in log softmax are not on the same GPU.");
CheckNTErrors((gold != NULL), "No x gold standard is found!");
CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS),
"Unknown loss function.");
int leadDimRDI = y->order - leadDim - 1;
int dimensionSize = y->dimSizeRDI[leadDimRDI];
......@@ -399,9 +464,6 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
CheckNTErrors((lossName == CROSSENTROPY || lossName == SQUAREDERROR || lossName == NOLOSS),
"Unknown loss function.");
int cudaGridSize[3], cudaBlockSize[3];
if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) {
......@@ -411,7 +473,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize);
/* dE/ds_j = exp(y_j) */
KernelExpLoss <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
KernelExpLoss <DTYPE> <<< dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(DTYPE*)dedx->data,
(DTYPE*)y->data,
......@@ -421,7 +483,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize);
/* dE/ds_j += -gold_j */
KernelLogSoftmaxBackwardDEDSSparseByRow <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
KernelLogSoftmaxBackwardDEDSSparseByRow <DTYPE> <<< dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(DTYPE*)dedx->data,
(char*)gold->data + sizeof(int),
......@@ -436,7 +498,7 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
/* dE/ds_j = -gold_j + exp(y_j) */
KernelLogSoftmaxBackwardDEDS <<<dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
KernelLogSoftmaxBackwardDEDS <DTYPE, X_FLOAT> <<< dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(DTYPE*)dedx->data + k * blockSize,
(DTYPE*)gold->data + k * blockSize,
......@@ -470,6 +532,76 @@ void _CudaLogSoftmaxBackward(XTensor * gold, XTensor * y, XTensor * x,
ShowNTErrors("TODO!");
}
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
int cudaGridSize[3], cudaBlockSize[3];
if (lossName == CROSSENTROPY || lossName == SQUAREDERROR) {
if (gold->isSparse) {
CheckNTErrors((gold->order == 2), "TODO!")
CheckNTErrors((leadDim == 0), "TODO!");
GDevs.GetCudaThread(x->devID, x->unitNum, cudaGridSize, cudaBlockSize);
/* dE/ds_j = exp(y_j) */
KernelExpLoss <__half> <<< dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(__half*)dedx->data,
(__half*)y->data,
dimensionSize * stride,
lossName);
GDevs.GetCudaThread(x->devID, gold->unitNumNonZero, cudaGridSize, cudaBlockSize);
/* dE/ds_j += -gold_j */
KernelLogSoftmaxBackwardDEDSSparseByRow <__half> <<< dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(__half*)dedx->data,
(char*)gold->data + sizeof(int),
(__half*)y->data,
(__half*)x->data,
dedx->dimSize[0], dedx->dimSize[1], gold->unitNumNonZero, lossName);
}
else {
CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The tensors must be of the same size!");
for (int k = 0; k < blockNum; k++) {
GDevs.GetCudaThread(x->devID, blockSize, cudaGridSize, cudaBlockSize);
/* dE/ds_j = -gold_j + exp(y_j) */
KernelLogSoftmaxBackwardDEDS <__half, X_FLOAT16> <<< dim3(cudaGridSize[0]), dim3(cudaBlockSize[0]) >>>
(NULL,
(__half*)dedx->data + k * blockSize,
(__half*)gold->data + k * blockSize,
(__half*)y->data + k * blockSize,
(__half*)x->data + k * blockSize,
dimensionSize * stride, lossName);
}
}
if (padding != NULL) {
int n = leadDim;
int paddingOrder = padding->order;
int * paddingDims = new int[paddingOrder];
memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int));
padding->Reshape(padding->unitNum);
int order = dedx->order;
int * dims = new int[order];
memcpy(dims, dedx->dimSize, dedx->order * sizeof(int));
dedx->Reshape(dedx->unitNum / dedx->GetDim(n), dedx->GetDim(n));
_MultiplyDimMe(dedx, padding, 0);
padding->Reshape(paddingOrder, paddingDims);
dedx->Reshape(order, dims);
delete[] paddingDims;
delete[] dims;
}
}
else {
ShowNTErrors("TODO!");
}
}
else{
ShowNTErrors("TODO!");
}
......