/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-31
*/

#include <math.h>
#include "../../XDevice.h"
#include "../../XName.h"
#include "Unary.h"
#include "Unary.cuh"

namespace nts {

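/* square of the input value: y = x * x (device-side helper) */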
__device__
DTYPE CudaSquare(DTYPE x)
{
    return x * x;
}

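/* round to the nearest integer, with halfway cases rounded away from zero */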
__device__
DTYPE CudaRound(DTYPE r)
{
    return (r > 0.0) ? (DTYPE)floor(r + 0.5) : (DTYPE)ceil(r - 0.5);
}

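/* 
this macro generates, for each unary function, a pair of CUDA kernels
(float and half overloads of Kernel##funcName) that apply "origFunc" to
every element, plus a host-side wrapper (_Cuda##funcName) that chooses
the launch geometry and runs the proper kernel on the tensor's device
>> funcName - name stem for the generated kernel and wrapper
>> origFunc - the elementwise function applied to each entry
*/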
#define SIMPLE_UNARY_FUNCTION_GPU(funcName, origFunc)                       \
__global__                                                                  \
void Kernel##funcName(DTYPE * a, DTYPE * b, int size)                       \
{                                                                           \
    int i = blockDim.x * blockIdx.x + threadIdx.x;                          \
                                                                            \
    if (i < size)                                                           \
        b[i] = (DTYPE)origFunc(a[i]);                                       \
}                                                                           \
__global__                                                                  \
void Kernel##funcName(__half * a, __half * b, int size)                     \
{                                                                           \
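    /* half-precision version is not implemented yet; this is a no-op stub */\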
    return;                                                                 \
}                                                                           \
void _Cuda##funcName(const XTensor * a, XTensor * b)                        \
{                                                                           \
    CheckNTErrors((XTensor::IsSameShaped(a, b)),                            \
                  "Input tensors should have the same shape!");             \
    CheckNTErrors((a->isSparse == false), "TODO!");                         \
                                                                            \
    int gridSize[3];                                                        \
    int blockSize[3];                                                       \
                                                                            \
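    /* choose the grid and block sizes for a->unitNum elements */           \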
    GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);         \
                                                                            \
    dim3 blocks(gridSize[0]);                                               \
    dim3 threads(blockSize[0]);                                             \
                                                                            \
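    /* switch to the tensor's device, backing up the current device id */   \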
    int devIDBackup;                                                        \
    ProtectCudaDev(a->devID, devIDBackup);                                  \
                                                                            \
    if (a->dataType == DEFAULT_DTYPE) {                                     \
        Kernel##funcName<<<blocks, threads>>>                               \
                         ((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum);    \
    }                                                                       \
    else if (a->dataType == X_FLOAT16) {                                    \
        Kernel##funcName<<<blocks, threads>>>                               \
                         ((__half*)a->data, (__half*)b->data, a->unitNum);  \
    }                                                                       \
    else {                                                                  \
        ShowNTErrors("TODO!");                                              \
    }                                                                       \
                                                                            \
    BacktoCudaDev(a->devID, devIDBackup);                                   \
}

SIMPLE_UNARY_FUNCTION_GPU(Absolute, fabs)
SIMPLE_UNARY_FUNCTION_GPU(Ceil, ceil)
SIMPLE_UNARY_FUNCTION_GPU(Exp, exp)
SIMPLE_UNARY_FUNCTION_GPU(Floor, floor)
SIMPLE_UNARY_FUNCTION_GPU(Log, log)
SIMPLE_UNARY_FUNCTION_GPU(Round, CudaRound)
SIMPLE_UNARY_FUNCTION_GPU(Sqrt, sqrt)
SIMPLE_UNARY_FUNCTION_GPU(Square, CudaSquare)

SIMPLE_UNARY_FUNCTION_GPU(Sin, sin)
SIMPLE_UNARY_FUNCTION_GPU(Cos, cos)
SIMPLE_UNARY_FUNCTION_GPU(Tan, tan)

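/* 
a minimal usage sketch (illustration only, not part of the original file):
assuming dense float32 tensors "a" and "b" of the same shape that already
reside on a CUDA device, the generated wrappers can be called directly, e.g.

    _CudaAbsolute(&a, &b);    // b[i] = fabs(a[i]) for every element

the wrapper verifies the shapes, computes the launch geometry via
GDevs.GetCudaThread, and runs KernelAbsolute over a->unitNum elements
*/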
} // namespace nts