/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-19
*/

#include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TSigmoid.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* 
29
case 1: test Sigmoid function.
liyinqiao committed
30
sigmoid function: y = 1/(1+exp(-x))
xiaotong committed
31 32 33 34
*/
bool TestSigmoid1()
{
    /* a input tensor of size (3) */
35 36 37
    int order = 1;
    int * dimSize = new int[order];
    dimSize[0] = 3;
xiaotong committed
38

39 40 41
    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];
xiaotong committed
42 43

    DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
44
    DTYPE answer[3] = {0.5F, 0.7311F, 0.8808F};
xiaotong committed
45 46 47 48 49

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
50 51
    XTensor * x = NewTensorV2(order, dimSize);
    XTensor * y = NewTensorV2(order, dimSize);
52
    XTensor yUser;
xiaotong committed
53 54

    /* initialize variables */
55
    x->SetData(xData, unitNum);
xiaotong committed
56 57 58
    y->SetZeroAll();

    /* call Sigmoid function */
59
    _Sigmoid(x, y);
60
    yUser = Sigmoid(*x);
xiaotong committed
61 62

    /* check result */
liyinqiao committed
63 64
	cpuTest = _CheckData(y, answer, unitNum, 1e-4F) &&
              _CheckData(&yUser, answer, unitNum, 1e-4F);
xiaotong committed
65 66 67 68 69 70

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

        /* create tensors */
71 72
    XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
73
    XTensor yUserGPU;
xiaotong committed
74 75

    /* initialize variables */
76
    xGPU->SetData(xData, unitNum);
xiaotong committed
77 78 79
    yGPU->SetZeroAll();

    /* call Sigmoid function */
80
    _Sigmoid(xGPU, yGPU);
81
    yUserGPU = Sigmoid(*xGPU);
xiaotong committed
82 83

    /* check result */
liyinqiao committed
84 85
	gpuTest = _CheckData(yGPU, answer, unitNum, 1e-4F) &&
              _CheckData(&yUserGPU, answer, unitNum, 1e-4F);
xiaotong committed
86 87

    /* destroy variables */
liyinqiao committed
88 89 90 91
    delete x;
    delete y;
    delete xGPU;
    delete yGPU;
92
    delete[] dimSize;
xiaotong committed
93 94 95 96

    return cpuTest && gpuTest;
#else
    /* destroy variables */
liyinqiao committed
97 98
    delete x;
    delete y;
99
    delete[] dimSize;
xiaotong committed
100 101 102 103 104

    return cpuTest;
#endif // USE_CUDA
}

liyinqiao committed
105 106 107
/* 
case 2: test Sigmoid function and SigmoidBackward function.
sigmoid function: y = 1/(1+exp(-x))
108 109
backward computation: 
dE/ds = dE/dy * dy/dx
liyinqiao committed
110
dy/dx = y * (1 - y)
111
In this case, LossName=CROSSENTROPY.
xiaotong committed
112 113 114 115
*/
bool TestSigmoid2()
{
    /* a input tensor of size (3) */
116 117 118
    int order = 1;
    int * dimSize = new int[order];
    dimSize[0] = 3;
xiaotong committed
119

120 121 122
    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];
xiaotong committed
123 124

    DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
125
    DTYPE yAnswer[3] = {0.5F, 0.7311F, 0.8808F};
liyinqiao committed
126 127
    DTYPE dedyData[3] = {0.0F, 1.0F, 2.0F};
    DTYPE dedxAnswer[3] = {0.0F, 0.1966F, 0.2100F};
xiaotong committed
128 129 130 131 132

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
133 134 135 136
    XTensor * x = NewTensorV2(order, dimSize);
    XTensor * y = NewTensorV2(order, dimSize);
    XTensor * dedy = NewTensorV2(order, dimSize);
    XTensor * dedx = NewTensorV2(order, dimSize);
xiaotong committed
137 138

    /* initialize variables */
139
    x->SetData(xData, unitNum);
xiaotong committed
140 141
    y->SetZeroAll();
    dedx->SetZeroAll();
liyinqiao committed
142
    dedy->SetData(dedyData, unitNum);
xiaotong committed
143 144

    /* call Sigmoid function */
145
    _Sigmoid(x, y);
xiaotong committed
146 147

    /* call SigmoidBackward function */
liyinqiao committed
148 149
    _SigmoidBackward(y, x, dedy, dedx);

xiaotong committed
150
    /* check result */
liyinqiao committed
151 152
    cpuTest = _CheckData(y, yAnswer, unitNum, 1e-4F) &&
              _CheckData(dedx, dedxAnswer, unitNum, 1e-4F);
xiaotong committed
153 154 155 156 157

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

liyinqiao committed
158
    /* create tensors */
159 160 161 162
    XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
xiaotong committed
163 164

    /* initialize variables */
165
    xGPU->SetData(xData, unitNum);
xiaotong committed
166 167
    yGPU->SetZeroAll();
    dedxGPU->SetZeroAll();
liyinqiao committed
168
    dedyGPU->SetData(dedyData, unitNum);
xiaotong committed
169 170

    /* call Sigmoid function */
171
    _Sigmoid(xGPU, yGPU);
xiaotong committed
172 173

    /* call SigmoidBackward function */
liyinqiao committed
174
    _SigmoidBackward(yGPU, xGPU, dedyGPU, dedxGPU);
xiaotong committed
175 176
    
    /* check result */
liyinqiao committed
177 178
    gpuTest = _CheckData(yGPU, yAnswer, unitNum, 1e-4F) &&
              _CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);
liyinqiao committed
179

xiaotong committed
180
    /* destroy variables */
liyinqiao committed
181 182 183 184 185 186 187 188
    delete x;
    delete y;
    delete dedx;
    delete dedy;
    delete xGPU;
    delete yGPU;
    delete dedxGPU;
    delete dedyGPU;
189
    delete[] dimSize;
xiaotong committed
190 191 192 193

    return cpuTest && gpuTest;
#else
    /* destroy variables */
liyinqiao committed
194 195 196 197
    delete x;
    delete y;
    delete dedx;
    delete dedy;
198
    delete[] dimSize;
xiaotong committed
199 200 201 202 203 204 205 206 207 208 209 210 211

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
    TODO!!
*/

/* test for Sigmoid Function */
bool TestSigmoid()
{
liyinqiao committed
212
    XPRINT(0, stdout, "[TEST SIGMOID] sigmoid function and its backward computation \n");
xiaotong committed
213 214 215 216 217 218 219 220 221 222 223
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestSigmoid1();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");
224 225 226 227 228 229 230 231 232 233
    
    /* case 2 test */
    caseFlag = TestSigmoid2();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");
xiaotong committed
234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)