/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-19
*/

#include "../XUtility.h"
#include "TSigmoid.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* 
case 1: test Sigmoid function.
sigmoid function: y = 1/(1+exp(-x))
*/
bool TestSigmoid1()
{
    /* an input tensor of size (3) */
    int order = 1;
    int * dimSize = new int[order];
    dimSize[0] = 3;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
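    /* expected outputs, computed from y = 1/(1+exp(-x)):
       sigmoid(0) = 0.5, sigmoid(1) ~= 0.7311, sigmoid(2) ~= 0.8808 */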
    DTYPE answer[3] = {0.5F, 0.7311F, 0.8808F};

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * x = NewTensor(order, dimSize);
    XTensor * y = NewTensor(order, dimSize);
    XTensor yUser;

    /* initialize variables */
    x->SetData(xData, unitNum);
    y->SetZeroAll();

    /* call Sigmoid function */
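    /* _Sigmoid writes the result into the pre-allocated tensor y,
       while Sigmoid returns the result as a new tensor (yUser) */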
    _Sigmoid(x, y);
    yUser = Sigmoid(*x);

    /* check result */
    cpuTest = y->CheckData(answer, unitNum, 1e-4F)
              && yUser.CheckData(answer, unitNum, 1e-4F);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
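    /* the trailing NewTensor arguments give the data type, the dense ratio
       and (presumably) the device id, so these tensors are placed on GPU 0 */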
    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor yUserGPU;

    /* initialize variables */
    xGPU->SetData(xData, unitNum);
    yGPU->SetZeroAll();

    /* call Sigmoid function */
    _Sigmoid(xGPU, yGPU);
    yUserGPU = Sigmoid(*xGPU);

    /* check result */
    gpuTest = yGPU->CheckData(answer, unitNum, 1e-4F)
              && yUserGPU.CheckData(answer, unitNum, 1e-4F);

    /* destroy variables */
    delete x;
    delete y;
    delete xGPU;
    delete yGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete x;
    delete y;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* 
case 2: test Sigmoid function and SigmoidBackward function.
sigmoid function: y = 1/(1+exp(-x))
backward computation:
dE/dx = dE/dy * dy/dx
dy/dx = y * (1 - y)
In this case, LossName = CROSSENTROPY.
*/
bool TestSigmoid2()
{
    /* an input tensor of size (3) */
    int order = 1;
    int * dimSize = new int[order];
    dimSize[0] = 3;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE xData[3] = {0.0F, 1.0F, 2.0F};
    DTYPE gData[3] = {0.4F, 0.8F, 1.0F};
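    /* the expected values below follow from y = sigmoid(x) and, assuming the
       cross-entropy loss E = -sum(g * log(y)) used here, dE/dy = -g/y and
       dE/dx = dE/dy * y * (1 - y); e.g. for x = 0: y = 0.5,
       dE/dy = -0.4/0.5 = -0.8 and dE/dx = -0.8 * 0.5 * 0.5 = -0.2 */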
    DTYPE yAnswer[3] = {0.5F, 0.7311F, 0.8808F};
    DTYPE dedyAnswer[3] = {-0.8F, -1.0943F, -1.1353F};
    DTYPE dedxAnswer[3] = {-0.2F, -0.2151F, -0.1192F};

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * x = NewTensor(order, dimSize);
    XTensor * y = NewTensor(order, dimSize);
    XTensor * g = NewTensor(order, dimSize);
    XTensor * dedy = NewTensor(order, dimSize);
    XTensor * dedx = NewTensor(order, dimSize);

    /* initialize variables */
    x->SetData(xData, unitNum);
    g->SetData(gData, unitNum);
    y->SetZeroAll();
    dedy->SetZeroAll();
    dedx->SetZeroAll();

    /* call Sigmoid function */
    _Sigmoid(x, y);

    /* call SigmoidBackward function */
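    /* arguments: gold standard g, output y, input x, dE/dy, dE/dx and the
       loss function name; dedy and dedx were zero-initialized above and are
       filled in by the call, then checked against the expected gradients */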
    _SigmoidBackward(g, y, x, dedy, dedx, CROSSENTROPY);
    
    /* check result */
    cpuTest = y->CheckData(yAnswer, unitNum, 1e-4F)
              && dedx->CheckData(dedxAnswer, unitNum, 1e-4F)
              && dedy->CheckData(dedyAnswer, unitNum, 1e-4F);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * xGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * gGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedyGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedxGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    xGPU->SetData(xData, unitNum);
    gGPU->SetData(gData, unitNum);
    yGPU->SetZeroAll();
    dedyGPU->SetZeroAll();
    dedxGPU->SetZeroAll();

    /* call Sigmoid function */
    _Sigmoid(xGPU, yGPU);

    /* call SigmoidBackward function */
    _SigmoidBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, CROSSENTROPY);
    
    /* check result */
    gpuTest = yGPU->CheckData(yAnswer, unitNum, 1e-4F)
              && dedxGPU->CheckData(dedxAnswer, unitNum, 1e-4F)
              && dedyGPU->CheckData(dedyAnswer, unitNum, 1e-4F);
    /* destroy variables */
    delete x;
    delete y;
    delete g;
    delete dedx;
    delete dedy;
    delete xGPU;
    delete yGPU;
    delete gGPU;
    delete dedxGPU;
    delete dedyGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete x;
    delete y;
    delete g;
    delete dedx;
    delete dedy;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
    TODO!!
*/

/* test for Sigmoid Function */
bool TestSigmoid()
{
    XPRINT(0, stdout, "[TEST SIGMOID] sigmoid function and its backward computation \n");
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestSigmoid1();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");
    
    /* case 2 test */
    caseFlag = TestSigmoid2();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)