/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-19
*/

#include "../XTensor.h"
#include "../XUtility.h"
#include "../core/utilities/CheckData.h"
#include "TSoftmax.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

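/*
A hypothetical reference helper (a sketch for this test file only, not part
of the NiuTrans.Tensor API): it recomputes the softmax of one row of plain
DTYPE values, and is one way to reproduce the expected `answer` arrays in
the cases below by hand. It assumes exp() from <math.h> is visible through
the headers included above.
*/
static void NaiveSoftmaxRow(const DTYPE * x, DTYPE * y, int n)
{
    /* subtract the row maximum so that exp() cannot overflow */
    DTYPE max = x[0];
    for (int i = 1; i < n; i++)
        max = x[i] > max ? x[i] : max;

    /* exponentiate and accumulate the normalizer */
    DTYPE sum = 0.0F;
    for (int i = 0; i < n; i++) {
        y[i] = (DTYPE)exp(x[i] - max);
        sum += y[i];
    }

    /* normalize so that the row sums to 1 */
    for (int i = 0; i < n; i++)
        y[i] /= sum;
}
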
/* 
case 1: test Softmax function.
softmax function: y_i = e^{x_i} / \sum_{j} e^{x_j}
*/
bool TestSoftmax1()
{
    /* a tensor of size (2, 3) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 3;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE xData[2][3] = { {0.0F, 1.0F, 2.0F}, 
                          {0.5F, 0.7F, 1.4F} };
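    /* expected output: each row of answer is the softmax of the
       corresponding row of xData (cf. NaiveSoftmaxRow above) */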
    DTYPE answer[2][3] = { {0.0900F, 0.2447F, 0.6652F}, 
                           {0.2136F, 0.2609F, 0.5254F} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * x = NewTensorV2(order, dimSize);
    XTensor * y = NewTensorV2(order, dimSize);
    XTensor yUser;

    /* initialize variables */
    x->SetData(xData, unitNum);
    y->SetZeroAll();

    /* call Softmax function */
    _Softmax(x, y, 1);
    yUser = Softmax(*x, 1);
    
    /* check result */
    cpuTest = _CheckData(y, answer, unitNum, 1e-4F) &&
              _CheckData(&yUser, answer, unitNum, 1e-4F);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor yUserGPU;

    /* initialize variables */
    xGPU->SetData(xData, unitNum);
    yGPU->SetZeroAll();

    /* call Softmax function */
    _Softmax(xGPU, yGPU, 1);
    yUserGPU = Softmax(*xGPU, 1);
    
    /* check result */
    gpuTest = _CheckData(yGPU, answer, unitNum, 1e-4F) &&
              _CheckData(&yUserGPU, answer, unitNum, 1e-4F);

    /* destroy variables */
    delete x;
    delete y;
    delete xGPU;
    delete yGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete x;
    delete y;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

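/*
Background for case 2 (a restatement of the standard softmax + cross-entropy
result, not of the library internals): with E = -\sum_i gold_i log y_i and
y = softmax(x),
    dE/dx_j = \sum_i (dE/dy_i) * (dy_i/dx_j)
            = \sum_i (-gold_i / y_i) * y_i * (delta_{ij} - y_j)
            = -gold_j + y_j * \sum_i gold_i
            = y_j - gold_j    (since the gold distribution sums to 1),
which matches the formula in the comment below.
*/
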
/* 
case 2: test SoftmaxBackward function.
SoftmaxBackward function: dE/dx_j = -gold_j + y_j
In this case, LossName=CROSSENTROPY.
*/
bool TestSoftmax2()
{
    /* an input tensor of size (1, 3) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 1;
    dimSize[1] = 3;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE xData[1][3] = { {0.0F, 1.0F, 2.0F} };
    DTYPE gData[1][3] = { {0.0F, 0.0F, 1.0F} };
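    /* expected values: yAnswer = softmax(xData) along dimension 1;
       dedxAnswer = yAnswer - gData (see the derivation above) */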
    DTYPE yAnswer[1][3] = { {0.0900F, 0.2447F, 0.6652F} };
    DTYPE dedxAnswer[1][3] = { {0.0900F, 0.2447F, -0.3347F} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * x = NewTensorV2(order, dimSize);
    XTensor * y = NewTensorV2(order, dimSize);
    XTensor * g = NewTensorV2(order, dimSize);
    XTensor * dedy = NewTensorV2(order, dimSize);
    XTensor * dedx = NewTensorV2(order, dimSize);

    /* initialize variables */
    x->SetData(xData, unitNum);
    g->SetData(gData, unitNum);
    y->SetZeroAll();
    dedx->SetZeroAll();
    dedy->SetZeroAll();

    /* call Softmax function */
    _Softmax(x, y, 1);
    
    /* call SoftmaxBackward function */
    _SoftmaxBackward(g, y, x, dedy, dedx, NULL, 1, CROSSENTROPY);
    
    /* check result */
    cpuTest = _CheckData(y, yAnswer, unitNum, 1e-4F) &&
              _CheckData(dedx, dedxAnswer, unitNum, 1e-4F);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * xGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * yGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * gGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedyGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * dedxGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    xGPU->SetData(xData, unitNum);
    gGPU->SetData(gData, unitNum);
    yGPU->SetZeroAll();
    dedxGPU->SetZeroAll();
    dedyGPU->SetZeroAll();

    /* call Softmax function */
    _Softmax(xGPU, yGPU, 1);

    /* call SoftmaxBackward function */
    _SoftmaxBackward(gGPU, yGPU, xGPU, dedyGPU, dedxGPU, NULL, 1, CROSSENTROPY);
    
    /* check result */
    gpuTest = _CheckData(yGPU, yAnswer, unitNum, 1e-4F) &&
              _CheckData(dedxGPU, dedxAnswer, unitNum, 1e-4F);

    /* destroy variables */
    delete x;
    delete y;
    delete g;
    delete dedx;
    delete dedy;
    delete xGPU;
    delete yGPU;
    delete gGPU;
    delete dedxGPU;
    delete dedyGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete x;
    delete y;
    delete g;
    delete dedx;
    delete dedy;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
    TODO!!
*/

/* test for Softmax Function */
bool TestSoftmax()
{
    XPRINT(0, stdout, "[TEST SOFTMAX] softmax function and its backward computation \n");
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestSoftmax1();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");

    /* case 2 test */
    caseFlag = TestSoftmax2();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)