/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: LI Yinqiao (email: li.yin.qiao.2012@hotmail.com) 2018-04-30
*/

#include<math.h>
#include "../core/math/ScaleAndShift.h"
#include "TLoss.h"

namespace nts { // namespace nts(NiuTrans.Tensor)


/* 
case 1: test LossCompute function.
In this case, Loss function name = SQUAREDERROR.
loss = sum_{i} 0.5*(t_i - y_i)^2, 
where t_i is the gold standard and y_i is the model output.
*/
bool TestLoss1()
{
liyinqiao committed
36 37 38 39 40 41 42 43 44 45 46 47 48 49
    /* a tensor of size (10, 1) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 10;
    dimSize[1] = 1;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    /* CPU test */
    bool cpuTest = true;

    DTYPE answer = 5.0F;
50
    DTYPE error;
liyinqiao committed
51 52

    /* create tensors */
liyinqiao committed
53 54
    XTensor * output = NewTensorV2(order, dimSize);
    XTensor * gold = NewTensorV2(order, dimSize);
liyinqiao committed
55 56 57 58

    /* initialize variables */
    output->SetZeroAll();
    gold->SetZeroAll();
59 60
    _ScaleAndShiftMe(output, 1, 1);
    _ScaleAndShiftMe(gold, 1, 2);
liyinqiao committed
61

62 63
    /* call LossCompute function */
    error = _LossCompute(gold, output, SQUAREDERROR, false, 0, 0, dimSize[0], 0);
liyinqiao committed
64 65
    
    /* check results */
liyinqiao committed
66
    cpuTest = (fabs(error - answer) < 1e-4);
liyinqiao committed
67 68 69 70 71 72
    
#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensor */
liyinqiao committed
73 74
    XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
liyinqiao committed
75 76 77 78

    /* Initialize variables */
    outputGPU->SetZeroAll();
    goldGPU->SetZeroAll();
79 80
    _ScaleAndShiftMe(outputGPU, 1, 1);
    _ScaleAndShiftMe(goldGPU, 1, 2);
liyinqiao committed
81 82

    /* call LossCompute function */
83
    error = _LossCompute(goldGPU, outputGPU, SQUAREDERROR, false, 0, 0, dimSize[0], 0);
liyinqiao committed
84 85
    
    /* check results */
liyinqiao committed
86
    gpuTest = (fabs(error - answer) < 1e-4);
liyinqiao committed
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105

    /* destroy variables */
    delete output;
    delete gold;
    delete outputGPU;
    delete goldGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete output;
    delete gold;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* 
case 2: test LossCompute function.
In this case, Loss function name = CROSSENTROPY.
loss = sum_{i} (-t_i * log(y_i))
where t_i is the gold standard and y_i is the model output.
*/
bool TestLoss2()
{
    /* a tensor of size (10, 1) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 10;
    dimSize[1] = 1;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    /* CPU test */
    bool cpuTest = true;

    DTYPE answer = 0.0F;
128
    DTYPE error;
liyinqiao committed
129 130

    /* create tensors */
liyinqiao committed
131 132
    XTensor * output = NewTensorV2(order, dimSize);
    XTensor * gold = NewTensorV2(order, dimSize);
liyinqiao committed
133 134 135 136

    /* initialize variables */
    output->SetZeroAll();
    gold->SetZeroAll();
137 138
    _ScaleAndShiftMe(output, 1, 1);
    _ScaleAndShiftMe(gold, 1, 2);
liyinqiao committed
139

140 141
    /* call LossCompute function */
    error = _LossCompute(gold, output, CROSSENTROPY, false, 0, 0, dimSize[0], 0);
liyinqiao committed
142 143
    
    /* check results */
liyinqiao committed
144
    cpuTest = (fabs(error - answer) < 1e-4);
liyinqiao committed
145 146 147 148 149 150

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensor */
liyinqiao committed
151 152
    XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
liyinqiao committed
153 154 155 156

    /* Initialize variables */
    outputGPU->SetZeroAll();
    goldGPU->SetZeroAll();
157 158
    _ScaleAndShiftMe(outputGPU, 1, 1);
    _ScaleAndShiftMe(goldGPU, 1, 2);
liyinqiao committed
159 160

    /* call LossCompute function */
161
    error = _LossCompute(goldGPU, outputGPU, CROSSENTROPY, false, 0, 0, dimSize[0], 0);
liyinqiao committed
162 163
    
    /* check results */
liyinqiao committed
164
    gpuTest = (fabs(error - answer) < 1e-4);
liyinqiao committed
165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183

    /* destroy variables */
    delete output;
    delete gold;
    delete outputGPU;
    delete goldGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete output;
    delete gold;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* 
case 3: test LossCompute function.
In this case, Loss function name = ONEHOTERROR.
loss = sum_{i} e_i
where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise.
*/
bool TestLoss3()
{
    /* a tensor of size (10, 1) */
xiaotong committed
193 194
    int order = 2;
    int * dimSize = new int[order];
liyinqiao committed
195
    dimSize[0] = 5;
xiaotong committed
196 197 198 199 200
    dimSize[1] = 1;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];
liyinqiao committed
201 202 203 204 205 206 207 208 209 210
    DTYPE outputData[5][1] = { {0.5F},
                               {0.5F},
                               {0.5F},
                               {0.5F},
                               {0.5F} };
    DTYPE goldData[5][1] = { {1.0F},
                             {1.0F},
                             {0.0F},
                             {0.0F},
                             {0.0F} };
xiaotong committed
211 212 213 214

    /* CPU test */
    bool cpuTest = true;

liyinqiao committed
215
    DTYPE answer = 0.25F;
216
    DTYPE error;
liyinqiao committed
217

xiaotong committed
218
    /* create tensors */
liyinqiao committed
219 220
    XTensor * output = NewTensorV2(order, dimSize);
    XTensor * gold = NewTensorV2(order, dimSize);
xiaotong committed
221 222

    /* initialize variables */
liyinqiao committed
223 224
    output->SetData(outputData, unitNum);
    gold->SetData(goldData, unitNum);
xiaotong committed
225

226 227
    /* call LossCompute function */
    error = _LossCompute(gold, output, ONEHOTERROR, false, 0, 0, dimSize[0], 0);
liyinqiao committed
228
    
xiaotong committed
229
    /* check results */
liyinqiao committed
230
    cpuTest = (fabs(error - answer) < 1e-4);
xiaotong committed
231 232 233 234 235 236

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensor */
liyinqiao committed
237 238
    XTensor * outputGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * goldGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
xiaotong committed
239 240

    /* Initialize variables */
liyinqiao committed
241 242
    outputGPU->SetData(outputData, unitNum);
    goldGPU->SetData(goldData, unitNum);
xiaotong committed
243

liyinqiao committed
244
    /* call LossCompute function */
245
    error = _LossCompute(goldGPU, outputGPU, ONEHOTERROR, false, 0, 0, dimSize[0], 0);
liyinqiao committed
246
    
xiaotong committed
247
    /* check results */
liyinqiao committed
248
    gpuTest = (fabs(error - answer) < 1e-4);
xiaotong committed
249 250

    /* destroy variables */
liyinqiao committed
251 252 253 254
    delete output;
    delete gold;
    delete outputGPU;
    delete goldGPU;
xiaotong committed
255
    delete[] dimSize;
liyinqiao committed
256

xiaotong committed
257 258 259
    return cpuTest && gpuTest;
#else
    /* destroy variables */
liyinqiao committed
260 261 262 263
    delete output;
    delete gold;
    delete[] dimSize;

xiaotong committed
264 265 266 267 268 269 270 271 272
    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
TODO!!
*/

/* test for Loss Function */
bool TestLoss()
xiaotong committed
275
{
liyinqiao committed
276
    XPRINT(0, stdout, "[TEST Loss] compute the loss \n");
xiaotong committed
277 278 279 280 281 282 283 284 285 286 287
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestLoss1();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");

liyinqiao committed
288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
    /* case 2 test */
    caseFlag = TestLoss2();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");
        
    caseFlag = TestLoss3();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 3 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 3 passed!\n");

xiaotong committed
305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
    ///* other cases test */
    ///*
    //TODO!!
    //*/

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)