/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-27
*/

#include "TReduceSumSquared.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* 
case 1: squared sum of the items along a dimension of the tensor. 
For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
In this case, (2, 4) -> (4), dim = 0.
liyinqiao committed
30
*/
31 32
bool TestReduceSumSquared1()
{
liyinqiao committed
33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
    /* a input tensor of size (2, 4) */
    int sOrder = 2;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 4;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    /* a output tensor of size (4) */
    int tOrder = 1;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 4;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    /* a shift tensor of size (4) */
53 54 55 56 57 58 59 60
    int shiftOrder = 1;
    int * shiftDimSize = new int[shiftOrder];
    shiftDimSize[0] = 4;

    int shiftUnitNum = 1;
    for (int i = 0; i < shiftOrder; i++)
        shiftUnitNum *= shiftDimSize[i];

liyinqiao committed
61 62 63 64
    DTYPE sData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
    DTYPE shiftData[4] = {1.0F, -1.0F, -1.0F, 0.0F};
    DTYPE answer[4] = {10.0F, 40.0F, 58.0F, 58.0F};
65 66 67 68 69

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
liyinqiao committed
70 71 72
    XTensor * s = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);
    XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
73
    XTensor tUser;
liyinqiao committed
74 75 76 77 78 79 80

    /* initialize variables */
    s->SetData(sData, sUnitNum);
    shift->SetData(shiftData, shiftUnitNum);
    t->SetZeroAll();

    /* call ReduceSumSquared function */
81
    _ReduceSumSquared(s, t, 0, shift);
82
    tUser = ReduceSumSquared(*s, 0, *shift);
liyinqiao committed
83 84

    /* check results */
85
    cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum);
liyinqiao committed
86 87 88 89 90 91 92 93 94

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
    XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
95
    XTensor tUserGPU;
liyinqiao committed
96 97 98 99 100 101 102

    /* initialize variables */
    sGPU->SetData(sData, sUnitNum);
    shiftGPU->SetData(shiftData, shiftUnitNum);
    tGPU->SetZeroAll();

    /* call ReduceSumSquared function */
103
    _ReduceSumSquared(sGPU, tGPU, 0, shiftGPU);
104
    tUserGPU = ReduceSumSquared(*sGPU, 0, *shiftGPU);
liyinqiao committed
105 106

    /* check results */
107
    gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum);
liyinqiao committed
108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133

    /* destroy variables */
    delete s;
    delete t;
    delete shift;
    delete sGPU;
    delete tGPU;
    delete shiftGPU;
    delete[] sDimSize;
    delete[] tDimSize;
    delete[] shiftDimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s;
    delete t;
    delete shift;
    delete[] sDimSize;
    delete[] tDimSize;
    delete[] shiftDimSize;

    return cpuTest;
#endif // USE_CUDA
}

liyinqiao committed
134 135 136 137
/* 
case 2: squared sum of the items along a dimension of the tensor. 
For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2.
In this case, (2, 4) -> (2), dim = 1.
liyinqiao committed
138 139 140 141 142 143 144 145 146 147 148 149 150
*/
bool TestReduceSumSquared2()
{
    /* a input tensor of size (2, 4) */
    int sOrder = 2;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 4;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

liyinqiao committed
151
    /* a output tensor of size (2) */
liyinqiao committed
152 153 154 155 156 157 158 159
    int tOrder = 1;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 2;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

liyinqiao committed
160
    /* a shift tensor of size (2) */
liyinqiao committed
161 162 163 164 165 166 167 168
    int shiftOrder = 1;
    int * shiftDimSize = new int[shiftOrder];
    shiftDimSize[0] = 2;

    int shiftUnitNum = 1;
    for (int i = 0; i < shiftOrder; i++)
        shiftUnitNum *= shiftDimSize[i];

liyinqiao committed
169 170 171 172
    DTYPE sData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
    DTYPE shiftData[2] = {-1.0F, 1.0F};
    DTYPE answer[2] = {30.0F, 86.0F};
liyinqiao committed
173 174 175 176 177 178 179

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);
180
    XTensor * shift = NewTensor(shiftOrder, shiftDimSize);
181
    XTensor tUser;
182 183

    /* initialize variables */
liyinqiao committed
184
    s->SetData(sData, sUnitNum);
185
    shift->SetData(shiftData, shiftUnitNum);
liyinqiao committed
186
    t->SetZeroAll();
187 188

    /* call ReduceSumSquared function */
189
    _ReduceSumSquared(s, t, 1, shift);
190
    tUser = ReduceSumSquared(*s, 1, *shift);
191 192

    /* check results */
193
    cpuTest = t->CheckData(answer, tUnitNum) && tUser.CheckData(answer, tUnitNum);
194 195 196 197 198 199

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
liyinqiao committed
200 201
    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);
202
    XTensor * shiftGPU = NewTensor(shiftOrder, shiftDimSize, X_FLOAT, 1.0F, 0);
203
    XTensor tUserGPU;
204 205

    /* initialize variables */
liyinqiao committed
206
    sGPU->SetData(sData, sUnitNum);
207
    shiftGPU->SetData(shiftData, shiftUnitNum);
liyinqiao committed
208
    tGPU->SetZeroAll();
209 210

    /* call ReduceSumSquared function */
211
    _ReduceSumSquared(sGPU, tGPU, 1, shiftGPU);
212
    tUserGPU = ReduceSumSquared(*sGPU, 1, *shiftGPU);
213 214

    /* check results */
215
    gpuTest = tGPU->CheckData(answer, tUnitNum) && tUserGPU.CheckData(answer, tUnitNum);
216 217

    /* destroy variables */
liyinqiao committed
218 219 220 221 222 223 224 225 226
    delete s;
    delete t;
    delete shift;
    delete sGPU;
    delete tGPU;
    delete shiftGPU;
    delete[] sDimSize;
    delete[] tDimSize;
    delete[] shiftDimSize;
227 228 229 230

    return cpuTest && gpuTest;
#else
    /* destroy variables */
liyinqiao committed
231 232 233 234 235 236
    delete s;
    delete t;
    delete shift;
    delete[] sDimSize;
    delete[] tDimSize;
    delete[] shiftDimSize;
237 238 239 240 241 242 243 244 245 246 247 248 249

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
TODO!!
*/

/* test for ReduceSumSquared Function */
bool TestReduceSumSquared()
{
liyinqiao committed
250
    XPRINT(0, stdout, "[TEST ReduceSumSquared] squared sum of the items along a dimension of the tensor\n");
251 252 253 254 255 256 257 258 259 260
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestReduceSumSquared1();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");
liyinqiao committed
261 262 263 264 265 266 267 268 269
    
    /* case 2 test */
    caseFlag = TestReduceSumSquared2();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
    }

} // namespace nts(NiuTrans.Tensor)