/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-04-30
*/

#include "TSort.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* case 1: sort the tensor along dimension 0 (in descending order) */
bool TestSort1()
{
    /* a tensor of size (2, 4) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 4;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE aData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
    DTYPE answer[2][4] = { {4.0F, 5.0F, 6.0F, 7.0F},
                           {0.0F, 1.0F, 2.0F, 3.0F} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * a = NewTensor(order, dimSize);
    XTensor * b = NewTensor(order, dimSize);
    XTensor * aMe = NewTensor(order, dimSize);
    XTensor * index = NewTensor(order, dimSize, X_INT);
    XTensor bUser(order, dimSize, X_FLOAT, 1.0F, -1, NULL);

    /* initialize variables */
    a->SetData(aData, unitNum);
    aMe->SetData(aData, unitNum);
    index->SetZeroAll();

    /* call Sort function */
    _Sort(a, b, index, 0);
    _SortMe(aMe, index, 0);
    Sort(*a, bUser, *index, 0);

    /* check results */
    cpuTest = b->CheckData(answer, unitNum) &&
              aMe->CheckData(answer, unitNum) && 
              bUser.CheckData(answer, unitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * indexGPU = NewTensor(order, dimSize, X_INT, 1.0F, 0);
    XTensor bUserGPU(order, dimSize, X_FLOAT, 1.0F, 0, NULL);

    /* initialize variables */
    aGPU->SetData(aData, unitNum);
    aMeGPU->SetData(aData, unitNum);
    indexGPU->SetZeroAll();

    /* call Sort function */
    _Sort(aGPU, bGPU, indexGPU, 0);
    _SortMe(aMeGPU, indexGPU, 0);
    Sort(*aGPU, bUserGPU, *indexGPU, 0);

    /* check results */
    gpuTest = bGPU->CheckData(answer, unitNum) && 
              aMeGPU->CheckData(answer, unitNum) && 
              bUserGPU.CheckData(answer, unitNum);

    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete aGPU;
    delete bGPU;
    delete aMeGPU;
    delete indexGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* case 2: sort the tensor along dimension 1 (in descending order) */
bool TestSort2()
{
    /* a tensor of size (2, 4) */
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 4;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE aData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
    DTYPE answer[2][4] = { {3.0F, 2.0F, 1.0F, 0.0F},
                           {7.0F, 6.0F, 5.0F, 4.0F} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * a = NewTensor(order, dimSize);
    XTensor * b = NewTensor(order, dimSize);
    XTensor * aMe = NewTensor(order, dimSize);
    XTensor * index = NewTensor(order, dimSize, X_INT);
    XTensor bUser(order, dimSize, X_FLOAT, 1.0F, -1, NULL);

    /* initialize variables */
    a->SetData(aData, unitNum);
    aMe->SetData(aData, unitNum);
    index->SetZeroAll();
    
    /* call Sort function */
    _Sort(a, b, index, 1);
    _SortMe(aMe, index, 1);
    Sort(*a, bUser, *index, 1);

    /* check results */
    cpuTest = b->CheckData(answer, unitNum) && 
              aMe->CheckData(answer, unitNum) && 
              bUser.CheckData(answer, unitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * aGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * bGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * aMeGPU = NewTensor(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * indexGPU = NewTensor(order, dimSize, X_INT, 1.0F, 0);
    XTensor bUserGPU(order, dimSize, X_FLOAT, 1.0F, 0, NULL);

    /* initialize variables */
    aGPU->SetData(aData, unitNum);
    aMeGPU->SetData(aData, unitNum);
    indexGPU->SetZeroAll();

    /* call Sort function */
    _Sort(aGPU, bGPU, indexGPU, 1);
    _SortMe(aMeGPU, indexGPU, 1);
    Sort(*aGPU, bUserGPU, *indexGPU, 1);

    /* check results */
    gpuTest = bGPU->CheckData(answer, unitNum) && 
              aMeGPU->CheckData(answer, unitNum) && 
              bUserGPU.CheckData(answer, unitNum);

    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete aGPU;
    delete bGPU;
    delete aMeGPU;
    delete indexGPU;
    delete[] dimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete a;
    delete b;
    delete aMe;
    delete index;
    delete[] dimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
TODO!!
*/
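
/*
A minimal CPU-only sketch of one possible additional case, following the
pattern of cases 1 and 2: sort a rank-3 tensor of size (2, 2, 2) along its
last dimension. It reuses only the calls already used above (NewTensor,
SetData, _Sort, CheckData) and assumes _Sort handles rank-3 tensors in the
same descending order that cases 1 and 2 check for. The function is
illustrative only and is not wired into TestSort() below.
*/
bool TestSort3()
{
    /* a tensor of size (2, 2, 2) */
    int order = 3;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 2;
    dimSize[2] = 2;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

    DTYPE aData[2][2][2] = { { {0.0F, 1.0F}, {2.0F, 3.0F} },
                             { {4.0F, 5.0F}, {6.0F, 7.0F} } };
    DTYPE answer[2][2][2] = { { {1.0F, 0.0F}, {3.0F, 2.0F} },
                              { {5.0F, 4.0F}, {7.0F, 6.0F} } };

    /* create tensors */
    XTensor * a = NewTensor(order, dimSize);
    XTensor * b = NewTensor(order, dimSize);
    XTensor * index = NewTensor(order, dimSize, X_INT);

    /* initialize variables */
    a->SetData(aData, unitNum);
    index->SetZeroAll();

    /* call Sort function along the last dimension */
    _Sort(a, b, index, 2);

    /* check results */
    bool cpuTest = b->CheckData(answer, unitNum);

    /* destroy variables */
    delete a;
    delete b;
    delete index;
    delete[] dimSize;

    return cpuTest;
}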

/* test for the Sort function */
bool TestSort()
{
    XPRINT(0, stdout, "[TEST SORT] sort the tensor along a given dimension \n");
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestSort1();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");

    /* case 2 test */
    caseFlag = TestSort2();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)