/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-04-30
*/

#include "../core/utilities/CheckData.h"
#include "TSort.h"

namespace nts { // namespace nts(NiuTrans.Tensor)

/* case 1: sort the tensor along a given dimension */
bool TestSort1()
{
liyinqiao committed
30
    /* a tensor of size (2, 4) */
xiaotong committed
31 32 33 34 35 36 37 38 39
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 4;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

liyinqiao committed
40 41 42 43
    DTYPE aData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
    DTYPE answer[2][4] = { {4.0F, 5.0F, 6.0F, 7.0F},
                           {0.0F, 1.0F, 2.0F, 3.0F} };
xiaotong committed
44 45 46 47 48

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
liyinqiao committed
49 50 51 52
    XTensor * a = NewTensorV2(order, dimSize);
    XTensor * b = NewTensorV2(order, dimSize);
    XTensor * aMe = NewTensorV2(order, dimSize);
    XTensor * index = NewTensorV2(order, dimSize, X_INT);
53
    XTensor bUser(order, dimSize, X_FLOAT, 1.0F, -1, NULL);
xiaotong committed
54 55 56

    /* initialize variables */
    a->SetData(aData, unitNum);
57 58
    aMe->SetData(aData, unitNum);
    index->SetZeroAll();
xiaotong committed
59

liyinqiao committed
60
    /* call Sort function */
61 62 63
    _Sort(a, b, index, 0);
    _SortMe(aMe, index, 0);
    Sort(*a, bUser, *index, 0);
liyinqiao committed
64

liyinqiao committed
65 66 67
    cpuTest = _CheckData(b, answer, unitNum, 1e-4F) &&
              _CheckData(aMe, answer, unitNum, 1e-4F) &&
              _CheckData(&bUser, answer, unitNum, 1e-4F);
xiaotong committed
68 69 70 71 72 73

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensor */
liyinqiao committed
74 75 76 77
    XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * indexGPU = NewTensorV2(order, dimSize, X_INT, 1.0F, 0);
78 79
    XTensor bUserGPU(order, dimSize, X_FLOAT, 1.0F, 0, NULL);

xiaotong committed
80 81
    /* Initialize variables */
    aGPU->SetData(aData, unitNum);
82 83
    aMeGPU->SetData(aData, unitNum);
    indexGPU->SetZeroAll();
xiaotong committed
84 85

    /* call sum function */
86 87 88
    _Sort(aGPU, bGPU, indexGPU, 0);
    _SortMe(aMeGPU, indexGPU, 0);
    Sort(*aGPU, bUserGPU, *indexGPU, 0);
xiaotong committed
89 90

    /* check results */
liyinqiao committed
91 92 93
    gpuTest = _CheckData(bGPU, answer, unitNum, 1e-4F) &&
              _CheckData(aMeGPU, answer, unitNum, 1e-4F) &&
              _CheckData(&bUserGPU, answer, unitNum, 1e-4F);
xiaotong committed
94 95

    /* destroy variables */
liyinqiao committed
96 97
    delete a;
    delete b;
98 99
    delete aMe;
    delete index;
liyinqiao committed
100 101
    delete aGPU;
    delete bGPU;
102 103
    delete aMeGPU;
    delete indexGPU;
xiaotong committed
104
    delete[] dimSize;
liyinqiao committed
105

xiaotong committed
106 107 108 109 110
    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete a;
    delete b;
111 112
    delete aMe;
    delete index;
xiaotong committed
113
    delete[] dimSize;
liyinqiao committed
114

xiaotong committed
115 116 117 118 119 120
    return cpuTest;
#endif // USE_CUDA
}

bool TestSort2()
{
liyinqiao committed
121
    /* a tensor of size (2, 4) */
xiaotong committed
122 123 124 125 126 127 128 129 130
    int order = 2;
    int * dimSize = new int[order];
    dimSize[0] = 2;
    dimSize[1] = 4;

    int unitNum = 1;
    for (int i = 0; i < order; i++)
        unitNum *= dimSize[i];

liyinqiao committed
131 132 133 134
    DTYPE aData[2][4] = { {0.0F, 1.0F, 2.0F, 3.0F},
                          {4.0F, 5.0F, 6.0F, 7.0F} };
    DTYPE answer[2][4] = { {3.0F, 2.0F, 1.0F, 0.0F},
                           {7.0F, 6.0F, 5.0F, 4.0F} };
xiaotong committed
135 136 137 138 139

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
liyinqiao committed
140 141 142 143
    XTensor * a = NewTensorV2(order, dimSize);
    XTensor * b = NewTensorV2(order, dimSize);
    XTensor * aMe = NewTensorV2(order, dimSize);
    XTensor * index = NewTensorV2(order, dimSize, X_INT);
144
    XTensor bUser(order, dimSize, X_FLOAT, 1.0F, -1, NULL);
xiaotong committed
145 146 147

    /* initialize variables */
    a->SetData(aData, unitNum);
148 149 150
    aMe->SetData(aData, unitNum);
    index->SetZeroAll();
    
liyinqiao committed
151
    /* call Sort function */
152 153 154
    _Sort(a, b, index, 1);
    _SortMe(aMe, index, 1);
    Sort(*a, bUser, *index, 1);
liyinqiao committed
155

xiaotong committed
156
    /* check results */
liyinqiao committed
157 158 159
    cpuTest = _CheckData(b, answer, unitNum, 1e-4F) &&
              _CheckData(aMe, answer, unitNum, 1e-4F) &&
              _CheckData(&bUser, answer, unitNum, 1e-4F);
xiaotong committed
160 161 162 163 164 165

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensor */
liyinqiao committed
166 167 168 169
    XTensor * aGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * bGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * aMeGPU = NewTensorV2(order, dimSize, X_FLOAT, 1.0F, 0);
    XTensor * indexGPU = NewTensorV2(order, dimSize, X_INT, 1.0F, 0);
170
    XTensor bUserGPU(order, dimSize, X_FLOAT, 1.0F, 0, NULL);
liyinqiao committed
171

xiaotong committed
172 173
    /* Initialize variables */
    aGPU->SetData(aData, unitNum);
174 175
    aMeGPU->SetData(aData, unitNum);
    indexGPU->SetZeroAll();
xiaotong committed
176 177

    /* call sum function */
178 179 180
    _Sort(aGPU, bGPU, indexGPU, 1);
    _SortMe(aMeGPU, indexGPU, 1);
    Sort(*aGPU, bUserGPU, *indexGPU, 1);
xiaotong committed
181 182

    /* check results */
liyinqiao committed
183 184 185
    gpuTest = _CheckData(bGPU, answer, unitNum, 1e-4F) &&
              _CheckData(aMeGPU, answer, unitNum, 1e-4F) &&
              _CheckData(&bUserGPU, answer, unitNum, 1e-4F);
xiaotong committed
186 187

    /* destroy variables */
liyinqiao committed
188 189
    delete a;
    delete b;
190 191
    delete aMe;
    delete index;
liyinqiao committed
192 193
    delete aGPU;
    delete bGPU;
194 195
    delete aMeGPU;
    delete indexGPU;
xiaotong committed
196
    delete[] dimSize;
liyinqiao committed
197

xiaotong committed
198 199 200 201 202
    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete a;
    delete b;
203 204
    delete aMe;
    delete index;
xiaotong committed
205
    delete[] dimSize;
liyinqiao committed
206

xiaotong committed
207 208 209
    return cpuTest;
#endif // USE_CUDA
}
/* other cases */
/*
TODO!!
*/

/* test for Sort Function */
bool TestSort()
{
liyinqiao committed
219
    XPRINT(0, stdout, "[TEST SORT] sort the tensor along a given dimension \n");
xiaotong committed
220 221
    bool returnFlag = true, caseFlag = true;

222 223 224 225 226 227 228 229
    /* case 1 test */
    caseFlag = TestSort1();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");
xiaotong committed
230 231 232 233 234 235 236 237 238

    /* case 2 test */
    caseFlag = TestSort2();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");
liyinqiao committed
239

xiaotong committed
240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)