/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-06-13
*/


#include "../XTensor.h"
#include "../XList.h"
#include "TMerge.h"

namespace nts { // namespace nts(NiuTrans.Tensor)
/* case 1: transform a tensor by merging it along a dimension.
* In this case, (2, 3) -> (6), whereToMerge=1, leadingDim=0.
*/
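/* A brief illustration (based on the data and expected answer in this case):
 * with leadingDim=0 and whereToMerge=1, the rows of the (2, 3) source
 * {{0, 1, 2}, {3, 4, 5}} are concatenated in order, giving the (6) vector
 * {0, 1, 2, 3, 4, 5}. */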
bool TestMerge1()
{
    /* a source tensor of size (2, 3) */
    int sOrder = 2;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 3;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    /* a target tensor of size (6) */
    int tOrder = 1;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 6;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    DTYPE sData[2][3] = { {0.0, 1.0, 2.0},
                          {3.0, 4.0, 5.0} };
    DTYPE answer[6] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
    
    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);

    /* initialize variables */
    s->SetData(sData, sUnitNum);
    t->SetZeroAll();

    /* call merge function */
    Merge(s, t, 1, 0);

    /* check results */
    cpuTest = t->CheckData(answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    sGPU->SetData(sData, sUnitNum);
    tGPU->SetZeroAll();

    /* call merge function */
    Merge(sGPU, tGPU, 1, 0);

    /* check results */
    gpuTest = tGPU->CheckData(answer, tUnitNum);

    /* destroy variables */
    delete s;
    delete t;
    delete sGPU;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* case 2: transform a tensor by merging it along a dimension.
* In this case, (2, 2, 3) -> (4, 3), whereToMerge=1, leadingDim=0.
*/
bool TestMerge2()
{
    /* a source tensor of size (2, 2, 3) */
    int sOrder = 3;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 2;
    sDimSize[2] = 3;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    /* a target tensor of size (4, 3) */
    int tOrder = 2;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 4;
    tDimSize[1] = 3;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    DTYPE sData[2][2][3] = { { {0.0, 1.0, 2.0},
                               {4.0, 5.0, 6.0} },
                             { {-1.0, 2.0, 3.0},
                               {-4.0, -5.0, -6.0} } };
    DTYPE answer[4][3] = { {0.0, 1.0, 2.0},
                           {4.0, 5.0, 6.0},
                           {-1.0, 2.0, 3.0},
                           {-4.0, -5.0, -6.0} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);

    /* initialize variables */
    s->SetData(sData, sUnitNum);
    t->SetZeroAll();

    /* call merge function */
    Merge(s, t, 1, 0);

    /* check results */
    cpuTest = t->CheckData(answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    sGPU->SetData(sData, sUnitNum);
    tGPU->SetZeroAll();

    /* call merge function */
    Merge(sGPU, tGPU, 1, 0);

    /* check results */
    gpuTest = tGPU->CheckData(answer, tUnitNum);

    /* destroy variables */
    delete s;
    delete t;
    delete sGPU;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* case 3: transform a tensor by merging it along a dimension.
* In this case, (2, 3, 4) -> (3, 8), whereToMerge=2, leadingDim=0.
*/
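/* A brief illustration (based on the data and expected answer in this case):
 * merging leadingDim=0 into whereToMerge=2 concatenates the two (3, 4) slices
 * of the source along the last dimension, e.g. row 0 of the (3, 8) result is
 * {0, 1, 2, 3} followed by {0, -1, -2, -3}. */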
bool TestMerge3()
{
    /* a source tensor of size (2, 3, 4) */
    int sOrder = 3;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 3;
    sDimSize[2] = 4;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    /* a target tensor of size (3, 8) */
    int tOrder = 2;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 3;
    tDimSize[1] = 8;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    DTYPE sData[2][3][4] = { { {0.0, 1.0, 2.0, 3.0},
                               {4.0, 5.0, 6.0, 7.0},
                               {8.0, 9.0, 10.0, 11.0} },
                             { {0.0, -1.0, -2.0, -3.0},
                               {-4.0, -5.0, -6.0, -7.0},
                               {-8.0, -9.0, -10.0, -11.0} } };
    DTYPE answer[3][8] = { {0.0, 1.0, 2.0, 3.0, 0.0, -1.0, -2.0, -3.0},
                           {4.0, 5.0, 6.0, 7.0, -4.0, -5.0, -6.0, -7.0},
                           {8.0, 9.0, 10.0, 11.0, -8.0, -9.0, -10.0, -11.0} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);

    /* initialize variables */
    s->SetData(sData, sUnitNum);
    t->SetZeroAll();

    /* call merge function */
    Merge(s, t, 2, 0);

    /* check results */
    cpuTest = t->CheckData(answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* create tensors */
    XTensor * sGPU = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    sGPU->SetData(sData, sUnitNum);
    tGPU->SetZeroAll();
    
    /* call merge function */
    Merge(sGPU, tGPU, 2, 0);

    /* check results */
    gpuTest = tGPU->CheckData(answer, tUnitNum);

    /* destroy variables */
    delete s;
    delete t;
    delete sGPU;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;

    return cpuTest;
#endif // USE_CUDA
}

/* case 4: merge small tensors into a big tensor. 
In this case, 2 * (2, 4) -> (4, 4), whereToMerge=0.
*/
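/* A brief illustration (based on the data and expected answer in this case):
 * the list version of Merge stacks the small tensors along whereToMerge=0,
 * so the two (2, 4) inputs become the (4, 4) result, with the rows of the
 * first tensor followed by the rows of the second. */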
bool TestMerge4()
{
    /* create list */
    XList * smallList = new XList();

    /* a small tensor of size (2, 4) */
    int sOrder = 2;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 4;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    DTYPE sData1[2][4] = { {0.0, 1.0, 2.0, 3.0},
                           {4.0, 5.0, 6.0, 7.0} };
    DTYPE sData2[2][4] = { {0.0, -1.0, -2.0, -3.0},
                           {-4.0, -5.0, -6.0, -7.0} };

    /* a target tensor of size (4, 4) */
    int tOrder = 2;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 4;
    tDimSize[1] = 4;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    DTYPE answer[4][4] = { {0.0, 1.0, 2.0, 3.0},
                           {4.0, 5.0, 6.0, 7.0},
                           {0.0, -1.0, -2.0, -3.0},
                           {-4.0, -5.0, -6.0, -7.0} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s1 = NewTensor(sOrder, sDimSize);
    XTensor * s2 = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);

    /* initialize variables */
    s1->SetData(sData1, sUnitNum);
    s2->SetData(sData2, sUnitNum);
    t->SetZeroAll();

    /* add tensors to list */
    smallList->Add(s1);
    smallList->Add(s2);

    /* call merge function */
    Merge(smallList, t, 0);

    /* check results */
    cpuTest = t->CheckData(answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* clear list */
    smallList->Clear();

    /* create tensors */
    XTensor * sGPU1 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * sGPU2 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    sGPU1->SetData(sData1, sUnitNum);
    sGPU2->SetData(sData2, sUnitNum);
    tGPU->SetZeroAll();

    /* add tensors to list */
    smallList->Add(sGPU1);
    smallList->Add(sGPU2);

    /* call merge function */
    Merge(smallList, tGPU, 0);

    /* check results */
    gpuTest = tGPU->CheckData(answer, tUnitNum);

    /* destroy variables */
    delete s1;
    delete s2;
    delete t;
    delete sGPU1;
    delete sGPU2;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;
    delete smallList;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s1;
    delete s2;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;
    delete smallList;

    return cpuTest;
#endif // USE_CUDA
}

/* case 5: merge small tensors into a big tensor. 
In this case, 2 * (2, 4) -> (2, 8), whereToMerge=1.
*/
bool TestMerge5()
{
    /* create list */
    XList * smallList = new XList();

    /* a small tensor of size (2, 4) */
    int sOrder = 2;
    int * sDimSize = new int[sOrder];
    sDimSize[0] = 2;
    sDimSize[1] = 4;

    int sUnitNum = 1;
    for (int i = 0; i < sOrder; i++)
        sUnitNum *= sDimSize[i];

    DTYPE sData1[2][4] = { {0.0, 1.0, 2.0, 3.0},
                           {4.0, 5.0, 6.0, 7.0} };
    DTYPE sData2[2][4] = { {0.0, -1.0, -2.0, -3.0},
                           {-4.0, -5.0, -6.0, -7.0} };

    /* a target tensor of size (2, 8) */
    int tOrder = 2;
    int * tDimSize = new int[tOrder];
    tDimSize[0] = 2;
    tDimSize[1] = 8;

    int tUnitNum = 1;
    for (int i = 0; i < tOrder; i++)
        tUnitNum *= tDimSize[i];

    DTYPE answer[2][8] = { {0.0, 1.0, 2.0, 3.0, 0.0, -1.0, -2.0, -3.0},
                           {4.0, 5.0, 6.0, 7.0, -4.0, -5.0, -6.0, -7.0} };

    /* CPU test */
    bool cpuTest = true;

    /* create tensors */
    XTensor * s1 = NewTensor(sOrder, sDimSize);
    XTensor * s2 = NewTensor(sOrder, sDimSize);
    XTensor * t = NewTensor(tOrder, tDimSize);

    /* initialize variables */
    s1->SetData(sData1, sUnitNum);
    s2->SetData(sData2, sUnitNum);
    t->SetZeroAll();

    /* add tensors to list */
    smallList->Add(s1);
    smallList->Add(s2);

    /* call merge function */
    Merge(smallList, t, 1);

    /* check results */
    cpuTest = t->CheckData(answer, tUnitNum);

#ifdef USE_CUDA
    /* GPU test */
    bool gpuTest = true;

    /* clear list */
    smallList->Clear();

    /* create tensors */
    XTensor * sGPU1 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * sGPU2 = NewTensor(sOrder, sDimSize, X_FLOAT, 1.0F, 0);
    XTensor * tGPU = NewTensor(tOrder, tDimSize, X_FLOAT, 1.0F, 0);

    /* initialize variables */
    sGPU1->SetData(sData1, sUnitNum);
    sGPU2->SetData(sData2, sUnitNum);
    tGPU->SetZeroAll();

    /* add tensors to list */
    smallList->Add(sGPU1);
    smallList->Add(sGPU2);

    /* call merge function */
    Merge(smallList, tGPU, 1);

    /* check results */
    gpuTest = tGPU->CheckData(answer, tUnitNum);

    /* destroy variables */
    delete s1;
    delete s2;
    delete t;
    delete sGPU1;
    delete sGPU2;
    delete tGPU;
    delete[] sDimSize;
    delete[] tDimSize;
    delete smallList;

    return cpuTest && gpuTest;
#else
    /* destroy variables */
    delete s1;
    delete s2;
    delete t;
    delete[] sDimSize;
    delete[] tDimSize;
    delete smallList;

    return cpuTest;
#endif // USE_CUDA
}

/* other cases */
/*
    TODO!!
*/

/* test for Merge function */
extern "C"
bool TestMerge()
{
    XPRINT(0, stdout, "[TEST MERGE] -------------\n");
    bool returnFlag = true, caseFlag = true;

    /* case 1 test */
    caseFlag = TestMerge1();

    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 1 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 1 passed!\n");

    /* case 2 test */
    caseFlag = TestMerge2();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 2 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 2 passed!\n");

    /* case 3 test */
    caseFlag = TestMerge3();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 3 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 3 passed!\n");

    /* case 4 test */
    caseFlag = TestMerge4();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 4 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 4 passed!\n");

    /* case 5 test */
    caseFlag = TestMerge5();
    if (!caseFlag) {
        returnFlag = false;
        XPRINT(0, stdout, ">> case 5 failed!\n");
    }
    else
        XPRINT(0, stdout, ">> case 5 passed!\n");

    /* other cases test */
    /*
    TODO!!
    */

    if (returnFlag) {
        XPRINT(0, stdout, ">> All Passed!\n");
    }
    else
        XPRINT(0, stdout, ">> Failed!\n");

    XPRINT(0, stdout, "\n");

    return returnFlag;
}

} // namespace nts(NiuTrans.Tensor)