/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. 
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/

#include "../../XName.h"
#include "ReduceSum.h"
#include "ReduceSumSquared.h"

namespace nts{ // namespace nts(NiuTrans.Tensor)

/* 
squared sum of the items along a dimension of the tensor

For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2

>> input - the input tensor
>> output - the output tensor
>> dim - the dimension along which the reduction is performed
>> shift - the bias subtracted from each item of the input
*/
void _ReduceSumSquared(const XTensor * input, XTensor * output, int dim, const XTensor * shift)
{
    /* the squared sum is a power-2 reduction, so delegate to _ReduceSum */
    _ReduceSum(input, output, dim, shift, 2.0F);
}
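
/*
Usage sketch (illustration only, not part of the library). It assumes a
2 x 4 float input on the CPU (devID = -1, no memory pool) and a shift
tensor with the reduced (output) shape, i.e. one bias per row:

    int dims[2] = {2, 4};
    int reduced[1] = {2};
    XTensor a(2, dims, X_FLOAT, 1.0F, -1, NULL);      // input
    XTensor s(1, reduced, X_FLOAT, 1.0F, -1, NULL);   // per-row shift
    XTensor b(1, reduced, X_FLOAT, 1.0F, -1, NULL);   // output
    ... fill a and s with data ...
    _ReduceSumSquared(&a, &b, 1, &s);                 // b[i] = \sum_j (a[i][j] - s[i])^2
*/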


/* 
squared sum of the items along a dimension of the tensor (return an XTensor structure)
make a new tensor to keep the result and return it

For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2

>> input - the input tensor
>> dim - the dimension along which the reduction is performed
>> shift - the bias subtracted from each item of the input
<< return - the squared sum of the items along a dimension of the tensor
*/
XTensor ReduceSumSquared(const XTensor &input, int dim, const XTensor &shift)
{
    CheckNTErrors(dim >= 0 && dim < input.order, "Illegal dimension to reduce!");
    
    int order = input.order - 1;
    int * dimSize = new int[order];
    for(int i = 0; i < order; i++){
        if(i < dim)
            dimSize[i] = input.dimSize[i];
        else if(i >= dim)
            dimSize[i] = input.dimSize[i + 1];
    }

    float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
    XTensor output(order, dimSize, input.dataType, dr, input.devID, input.mem);
    output.SetTMPFlag();

    /* call _ReduceSumSquared function */
    _ReduceSumSquared(&input, &output, dim, &shift);
                    
    /* tensor connection */
    if (input.enableGrad) {
        XLink::MakeLink(&input, &shift, &output, REDUCE_REDUCESUMSQUARED);
        XLink::AddParamToHeadInt(&output, dim);
    }

    /* destroy variables */
    delete[] dimSize;

    return output;
}
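
/*
Usage sketch for this returning form (illustration only; `a` and `s` as in
the sketch above):

    XTensor b = ReduceSumSquared(a, 1, s);   // a new tensor holding the squared sums
*/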

/* 
squared sum of the items along a dimension of the tensor

For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^2

>> input - the input tensor
>> output - the output tensor
>> dim - the dimension along which the reduction is performed
>> shift - the bias subtracted from each item of the input
*/
void ReduceSumSquared(const XTensor &input, XTensor &output, int dim, const XTensor &shift)
{
    CheckNTErrors(dim >= 0 && dim < input.order, "Illegal dimension to reduce!");

    if (!output.isInit || !XTensor::IsReduceShaped(&input, &output, dim)) {
        int order = input.order - 1;
        int * dimSize = new int[order];
        for (int i = 0; i < order; i++) {
            if (i < dim)
                dimSize[i] = input.dimSize[i];
            else if (i >= dim)
                dimSize[i] = input.dimSize[i + 1];
        }

        float dr = (!input.isSparse) ? 1.0F : input.denseRatio;
        InitTensorV2(&output, order, dimSize, input.dataType, dr, input.devID, input.mem);

        /* destroy variables */
        delete[] dimSize;
    }

    /* call _ReduceSumSquared function */
    _ReduceSumSquared(&input, &output, dim, &shift);

    if (input.enableGrad) {
        /* tensor connections */
        XLink::MakeLink(&input, &shift, &output, REDUCE_REDUCESUMSQUARED);
        XLink::AddParamToHeadInt(&output, dim);
    }
}
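
/*
Usage sketch for the in-place form (illustration only; `a`, `b` and `s` as in
the first sketch). If `b` is uninitialized or not shaped like the reduced
result, it is (re)initialized here before the reduction is computed:

    ReduceSumSquared(a, b, 1, s);   // writes the squared sums into b
*/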

} // namespace nts(NiuTrans.Tensor)