/* NiuTrans.Tensor - an open-source tensor library
 * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/

#include "ScaleAndShift.h"
#include "ScaleAndShift.cuh"

namespace nts{ // namespace nts(NiuTrans.Tensor)

/* 
scale and shift all tensor entries

p = p * scale + shift

>> a - the tensor
>> scale - the scale factor
>> shift - the shift factor
*/
void ScaleAndShift(XTensor * a, DTYPE scale, DTYPE shift)
{
#ifdef USE_CUDA
    /* run it on GPUs */
    if(a->devID >= 0){
        CudaScaleAndShift(a, scale, shift);
        return;
    }
#endif

    CheckNTErrors((a->dataType == DEFAULT_DTYPE),
                        "The tensor is not in the default data type!");

    /* sparse tensor: the data block starts with an int that stores the number of
       non-zero entries, followed by (int key, DTYPE value) tuples; we walk the
       tuples and update each stored value in place */
    if(a->isSparse){
        int num = a->GetNonzeroSize();
        char * d = (char*)a->data + sizeof(int);   /* skip the leading entry count */
        char * f = d + sizeof(int);                /* value field of the first tuple */
        for(int i = 0; i < num; i++){
            DTYPE * v = (DTYPE*)f;
            *v = *v * scale + shift;
            f += sizeof(int) + sizeof(DTYPE);      /* step to the value of the next tuple */
        }
    }
    /* dense tensor */
    else{
        DTYPE * v = (DTYPE*)a->data;
        for(int i = 0; i < a->unitNum; i++){
            *v = *v * scale + shift;
            v++;
        }
    }
}
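
/*
A minimal usage sketch (not part of the library). It assumes the XTensor
construction helpers NewTensor2D/DelTensor and the SetZeroAll member used
elsewhere in NiuTrans.Tensor; exact names and signatures may differ.

    XTensor * a = NewTensor2D(2, 3, X_FLOAT, -1); // 2 x 3 dense tensor on the CPU (devID = -1)
    a->SetZeroAll();                              // every entry starts at 0
    ScaleAndShift(a, (DTYPE)2.0F, (DTYPE)0.5F);   // each entry becomes 0 * 2.0 + 0.5 = 0.5
    DelTensor(a);
*/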

} // namespace nts(NiuTrans.Tensor)