Commit aac1ad5c by xiaotong

code of attention, fnn, layer normalization units

parent 005739bc
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "T2TAttention.h" #include "T2TAttention.h"
#include "T2TUtility.h" #include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
namespace transformer namespace transformer
{ {
...@@ -59,9 +60,57 @@ void T2TAttention::InitModel(int argc, const char ** argv, int myDevID, XMem * m ...@@ -59,9 +60,57 @@ void T2TAttention::InitModel(int argc, const char ** argv, int myDevID, XMem * m
LoadParamInt(argc, argv, "d", &d, 512); LoadParamInt(argc, argv, "d", &d, 512);
LoadParamFloat(argc, argv, "attminmax", &minmax, 0.08F); LoadParamFloat(argc, argv, "attminmax", &minmax, 0.08F);
InitTensor2D(&w, 3 * d, 2 * dk + dv, X_FLOAT, devID, mem); InitTensor2D(&wk, d, dk, X_FLOAT, devID, mem);
InitTensor2D(&wq, d, dk, X_FLOAT, devID, mem);
InitTensor2D(&wv, d, dv, X_FLOAT, devID, mem);
w.SetDataRand(-minmax, minmax); wk.SetDataRand(-minmax, minmax);
wq.SetDataRand(-minmax, minmax);
wv.SetDataRand(-minmax, minmax);
}
/*
make the network
>> k - keys. It might be of size B * L * H
where B = batch size, L = sequence length,
and H = vector size of each position
>> q - queries
>> v - values
<< return - multi-attention result
*/
XTensor * T2TAttention::Make(XTensor * k, XTensor * q, XTensor * v)
{
XTensor k2;
XTensor q2;
XTensor v2;
/* linear transofmration before self-attention */
k2 = MMul(*k, wk);
q2 = MMul(*q, wq);
v2 = MMul(*v, wv);
XTensor kheads;
XTensor qheads;
XTensor vheads;
/* multi head */
kheads = Split(k2, k2.order - 1, nhead);
qheads = Split(q2, q2.order - 1, nhead);
vheads = Split(v2, v2.order - 1, nhead);
XTensor att;
XTensor scalar;
/* scalar = softmax(Q * K^T / sqrt(dk)) * V */
scalar = Softmax(Linear(BMMul(qheads, X_NOTRANS, kheads, X_TRANS), 1/sqrt((float)dk)), -1);
att = MMul(scalar, vheads);
XTensor * result = new XTensor();
/* concatenate the heads */
*result = Merge(att, -1);
return result;
} }
} }
...@@ -48,8 +48,14 @@ public: ...@@ -48,8 +48,14 @@ public:
/* head number */ /* head number */
int nhead; int nhead;
/* transformation matrix */ /* transformation matrix for K */
XTensor w; XTensor wk;
/* transformation matrix for Q */
XTensor wq;
/* transformation matrix for V */
XTensor wv;
/* size of transformed Q and K */ /* size of transformed Q and K */
int dk; int dk;
...@@ -69,6 +75,9 @@ public: ...@@ -69,6 +75,9 @@ public:
/* initialize the model */ /* initialize the model */
void InitModel(int argc, const char ** argv, int myDevID = -1, XMem * myMem = NULL); void InitModel(int argc, const char ** argv, int myDevID = -1, XMem * myMem = NULL);
/* make the network */
XTensor * Make(XTensor * k, XTensor * q, XTensor * v);
}; };
} }
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-08-01
*/
#include "T2TEmbedding.h"
#include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
namespace transformer
{
/* constructor */
T2TEmbedder::T2TEmbedder()
{
    devID = -1;
    mem = NULL;
    vSize = -1;
    eSize = -1;    /* fix: eSize was left uninitialized by the constructor */
}
/* deconstructor */
T2TEmbedder::~T2TEmbedder()
{
    /* nothing to release explicitly: w and posEmbedding are value members
       destroyed by their own destructors, and mem is not owned here
       (it is supplied by the caller via InitModel) */
}
/*
initialize the model
>> argc - number of arguments
>> argv - list of pointers to the arguments
>> myDevID - device id
>> myMem - the memory pool
*/
void T2TEmbedder::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem)
{
    devID = myDevID;
    mem = myMem;

    int maxLength = 0;
    int d = 0;

    LoadParamInt(argc, argv, "vsize", &vSize, -1);
    /* fix: eSize is used below (and by MakePosEmbedding) but was never
       loaded from the arguments, so the tensor was sized by an
       uninitialized value */
    LoadParamInt(argc, argv, "esize", &eSize, 512);
    LoadParamInt(argc, argv, "maxlen", &maxLength, 256);
    LoadParamInt(argc, argv, "d", &d, 256);

    /* word embedding matrix: one eSize-dim row per vocabulary entry */
    InitTensor2D(&w, vSize, eSize, X_FLOAT, devID, mem);

    /* NOTE(review): a std-dev of sqrt(eSize) grows with the embedding size;
       the usual transformer initialization uses 1/sqrt(eSize) — confirm
       this is intended */
    w.SetDataRandn(0, sqrt((float)eSize));

    /* create the positional embedding matrix */
    MakePosEmbedding(eSize, d, maxLength);
}
/*
make positional embeddings (of size length * eSize)
eSize - embedding size
d - model dimension used to set the sinusoid frequencies
length - length of the sequence
*/
void T2TEmbedder::MakePosEmbedding(int eSize, int d, int length)
{
    /* posEmbedding[pos][k] follows the sinusoidal scheme:
       sin(pos / 10000^(2i/d)) for even k, cos(pos / 10000^(2i/d)) for
       odd k, where i = k/2 is the frequency index of the (sin, cos) pair */
    InitTensor2D(&posEmbedding, length, eSize, X_FLOAT, devID, mem);

    float * buf = new float[posEmbedding.unitNum];

    for(int pos = 0; pos < length; pos++){
        float * row = buf + pos * eSize;
        for(int k = 0; k < eSize; k++){
            /* integer division pairs dimensions 2i and 2i+1 on one frequency */
            int i = k / 2;
            if(k % 2 == 0)
                row[k] = sin(pos/pow(10000.0F, 2.0F*i/d));
            else
                row[k] = cos(pos/pow(10000.0F, 2.0F*i/d));
        }
    }

    /* upload the host buffer into the tensor, then free it */
    posEmbedding.SetData(buf, posEmbedding.unitNum);
    delete[] buf;
}
/*
make the network
*/
XTensor * T2TEmbedder::Make(XTensor * input)
{
    /* TODO: stub — always returns NULL. Presumably this should combine the
       word embedding (w) with posEmbedding for the given input; confirm
       the intended behavior against the encoder, which consumes the result */
    return NULL;
}
}
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-08-01
*/
#ifndef __T2TEMBEDDING_H__
#define __T2TEMBEDDING_H__
#include "../../network/XNet.h"
using namespace nts;
namespace transformer
{
/*
embedding (of word at position i):
word embedding + positional embedding
*/
class T2TEmbedder
{
public:
    /* device id (set from InitModel's myDevID) */
    int devID;

    /* memory pool (not owned; supplied via InitModel, may be NULL) */
    XMem * mem;

    /* vocabulary size */
    int vSize;

    /* embedding size (vector size of each word embedding) */
    int eSize;

    /* word embedding matrix of size vSize * eSize */
    XTensor w;

    /* precomputed positional embeddings (length * eSize). Building them
       once and re-using them speeds up the embedding step. */
    XTensor posEmbedding;

public:
    /* constructor */
    T2TEmbedder();

    /* de-constructor */
    ~T2TEmbedder();

    /* initialize the model (reads "vsize", "maxlen" and "d" from the arguments) */
    void InitModel(int argc, const char ** argv, int myDevID = -1, XMem * myMem = NULL);

    /* make positional embeddings (fills posEmbedding) */
    void MakePosEmbedding(int eSize, int d, int length);

    /* make the network: embedding of the input sequence */
    XTensor * Make(XTensor * input);
};
}
#endif
...@@ -21,7 +21,9 @@ ...@@ -21,7 +21,9 @@
#include <math.h> #include <math.h>
#include "T2TEncoder.h" #include "T2TEncoder.h"
#include "T2TLayerNormal.h"
#include "T2TUtility.h" #include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
namespace transformer namespace transformer
{ {
...@@ -34,6 +36,9 @@ AttEncoder::AttEncoder() ...@@ -34,6 +36,9 @@ AttEncoder::AttEncoder()
/* de-constructor */ /* de-constructor */
AttEncoder::~AttEncoder() AttEncoder::~AttEncoder()
{ {
delete[] attentions;
delete[] fnns;
delete[] layerNorms;
} }
/* /*
...@@ -57,9 +62,19 @@ void AttEncoder::InitModel(int argc, const char ** argv, int myDevID, XMem * myM ...@@ -57,9 +62,19 @@ void AttEncoder::InitModel(int argc, const char ** argv, int myDevID, XMem * myM
CheckNTErrors(nlayer > 1, "We have one encoding layer at least!"); CheckNTErrors(nlayer > 1, "We have one encoding layer at least!");
CheckNTErrors(vSize > 1, "set vocabulary size by \"-vsize\""); CheckNTErrors(vSize > 1, "set vocabulary size by \"-vsize\"");
/* embedding matrix */ /* embedding model */
InitTensor2D(&embeddingW, vSize, eSize, X_FLOAT, devID, mem); embedder.InitModel(argc, argv, devID, mem);
embeddingW.SetDataRandn(0, sqrt((float)eSize));
attentions = new T2TAttention[nlayer];
fnns = new T2TFNN[nlayer];
layerNorms = new T2TLN[nlayer];
/* initialize the stacked layers */
for(int i = 0; i < nlayer; i++){
attentions[i].InitModel(argc, argv, myDevID, myMem);
fnns[i].InitModel(argc, argv, myDevID, myMem);
layerNorms[i].InitModel(argc, argv, myDevID, myMem);
}
} }
/* /*
...@@ -69,7 +84,44 @@ make the encoding network ...@@ -69,7 +84,44 @@ make the encoding network
*/ */
XTensor * AttEncoder::Make(XTensor * input) XTensor * AttEncoder::Make(XTensor * input)
{ {
return NULL; XTensor * x = embedder.Make(input);
for(int i = 0; i < nlayer; i++){
XTensor * att;
XTensor * res;
XTensor * ln;
XTensor * fnn;
/* self attention */
att = attentions[i].Make(x, x, x);
/* residual connection */
res = &Sum(*att, *x);
/* TODO: dropout */
/* layer normalization */
ln = layerNorms[i].Make(res);
/* input of next layer */
x = ln;
/* fnn */
fnn = fnns[i].Make(x);
/* residual connection */
res = &Sum(*fnn, *x);
/* TODO: dropout */
/* layer normalization */
ln = layerNorms[i].Make(res);
/* input of next layer */
x = ln;
}
return x;
} }
} }
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#include "T2TFNN.h" #include "T2TFNN.h"
#include "T2TAttention.h" #include "T2TAttention.h"
#include "T2TEmbedding.h"
#include "T2TLayerNormal.h"
#include "../../network/XNet.h" #include "../../network/XNet.h"
using namespace nts; using namespace nts;
...@@ -75,8 +77,8 @@ public: ...@@ -75,8 +77,8 @@ public:
/* vocabulary size */ /* vocabulary size */
int vSize; int vSize;
/* word embedding matrix */ /* embedding of word at each position */
XTensor embeddingW; T2TEmbedder embedder;
/* FNN model of each layer */ /* FNN model of each layer */
T2TFNN * fnns; T2TFNN * fnns;
...@@ -84,6 +86,9 @@ public: ...@@ -84,6 +86,9 @@ public:
/* attention model of each layer */ /* attention model of each layer */
T2TAttention * attentions; T2TAttention * attentions;
/* layer normalization */
T2TLN * layerNorms;
/* input tensor of the encoder */ /* input tensor of the encoder */
XTensor * input; XTensor * input;
......
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include "T2TFNN.h" #include "T2TFNN.h"
#include "T2TUtility.h" #include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
#include "../../tensor/function/FHeader.h"
namespace transformer namespace transformer
{ {
...@@ -69,5 +71,25 @@ void T2TFNN::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem) ...@@ -69,5 +71,25 @@ void T2TFNN::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem)
b2.SetDataRand(-minmax, minmax); b2.SetDataRand(-minmax, minmax);
} }
/*
make the network
y = max(0, x * w1 + b1) * w2 + b2
>> input - the input tensor
>> return - the output tensor
*/
XTensor * T2TFNN::Make(XTensor * input)
{
XTensor t1;
XTensor * result = new XTensor();
/* t1 = max(0, x * w1 + b1) */
t1 = Rectify(MMul(*input, X_NOTRANS, w1, X_NOTRANS) + b1);
/* result = t1 * w2 + b2 */
*result = MMul(t1, X_NOTRANS, w2, X_NOTRANS) + b2;
return result;
}
} }
...@@ -71,6 +71,9 @@ public: ...@@ -71,6 +71,9 @@ public:
/* initialize the model */ /* initialize the model */
void InitModel(int argc, const char ** argv, int myDevID = -1, XMem * myMem = NULL); void InitModel(int argc, const char ** argv, int myDevID = -1, XMem * myMem = NULL);
/* make the network */
XTensor * Make(XTensor * input);
}; };
} }
......
...@@ -49,7 +49,13 @@ void T2TLN::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem) ...@@ -49,7 +49,13 @@ void T2TLN::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem)
mem = myMem; mem = myMem;
} }
/* make the network */ /*
make the network
for each layer representation x, we have
y =
>> input - the input tensor
>> return - layer normalization output
*/
XTensor * T2TLN::Make(XTensor * input) XTensor * T2TLN::Make(XTensor * input)
{ {
return NULL; return NULL;
......
...@@ -19,8 +19,8 @@ ...@@ -19,8 +19,8 @@
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31 * $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31
*/ */
#ifndef __T2TFNN_H__ #ifndef __T2TLAYERNORMAL_H__
#define __T2TFNN_H__ #define __T2TLAYERNORMAL_H__
#include "../../network/XNet.h" #include "../../network/XNet.h"
......
...@@ -30,21 +30,35 @@ namespace transformer ...@@ -30,21 +30,35 @@ namespace transformer
/* constructor */ /* constructor */
T2TModel::T2TModel() T2TModel::T2TModel()
{ {
devID = -1;
mem = NULL;
isLM = false; isLM = false;
isMT = false;
} }
/* de-constructor */ /* de-constructor */
T2TModel::~T2TModel() T2TModel::~T2TModel()
{ {
delete mem;
} }
/* initialize the model */ /* initialize the model */
void T2TModel::InitModel(int argc, const char ** argv) void T2TModel::InitModel(int argc, const char ** argv)
{ {
bool useMem = false;
LoadParamInt(argc, argv, "dev", &devID, -1);
LoadParamBool(argc, argv, "mem", &useMem, useMem);
LoadParamBool(argc, argv, "lm", &isLM, true); LoadParamBool(argc, argv, "lm", &isLM, true);
LoadParamBool(argc, argv, "mt", &isMT, false); LoadParamBool(argc, argv, "mt", &isMT, false);
encoder.InitModel(argc, argv); if(useMem){
delete mem;
mem = new XMem(devID);
}
encoder.InitModel(argc, argv, devID, mem);
outputLayer.InitModel(argc, argv, devID, mem);
} }
} }
\ No newline at end of file
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "T2TAttention.h" #include "T2TAttention.h"
#include "T2TEncoder.h" #include "T2TEncoder.h"
#include "T2TDecoder.h" #include "T2TDecoder.h"
#include "T2TOutput.h"
namespace transformer namespace transformer
{ {
...@@ -33,12 +34,21 @@ namespace transformer ...@@ -33,12 +34,21 @@ namespace transformer
class T2TModel class T2TModel
{ {
public: public:
/* device id */
int devID;
/* memory pool */
XMem * mem;
/* the encoder */ /* the encoder */
AttEncoder encoder; AttEncoder encoder;
/* the decoder */ /* the decoder */
AttDecoder decoder; AttDecoder decoder;
/* output layer */
T2TOutput outputLayer;
/* indicates whether the model is running for language modeling */ /* indicates whether the model is running for language modeling */
bool isLM; bool isLM;
......
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31
*/
#include "T2TOutput.h"
#include "T2TUtility.h"
#include "../../tensor/core/CHeader.h"
namespace transformer
{
/* constructor */
T2TOutput::T2TOutput()
{
    /* no device or memory pool bound yet */
    mem = NULL;
    devID = -1;

    /* sizes stay unknown until InitModel() loads them from the arguments */
    vSize = -1;
    hSize = -1;
    inSize = -1;
}
/* de-constructor */
T2TOutput::~T2TOutput()
{
    /* nothing to free: w is a value member released by its own destructor,
       and mem is not owned by this class (it is passed in via InitModel) */
}
/*
initialize the model
>> argc - number of arguments
>> argv - list of pointers to the arguments
>> myDevID - device id
>> myMem - the memory pool
*/
void T2TOutput::InitModel(int argc, const char ** argv, int myDevID, XMem * myMem)
{
devID = myDevID;
mem = myMem;
LoadParamInt(argc, argv, "vsize", &vSize, -1);
LoadParamInt(argc, argv, "hsize", &inSize, 512);
LoadParamInt(argc, argv, "hsize", &hSize, 512);
}
/*
make the network
y = softmax(x * w)
>> input - input tensor
<< return - output tensor
*/
XTensor * T2TOutput::Make(XTensor * input)
{
    /* y = logsoftmax(x * w), normalized along the last dimension;
       the caller takes ownership of the returned tensor */
    XTensor * output = new XTensor();
    *output = LogSoftmax(MMul(*input, w), -1);

    return output;
}
}
\ No newline at end of file
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (xiaotong@mail.neu.edu.cn) 2018-07-31
*/
#ifndef __T2TOUTPUT_H__
#define __T2TOUTPUT_H__
#include "../../tensor/function/FHeader.h"
using namespace nts;
namespace transformer
{
/* output layer */
class T2TOutput
{
public:
    /* device id (set from InitModel's myDevID) */
    int devID;

    /* memory pool (not owned; supplied via InitModel, may be NULL) */
    XMem * mem;

    /* vocabulary size (number of output logits) */
    int vSize;

    /* input vector size (loaded from "hsize") */
    int inSize;

    /* vector size of the linear transformation (also loaded from "hsize") */
    int hSize;

    /* transformation matrix of the output layer */
    XTensor w;

public:
    /* constructor */
    T2TOutput();

    /* de-constructor */
    ~T2TOutput();

    /* initialize the model (reads "vsize" and "hsize" from the arguments) */
    void InitModel(int argc, const char ** argv, int myDevID = -1, XMem * myMem = NULL);

    /* make the network: y = logsoftmax(x * w); caller owns the result */
    XTensor * Make(XTensor * input);
};
}
#endif
\ No newline at end of file
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
namespace nts { // namespace nts(NiuTrans.Tensor) namespace nts { // namespace nts(NiuTrans.Tensor)
#define BMMul MatrixMulBatched
/* /*
matrix multiplication of the two tensors c = trans(a) * trans(b) * alpha + c * beta matrix multiplication of the two tensors c = trans(a) * trans(b) * alpha + c * beta
......
...@@ -44,6 +44,7 @@ void _Merge(const XTensor * s, XTensor * t, int whereToMerge, int leadingDim) ...@@ -44,6 +44,7 @@ void _Merge(const XTensor * s, XTensor * t, int whereToMerge, int leadingDim)
{ {
if(leadingDim < 0) if(leadingDim < 0)
leadingDim = 0; leadingDim = 0;
int whereToMergeRDI = s->order - whereToMerge - 1; int whereToMergeRDI = s->order - whereToMerge - 1;
int leadingDimRDI = s->order - leadingDim - 1; int leadingDimRDI = s->order - leadingDim - 1;
if (leadingDimRDI < 0) if (leadingDimRDI < 0)
......
...@@ -41,6 +41,9 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim) ...@@ -41,6 +41,9 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
CheckNTErrors(!x->isSparse && !y->isSparse, "TODO!"); CheckNTErrors(!x->isSparse && !y->isSparse, "TODO!");
CheckNTErrors(x && y, "Empty input tensors!"); CheckNTErrors(x && y, "Empty input tensors!");
if(leadDim < 0)
leadDim = x->order - 1;
if(y->dimSize[leadDim] == 1){ if(y->dimSize[leadDim] == 1){
y->SetZeroAll(); y->SetZeroAll();
return; return;
...@@ -71,6 +74,11 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim) ...@@ -71,6 +74,11 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
int blockSize = 1; int blockSize = 1;
int blockNum = 1; int blockNum = 1;
for (int i = 0; i < leadDimRDI; i++)
stride *= y->dimSizeRDI[i];
blockSize = stride * dimensionSize;
blockNum = y->unitNum / blockSize;
max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem); max = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem); sum = NewTensorBuf(x->order - 1, dimSize, x->dataType, x->denseRatio, x->devID, mem);
...@@ -87,10 +95,6 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim) ...@@ -87,10 +95,6 @@ void _LogSoftmax(const XTensor * x, XTensor * y, int leadDim)
blockSum = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem); blockSum = NewTensor2D(blockSize/dimensionSize, -1, x->dataType, x->devID, mem);
} }
else{ else{
for (int i = 0; i < leadDimRDI; i++)
stride *= y->dimSizeRDI[i];
blockSize = stride * dimensionSize;
blockNum = y->unitNum / blockSize;
blockx = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem); blockx = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
blocky = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem); blocky = NewTensor2D(-stride, dimensionSize, x->dataType, x->devID, mem);
blockMax = NewTensor2D(-stride, 1, x->dataType, x->devID, mem); blockMax = NewTensor2D(-stride, 1, x->dataType, x->devID, mem);
......
...@@ -37,6 +37,9 @@ softmax y = e^x / \sum_{i} e^{x_i} ...@@ -37,6 +37,9 @@ softmax y = e^x / \sum_{i} e^{x_i}
*/ */
void _Softmax(const XTensor * x, XTensor * y, int leadDim) void _Softmax(const XTensor * x, XTensor * y, int leadDim)
{ {
if(leadDim < 0)
leadDim = x->order - 1;
int leadDimRDI = x->order - leadDim - 1; int leadDimRDI = x->order - leadDim - 1;
if(!x->isSparse && !y->isSparse && x->dataType == y->dataType){ if(!x->isSparse && !y->isSparse && x->dataType == y->dataType){
int * dimSize = new int[x->order - 1]; int * dimSize = new int[x->order - 1];
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论