Commit cf149d57 by xiaotong

correct unsafe code

parent 5546a260
......@@ -193,7 +193,7 @@ void SumDimTest()
y.SetZeroAll();
z.SetZeroAll();
float * data = new float[x.unitNum];
DTYPE * data = new DTYPE[x.unitNum];
for(int i = 0; i < x.unitNum; i++)
data[i] = (DTYPE)i;
......
......@@ -154,6 +154,7 @@ void XShapeGrad::GradMerge(XTensor * node, bool isEfficent)
XNoder::MakeGrad(input);
int * dims = new int[input->order];
memset(dims, 0, sizeof(int) * input->order);
for(int i = 0, j = 0; i < input->order; i++){
if(i >= leadDim){
dims[j++] = input->dimSize[i];
......
......@@ -1006,7 +1006,7 @@ void T2TTrainer::Update(T2TModel * model, const float lr)
XTensor * para = (XTensor*)ws.Get(i);
XTensor * paraGrad = para->grad;
if (para == NULL || paraGrad == NULL)
if (paraGrad == NULL)
continue;
CheckNTErrors(para != NULL, "NULL parameter tensor!");
......
......@@ -524,7 +524,7 @@ int XDevManager::GetDeviceIDs(char * devInfo, int * devIDs)
for(int i = 0; i < terms->count; i++){
int devC, devID;
char dev[32];
char dev[32] = "";
char * curDevInfo = (char*)terms->GetItem(i);
if(sscanf(curDevInfo, "%d:%s", &devC, dev) < 2){
......
......@@ -1278,7 +1278,7 @@ int XTensor::GetNonzeroSize()
if(dataType == DEFAULT_DTYPE){
int count = 0;
for(int i = 0; i < unitNum; i++){
DTYPE value = *((DTYPE*)(char*)data + i * sizeof(DTYPE));
DTYPE value = *(DTYPE*)((char*)data + i * sizeof(DTYPE));
if(value == 0)
count++;
}
......@@ -2261,6 +2261,8 @@ XTensor * NewTensor(const XTensor * a, bool isFilledData)
CheckNTErrors((a != NULL), "Empty input!");
memset(dims, 0, sizeof(int) * MAX_TENSOR_DIM_NUM);
if(a->order > 0)
memcpy(dims, a->dimSize, sizeof(int) * a->order);
......
......@@ -463,7 +463,7 @@ void _SetDataWithOffset(XTensor * tensor, MTYPE * offsets, DTYPE value, MTYPE nu
#ifdef USE_CUDA
XMem * mem = tensor->mem;
MTYPE size = num * sizeof(MTYPE);
MTYPE * offsetsCuda = mem != NULL ? (MTYPE*)mem->AllocBuf(mem->devID, size) : (MTYPE*)XMemAlloc(mem->devID, size);
MTYPE * offsetsCuda = mem != NULL ? (MTYPE*)mem->AllocBuf(mem->devID, size) : (MTYPE*)XMemAlloc(tensor->devID, size);
XMemCopy(offsetsCuda, tensor->devID, offsets, -1, num * sizeof(MTYPE));
_CudaSetDataWithOffset(tensor, offsetsCuda, value, num);
......@@ -471,7 +471,7 @@ void _SetDataWithOffset(XTensor * tensor, MTYPE * offsets, DTYPE value, MTYPE nu
if (mem != NULL)
mem->ReleaseBuf(mem->devID, size);
else
XMemFree(mem->devID, offsetsCuda);
XMemFree(tensor->devID, offsetsCuda);
#else
ShowNTErrors("Please recompile the code with USE_CUDA");
#endif
......
......@@ -46,9 +46,9 @@ void _CopyBlocks(void * source, int blockSize, int blockNum, void * target, int
#ifdef USE_CUDA
/* copy the index from host to device */
int * targetBlocksTMP = myMem != NULL ?
(int*)myMem->AllocBuf(myMem->devID, blockNum * sizeof(int)):
(int*)myMem->AllocBuf(devID, blockNum * sizeof(int)):
(int*)XMemAlloc(devID, blockNum * sizeof(int));
XMemCopy(targetBlocksTMP, myMem->devID, targetBlocks, -1, blockNum * sizeof(int));
XMemCopy(targetBlocksTMP, devID, targetBlocks, -1, blockNum * sizeof(int));
_CopyBlocksOnSite(source, blockSize, blockNum, target, targetBlocksTMP, devID);
......
......@@ -59,7 +59,7 @@ void _CopyBlocksInGrid(void * source, int blockSize, int blockNum, int gridNum,
ShowNTErrors("Plesae specify USE_CUDA and recompile the code!");
#endif
}
else {
else if(myMem != NULL){
void * buf = XMemAlloc(myMem->devID, blockSize * blockNum * unitSize);
for (int k = 0; k < gridNum; k++) {
int offset = k * blockSize * blockNum;
......@@ -78,6 +78,9 @@ void _CopyBlocksInGrid(void * source, int blockSize, int blockNum, int gridNum,
}
XMemFree(myMem->devID, buf);
}
else {
ShowNTErrors("TODO!");
}
}
} // namespace nts(NiuTrans.Tensor)
\ No newline at end of file
......@@ -43,6 +43,8 @@ void _ReduceMax(const XTensor * input, XTensor * output, int dim)
CheckNTErrors((input->dataType == output->dataType), "Unmatched data types!");
int dimRDI = input->order - dim - 1;
CheckNTErrors(dimRDI >= 0, "Wrong dimension!");
for(int i = 0; i < input->order; i++){
if(i < dimRDI){
CheckNTErrors((input->dimSizeRDI[i] == output->dimSizeRDI[i]),
......
......@@ -51,6 +51,8 @@ void _ReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor
CheckNTErrors((shift == NULL || XTensor::IsSameShaped(output, shift)), "Incorrect shift tensor size!");
int dimRDI = input->order - dim - 1;
CheckNTErrors(dimRDI >= 0, "Wrong dimension!");
for(int i = 0; i < input->order; i++){
if(i < dimRDI){
CheckNTErrors((input->dimSizeRDI[i] == output->dimSizeRDI[i]), "Unmatched tensors!");
......
......@@ -126,7 +126,7 @@ void _Split(const XTensor * s, XTensor * t, int whereToSplit, int splitNum)
void * dataTMP = t->data;
if (!isOnSameDevice)
dataTMP = mem != NULL ? mem->AllocBuf(mem->devID, size) : XMemAlloc(mem->devID, size);
dataTMP = mem != NULL ? mem->AllocBuf(mem->devID, size) : XMemAlloc(s->devID, size);
int realBlockSize = blockSize * t->unitSize;
int blockSplitSize = blockNum / splitNum;
......
......@@ -34,7 +34,7 @@ rectify function y = max(0, x)
void _Rectify(const XTensor * x, XTensor * y)
{
#ifdef USE_CUDA
if(y->devID >= 0 || y->devID >= 0){
if(x->devID >= 0 || y->devID >= 0){
_CudaRectify(x, y);
return;
}
......
Markdown 格式
0%
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论