Skip to content

Commit 2130070

Browse files
gchanan authored and soumith committed
Handle copying empty sparse tensors to/from CPU, GPU. (pytorch#5361)
* Handle copying empty sparse tensors to/from CPU, GPU. This is likely not a robust fix because it special-cases the situation where both the indices and values are empty, rather than handling each one separately. But this is currently blocking a change introducing devices to constructors.
* Guard against sizes being NULL.
1 parent 6c587e9 commit 2130070

File tree

3 files changed

+45
-22
lines changed

3 files changed

+45
-22
lines changed

aten/src/THCS/generic/THCSTensor.cpp

+9-2
Original file line number | Diff line number | Diff line change
@@ -146,8 +146,15 @@ THCSTensor *THCSTensor_(newWithTensorAndSize)(THCState *state, THCIndexTensor *i
146146
THCSTensor *self = (THCSTensor *)THAlloc(sizeof(THCSTensor));
147147
THCSTensor_(rawInit)(state, self);
148148

149-
nDimI = THCIndexTensor_(size)(state, indices, 0);
150-
nDimV = THCTensor_(nDimension)(state, values) - 1;
149+
// TODO: we may need to special case when only one of these are empty.
150+
if (THCudaLongTensor_nDimension(state, indices) == 0 && THCTensor_(nDimension)(state, values) == 0
151+
&& sizes != NULL) {
152+
nDimI = 0;
153+
nDimV = THLongStorage_size(sizes);
154+
} else {
155+
nDimI = THCIndexTensor_(size)(state, indices, 0);
156+
nDimV = THCTensor_(nDimension)(state, values) - 1;
157+
}
151158
if (!sizes) {
152159
// TODO Make it work with N-dimensional values
153160
THArgCheck(nDimV > 0, 3, "size must be provided when nDimV > 0");

aten/src/THS/generic/THSTensor.cpp

+29-20
Original file line number | Diff line number | Diff line change
@@ -146,8 +146,14 @@ THSTensor *THSTensor_(newWithTensorAndSize)(THLongTensor *indices, THTensor *val
146146
THSTensor *self = (THSTensor *)THAlloc(sizeof(THSTensor));
147147
THSTensor_(rawInit)(self);
148148

149-
nDimI = THLongTensor_size(indices, 0);
150-
nDimV = THTensor_(nDimension)(values) - 1;
149+
// TODO: we may need to special case when only one of these are empty.
150+
if (THLongTensor_nDimension(indices) == 0 && THTensor_(nDimension)(values) == 0 && sizes != NULL) {
151+
nDimI = 0;
152+
nDimV = THLongStorage_size(sizes);
153+
} else {
154+
nDimI = THLongTensor_size(indices, 0);
155+
nDimV = THTensor_(nDimension)(values) - 1;
156+
}
151157
if (!sizes) {
152158
ignore = THLongTensor_new();
153159
THLongTensor *computed_indices_sizes = THLongTensor_new();
@@ -169,27 +175,30 @@ THSTensor *THSTensor_(newWithTensorAndSize)(THLongTensor *indices, THTensor *val
169175
THArgCheck(THLongStorage_size(sizes) == nDimI + nDimV, 2,
170176
"number of dimensions must be nDimI + nDimV");
171177

172-
THLongTensor *max_indices = THLongTensor_new();
173-
ignore = THLongTensor_new();
174-
THLongTensor_max(max_indices, ignore, indices, 1, 0);
175-
THLongTensor_free(ignore);
176-
for (int d = 0; d < nDimI; d++) {
177-
int64_t max_index_in_dim = THTensor_fastGet1d(max_indices, d);
178-
int64_t dim_size = sizes->data[d];
179-
THArgCheck(max_index_in_dim <= dim_size, 2,
180-
"sizes is inconsistent with indices: for dim %d, size is %lld but found index %lld",
181-
d, (long long)dim_size, (long long)max_index_in_dim);
182-
}
183-
for (int d = 0; d < nDimV; d++) {
184-
int64_t values_size = THTensor_(size)(values, d + 1);
185-
int64_t specified_size = sizes->data[nDimI + d];
186-
THArgCheck(values_size <= specified_size, 2,
187-
"values and sizes are inconsistent: sizes[%d] is %lld but values.size(%d) is %lld",
188-
d + nDimI, (long long)specified_size, d + 1, (long long)values_size);
178+
// TODO: we may need to special case when only one of these are empty.
179+
if (!(THLongTensor_nDimension(indices) == 0 && THTensor_(nDimension)(values) == 0 && sizes != NULL)) {
180+
THLongTensor *max_indices = THLongTensor_new();
181+
ignore = THLongTensor_new();
182+
THLongTensor_max(max_indices, ignore, indices, 1, 0);
183+
THLongTensor_free(ignore);
184+
for (int d = 0; d < nDimI; d++) {
185+
int64_t max_index_in_dim = THTensor_fastGet1d(max_indices, d);
186+
int64_t dim_size = sizes->data[d];
187+
THArgCheck(max_index_in_dim <= dim_size, 2,
188+
"sizes is inconsistent with indices: for dim %d, size is %lld but found index %lld",
189+
d, (long long)dim_size, (long long)max_index_in_dim);
190+
}
191+
for (int d = 0; d < nDimV; d++) {
192+
int64_t values_size = THTensor_(size)(values, d + 1);
193+
int64_t specified_size = sizes->data[nDimI + d];
194+
THArgCheck(values_size <= specified_size, 2,
195+
"values and sizes are inconsistent: sizes[%d] is %lld but values.size(%d) is %lld",
196+
d + nDimI, (long long)specified_size, d + 1, (long long)values_size);
197+
}
198+
THLongTensor_free(max_indices);
189199
}
190200

191201
THSTensor_(rawResize)(self, nDimI, nDimV, THLongStorage_data(sizes));
192-
THLongTensor_free(max_indices);
193202
}
194203
// NB: by default, we do NOT clone indices/values into the sparse tensor.
195204
// Efficient API by default!

test/test_sparse.py

+7
Original file line number | Diff line number | Diff line change
@@ -339,6 +339,13 @@ def test_clone(self):
339339
y = x.clone()
340340
self.assertTrue(y.is_coalesced())
341341

342+
@cuda_only
343+
def test_cuda_empty(self):
344+
from torch.autograd import Variable
345+
x = Variable(torch.sparse.FloatTensor(2, 3, 4))
346+
y = x.cuda(0)
347+
x.cpu()
348+
342349
def test_transpose(self):
343350
x = self._gen_sparse(4, 20, 5)[0]
344351
y = self.safeToDense(x)

0 commit comments

Comments
 (0)