Slice and Unsqueeze ops bug fixes in v5.0 #293

Open · wants to merge 1 commit into base: v5.0
builtin_op_importers.cpp (22 changes: 14 additions & 8 deletions)
@@ -927,7 +927,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Dropout) {
{
RETURN_IDENTITY(inputs.at(0));
}
-else
+else
{
// Return both Dropout outputs: (output + mask)
std::vector<TensorOrWeights> outputs;
@@ -1246,7 +1246,7 @@ DEFINE_BUILTIN_OP_IMPORTER(MaxPool) {

// TODO: Since TensorRT only 0 pads, need to do an elementwise ADD with
// -INFINITY on the padded dimensions to ensure max pooling functions as expected.
-// This negatively impacts performance. Update when non-zero padding is supported in a
+// This negatively impacts performance. Update when non-zero padding is supported in a
// future TRT version.

nvinfer1::Dims padded_dims = tensor_ptr->getDimensions();
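
As an aside on the TODO above: with zero padding, a pooling window that overlaps the padded border can return 0 even though every real element is negative, while padding with -INFINITY leaves the true maximum unchanged. A minimal standalone C++ sketch of that effect (illustration only, no TensorRT APIs, not part of this diff):

    #include <algorithm>
    #include <cstdio>
    #include <limits>
    #include <vector>

    // Max over a 1-D window that extends one element past the data on each side.
    // 'pad' is the value assumed for the out-of-range positions.
    float padded_window_max(const std::vector<float>& data, float pad) {
        float result = pad;  // window starts on a padded position
        for (float v : data) result = std::max(result, v);
        return std::max(result, pad);  // window ends on a padded position
    }

    int main() {
        std::vector<float> window = {-3.0f, -1.5f, -2.0f};  // all-negative activations
        // Zero padding changes the answer: the max becomes 0 instead of -1.5.
        std::printf("zero pad: %f\n", padded_window_max(window, 0.0f));
        // -INFINITY padding preserves the true maximum of the real elements.
        std::printf("-inf pad: %f\n",
                    padded_window_max(window, -std::numeric_limits<float>::infinity()));
        return 0;
    }
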
@@ -1562,7 +1562,7 @@ DEFINE_BUILTIN_OP_IMPORTER(Reshape) {
new_shape = set_dims_CHW(remove_dim(new_shape, BATCH_DIM));
// Check for -1 dimension in new shape
TRT_CHECK(get_infer_dim(infer_dim,new_shape));
-
+
if (infer_dim < 0) {
ASSERT(get_shape_size(new_shape) ==
get_shape_size(tensor.getDimensions()),
@@ -1720,17 +1720,17 @@ DEFINE_BUILTIN_OP_IMPORTER(Slice) {
// TRT only supports slicing HW dims when using padding layer,
// so if user wants to slice some other axis, we check whether
// slice contains full dimension
-if (axes[i] != nbDims-1 && axes[i] != nbDims)
+if (axis != nbDims-2 && axis != nbDims-1)
{
ASSERT((ends[i] - starts[i]) == dims.d[axis], ErrorCode::kUNSUPPORTED_NODE);
}
else
{
-if (axes[i] == nbDims-1)
+if (axis == nbDims-2)
{
H_idx = i;
}
-else if (axes[i] == nbDims)
+else if (axis == nbDims-1)
{
W_idx = i;
}
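
For the index arithmetic behind the corrected checks: ONNX axes count the batch dimension, while the TRT tensor here does not, so once an ONNX axis has been shifted past the batch dimension, H sits at nbDims-2 and W at nbDims-1 of the TRT shape. A self-contained sketch of that mapping (onnx_axis_to_trt is a hypothetical helper, not code from this repo):

    #include <cassert>
    #include <cstdio>

    // Hypothetical helper: map an ONNX axis (batch dimension included) onto a
    // TensorRT implicit-batch axis (batch dimension excluded).
    int onnx_axis_to_trt(int onnx_axis, int trt_nbDims) {
        assert(onnx_axis > 0 && onnx_axis <= trt_nbDims);  // slicing the batch dim is unsupported
        return onnx_axis - 1;
    }

    int main() {
        const int trt_nbDims = 3;  // e.g. a CHW tensor once the batch dim is implicit
        for (int onnx_axis = 1; onnx_axis <= 3; ++onnx_axis) {
            int axis = onnx_axis_to_trt(onnx_axis, trt_nbDims);
            bool is_H = (axis == trt_nbDims - 2);  // the H check used by the patched code
            bool is_W = (axis == trt_nbDims - 1);  // the W check
            std::printf("ONNX axis %d -> TRT axis %d (H=%d, W=%d)\n",
                        onnx_axis, axis, is_H, is_W);
        }
        return 0;
    }
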
@@ -2054,19 +2054,25 @@ DEFINE_BUILTIN_OP_IMPORTER(Unsqueeze) {
int ndim_in = old_shape.nbDims;
OnnxAttrs attrs(node);
auto axes = attrs.get<std::vector<int>>("axes");

+std::set<int> axes_set_tmp(axes.begin(), axes.end());
+int ndim_out = ndim_in + axes_set_tmp.size();

// If the input was already a tensor, then we're dealing with a TRT shape,
// so subtract 1 from the axes. Otherwise, this is an ONNX shape.
if (inputs.at(0).is_tensor())
{
for (auto& axis : axes)
{
ASSERT(axis != BATCH_DIM, ErrorCode::kUNSUPPORTED_NODE);
---axis;
+convert_axis(axis, ndim_out);
+// convert_axis already subtracts batch dimension
+//--axis;
}
}

std::set<int> axes_set(axes.begin(), axes.end());
-int ndim_out = ndim_in + axes_set.size();
+//int ndim_out = ndim_in + axes_set.size();
ASSERT(ndim_out <= nvinfer1::Dims::MAX_DIMS, ErrorCode::kUNSUPPORTED_NODE);
nvinfer1::Dims new_shape;
new_shape.nbDims = ndim_out;
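
The Unsqueeze change hoists the output-rank computation above the axis loop because the axis conversion now needs that rank. A short sketch of the rank arithmetic, with a hypothetical normalize_axis standing in for convert_axis (assumed, per the comment in the diff, to resolve negative axes against the output rank and subtract the batch dimension):

    #include <cassert>
    #include <cstdio>
    #include <set>
    #include <vector>

    // Hypothetical stand-in for convert_axis: resolve a possibly-negative axis
    // against the output rank and account for the implicit batch dimension.
    int normalize_axis(int axis, int ndim_out) {
        if (axis < 0) axis += ndim_out;  // negative axes count from the end
        assert(axis != 0);               // axis 0 is the (unsupported) batch dim
        return axis - 1;                 // drop the batch dim from the index
    }

    int main() {
        int ndim_in = 3;                    // rank of the input TRT tensor
        std::vector<int> axes = {1, -1};    // ONNX "axes" attribute

        // 1) The output rank grows by the number of *distinct* axes...
        std::set<int> unique_axes(axes.begin(), axes.end());
        int ndim_out = ndim_in + static_cast<int>(unique_axes.size());

        // 2) ...and that rank is needed before any axis can be normalized,
        //    which is why the patch computes it ahead of the loop.
        for (int axis : axes) {
            std::printf("axis %d -> %d (ndim_out = %d)\n",
                        axis, normalize_axis(axis, ndim_out), ndim_out);
        }
        return 0;
    }
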