Skip to content

Commit 041edee

Browse files
asdfghjklxd authored and pytorchmergebot committed
Fix several typos (pytorch#83823)
Fixes #ISSUE_NUMBER Pull Request resolved: pytorch#83823 Approved by: https://github.com/ngimel, https://github.com/kit1980
1 parent 7a348a1 commit 041edee

File tree

3 files changed

+5
-8
lines changed

3 files changed

+5
-8
lines changed

aten/src/ATen/Dispatch.h

+1-1
Original file line number | Diff line number | Diff line change
@@ -200,7 +200,7 @@ inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
200200
// conditionally compile fragments of the case statements such
201201
// that the kernel functions are specialized only for the dtypes
202202
// that are needed. The NAME parameter *must* be a build time
203-
// cons char* (can't be std::string, etc...)
203+
// const char* (can't be std::string, etc...)
204204
//
205205
// Please ensure that the NAME is unique for every implementation
206206
// or you run the risk of over-including code for the kernel

c10/core/ScalarType.h

+1-1
Original file line number | Diff line number | Diff line change
@@ -103,7 +103,7 @@ struct ScalarTypeToCPPType;
103103
/* This is a workaround for the CUDA bug which prevents */ \
104104
/* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
105105
/* ambiguous reference which can't to be resolved. For some reason it */ \
106-
/* cant pick between at::detail and at::cuda::detail. */ \
106+
/* can't pick between at::detail and at::cuda::detail. */ \
107107
/* For repro example, please see: */ \
108108
/* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
109109
/* TODO: remove once the bug is fixed. */ \

torch/csrc/TypeInfo.cpp

+3-6
Original file line number | Diff line number | Diff line change
@@ -108,7 +108,7 @@ PyObject* THPDTypeInfo_compare(THPDTypeInfo* a, THPDTypeInfo* b, int op) {
108108

109109
static PyObject* THPDTypeInfo_bits(THPDTypeInfo* self, void*) {
110110
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers)
111-
int bits = elementSize(self->type) * 8;
111+
int64_t bits = elementSize(self->type) * 8;
112112
return THPUtils_packInt64(bits);
113113
}
114114

@@ -220,13 +220,10 @@ PyObject* THPFInfo_str(THPFInfo* self) {
220220
}
221221

222222
PyObject* THPIInfo_str(THPIInfo* self) {
223-
auto type = self->type;
224-
std::string primary_name, legacy_name;
225-
std::tie(primary_name, legacy_name) = torch::utils::getDtypeNames(type);
226223
std::ostringstream oss;
227224

228-
oss << "iinfo(min=" << PyFloat_AsDouble(THPIInfo_min(self, nullptr));
229-
oss << ", max=" << PyFloat_AsDouble(THPIInfo_max(self, nullptr));
225+
oss << "iinfo(min=" << PyLong_AsDouble(THPIInfo_min(self, nullptr));
226+
oss << ", max=" << PyLong_AsDouble(THPIInfo_max(self, nullptr));
230227
oss << ", dtype=" << PyUnicode_AsUTF8(THPIInfo_dtype(self, nullptr)) << ")";
231228

232229
return THPUtils_packString(oss.str().c_str());

0 commit comments

Comments (0)