c10::string_view -> std::string_view in more places (#142517)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/142517
Approved by: https://github.com/malfet
This commit is contained in:
Richard Barnes 2024-12-12 19:45:57 +00:00 committed by PyTorch MergeBot
parent 0b75b7ff2b
commit 82ce888273
8 changed files with 28 additions and 26 deletions

View file

@ -123,7 +123,7 @@ inline constexpr crc64_t crc64(const char* str, size_t size) {
return crc64_t{detail::crc64impl(0, str, size)};
}
-inline constexpr crc64_t crc64(c10::string_view str) {
+inline constexpr crc64_t crc64(std::string_view str) {
return crc64(str.data(), str.size());
}
} // namespace c10::util

View file

@ -92,7 +92,7 @@ size_t ReplaceAll(std::string& s, std::string_view from, std::string_view to) {
std::string::size_type last_pos = 0u;
std::string::size_type cur_pos = 0u;
std::string::size_type write_pos = 0u;
-const c10::string_view input(s);
+const std::string_view input(s);
if (from.size() >= to.size()) {
// If the replacement string is not larger than the original, we

View file

@ -474,7 +474,7 @@ TEST_F(LazyOpsTest, TestDiv) {
}
TEST_F(LazyOpsTest, TestDivWithRoundingMode) {
-std::optional<c10::string_view> rounding_modes[] = {
+std::optional<std::string_view> rounding_modes[] = {
"trunc", "floor", std::nullopt};
for (const auto& rounding_mode : rounding_modes) {
for (torch::ScalarType scalar_type1 :
@ -534,7 +534,7 @@ TEST_F(LazyOpsTest, TestDivInPlace) {
}
TEST_F(LazyOpsTest, TestDivInPlaceWithRoundingMode) {
-std::optional<c10::string_view> rounding_modes[] = {
+std::optional<std::string_view> rounding_modes[] = {
"trunc", "floor", std::nullopt};
for (const auto& rounding_mode : rounding_modes) {
for (torch::ScalarType scalar_type1 : {torch::kFloat}) {

View file

@ -683,6 +683,8 @@ def generate_tensor_like_override_tests(cls):
return torch.float32
elif arg_type == "c10::string_view":
return ""
+elif arg_type == "std::string_view":
+return ""
elif arg_type == "SymInt":
# TODO: generate actual SymbolicInt
return 1

View file

@ -33,8 +33,8 @@ inline constexpr bool should_include_kernel_dtype(
const char *kernel_tag_str,
at::ScalarType scalar_type
) {
-[[maybe_unused]] c10::string_view kernel_tag_sv =
-c10::string_view(kernel_tag_str);
+[[maybe_unused]] auto kernel_tag_sv =
+std::string_view(kernel_tag_str);
$body return false;
}
}

View file

@ -19,7 +19,7 @@ inline Tensor fft(
const Tensor& self,
std::optional<SymInt> n = std::nullopt,
int64_t dim = -1,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_fft_symint(self, std::move(n), dim, norm);
}
@ -35,7 +35,7 @@ inline Tensor ifft(
const Tensor& self,
std::optional<SymInt> n = std::nullopt,
int64_t dim = -1,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_ifft_symint(self, std::move(n), dim, norm);
}
@ -51,7 +51,7 @@ inline Tensor fft2(
const Tensor& self,
OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_fft2(self, s, dim, norm);
}
@ -67,7 +67,7 @@ inline Tensor ifft2(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_ifft2(self, s, dim, norm);
}
@ -83,7 +83,7 @@ inline Tensor fftn(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
at::OptionalIntArrayRef dim = std::nullopt,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_fftn(self, s, dim, norm);
}
@ -99,7 +99,7 @@ inline Tensor ifftn(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
at::OptionalIntArrayRef dim = std::nullopt,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_ifftn(self, s, dim, norm);
}
@ -116,7 +116,7 @@ inline Tensor rfft(
const Tensor& self,
std::optional<SymInt> n = std::nullopt,
int64_t dim = -1,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_rfft_symint(self, std::move(n), dim, norm);
}
@ -135,7 +135,7 @@ inline Tensor irfft(
const Tensor& self,
std::optional<SymInt> n = std::nullopt,
int64_t dim = -1,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_irfft_symint(self, std::move(n), dim, norm);
}
@ -151,7 +151,7 @@ inline Tensor rfft2(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_rfft2(self, s, dim, norm);
}
@ -167,7 +167,7 @@ inline Tensor irfft2(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_irfft2(self, s, dim, norm);
}
@ -183,7 +183,7 @@ inline Tensor rfftn(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
at::OptionalIntArrayRef dim = std::nullopt,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_rfftn(self, s, dim, norm);
}
@ -199,7 +199,7 @@ inline Tensor irfftn(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
at::OptionalIntArrayRef dim = std::nullopt,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_irfftn(self, s, dim, norm);
}
@ -219,7 +219,7 @@ inline Tensor hfft(
const Tensor& self,
std::optional<SymInt> n = std::nullopt,
int64_t dim = -1,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_hfft_symint(self, std::move(n), dim, norm);
}
@ -238,7 +238,7 @@ inline Tensor ihfft(
const Tensor& self,
std::optional<SymInt> n = std::nullopt,
int64_t dim = -1,
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_ihfft_symint(self, std::move(n), dim, norm);
}
@ -257,7 +257,7 @@ inline Tensor hfft2(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_hfft2(self, s, dim, norm);
}
@ -277,7 +277,7 @@ inline Tensor ihfft2(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_ihfft2(self, s, dim, norm);
}
@ -296,7 +296,7 @@ inline Tensor hfftn(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_hfftn(self, s, dim, norm);
}
@ -316,7 +316,7 @@ inline Tensor ihfftn(
const Tensor& self,
at::OptionalIntArrayRef s = std::nullopt,
IntArrayRef dim = {-2, -1},
-std::optional<c10::string_view> norm = std::nullopt) {
+std::optional<std::string_view> norm = std::nullopt) {
return torch::fft_ihfftn(self, s, dim, norm);
}

View file

@ -307,7 +307,7 @@ inline Tensor pad_sequence(
ArrayRef<Tensor> sequences,
bool batch_first = false,
double padding_value = 0,
-c10::string_view padding_side = "right") {
+std::string_view padding_side = "right") {
return at::pad_sequence(sequences, batch_first, padding_value, padding_side);
}

View file

@ -76,7 +76,7 @@ std::shared_ptr<Source> SourceRangeDeserializer::deserialize_source(
"Text table index is out of range")
filename = *text_table_[fnameIndex];
-std::vector<c10::string_view> pieces;
+std::vector<std::string_view> pieces;
std::vector<std::shared_ptr<std::string>> strs;
for (int64_t i : textIndex) {