Fix multiple spelling and grammar mistakes (#48592)
Summary: I found a number of spelling & grammatical mistakes in the repository. Previously I had these fixes submitted individually, but I saw that a single word change was apparently too small for a PR to be merged. Hopefully this new PR has a sufficient number of changes.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/48592

Reviewed By: ejguan

Differential Revision: D25224216

Pulled By: mrshenli

fbshipit-source-id: 2af3db2aee486563efd0dffc4e8f777306a73e44
This commit is contained in:
parent 2200e72293
commit d6ddd78eb0

5 changed files with 11 additions and 11 deletions

@@ -255,7 +255,7 @@ CUDA, MSVC, and PyTorch versions are interdependent; please install matching ver
 | 10.2 | Visual Studio 2019 (16.X) (`_MSC_VER` < 1930) | 1.5.0 ~ 1.7.0 |
 | 11.0 | Visual Studio 2019 (16.X) (`_MSC_VER` < 1930) | 1.7.0 |

-Note: There's a [compilation issue](https://github.com/oneapi-src/oneDNN/issues/812) in serveral Visual Studio 2019 versions since 16.7.1, so please make sure your Visual Studio 2019 version is not in 16.7.1 ~ 16.7.5
+Note: There's a [compilation issue](https://github.com/oneapi-src/oneDNN/issues/812) in several Visual Studio 2019 versions since 16.7.1, so please make sure your Visual Studio 2019 version is not in 16.7.1 ~ 16.7.5

 Additional libraries such as
 [Magma](https://developer.nvidia.com/magma), [oneDNN, a.k.a MKLDNN or DNNL](https://github.com/oneapi-src/oneDNN), and [Sccache](https://github.com/mozilla/sccache) are often needed. Please refer to the [installation-helper](https://github.com/pytorch/pytorch/tree/master/.jenkins/pytorch/win-test-helpers/installation-helpers) to install them.

@@ -403,4 +403,4 @@ Note: This project is unrelated to [hughperkins/pytorch](https://github.com/hugh

 ## License

-PyTorch is a BSD-style licensed, as found in the [LICENSE](LICENSE) file.
+PyTorch has a BSD-style license, as found in the [LICENSE](LICENSE) file.

@@ -356,7 +356,7 @@ Tensor binaryElementwiseKernel(
     const Tensor& input1,
     const Tensor& input2,
     NSString* arrayKernel,
-    NSString* nonarrayKernal) {
+    NSString* nonarrayKernel) {
   MPSImage* X1 = imageFromTensor(input1);
   MPSImage* X2 = imageFromTensor(input2);
   std::vector<int64_t> outputSize = input1.sizes().vec();

@@ -367,7 +367,7 @@ Tensor binaryElementwiseKernel(
   mt.texture()->allocateTemporaryTextureStorage(outputSize, cb1);
   MPSImage* Y = imageFromMetalTensor(mt);
   id<MTLComputePipelineState> state = [[MPSCNNContext sharedInstance]
-      pipelineState:kernelFor(X1, arrayKernel, nonarrayKernal)];
+      pipelineState:kernelFor(X1, arrayKernel, nonarrayKernel)];
   id<MTLComputeCommandEncoder> encoder = [cb1.buffer computeCommandEncoder];
   [encoder setComputePipelineState:state];
   [encoder setTexture:[X1 texture] atIndex:0];

@@ -388,7 +388,7 @@ Tensor& binaryElementwiseKernel_(
     Tensor& input1,
     const Tensor& input2,
     NSString* arrayKernel,
-    NSString* nonarrayKernal) {
+    NSString* nonarrayKernel) {
   MPSImage* X1 = imageFromTensor(input1);
   MPSImage* X2 = imageFromTensor(input2);
   std::vector<int64_t> outputSize = input1.sizes().vec();

@@ -397,7 +397,7 @@ Tensor& binaryElementwiseKernel_(
   TORCH_CHECK([cb1 isEqual:cb2], @"inputs have different command buffer");
   MPSImage* Y = [MPSImage temporaryImageFromSize:outputSize commandBuffer:cb1];
   id<MTLComputePipelineState> state = [[MPSCNNContext sharedInstance]
-      pipelineState:kernelFor(X1, arrayKernel, nonarrayKernal)];
+      pipelineState:kernelFor(X1, arrayKernel, nonarrayKernel)];
   id<MTLComputeCommandEncoder> encoder = [cb1.buffer computeCommandEncoder];
   [encoder setComputePipelineState:state];
   [encoder setTexture:[X1 texture] atIndex:0];

@@ -112,8 +112,8 @@ void inBatchBroadcast(
     setShape(blob, new_blob);
     const auto rit = reversed.find(blob);
     if (rit != reversed.end()) {
-      const auto& orignal_input = rit->second;
-      setShape(orignal_input, "");
+      const auto& original_input = rit->second;
+      setShape(original_input, "");
     }
   }

@@ -1944,8 +1944,8 @@ TEST(Cuda, MaskCompoundInnerLoop_CUDA) {

 // Tests the case with two loops fused into a common parent, which is not bound
 // to any block or thread dimension - however it's two inner loops are bound to
-// the first thread dimenions. This should work just like the MaskThreadDim test
-// where the bigger loop is unmasked but the smaller is masked.
+// the first thread dimensions. This should work just like the MaskThreadDim
+// test where the bigger loop is unmasked but the smaller is masked.
 TEST(Cuda, MaskInnerLoopOneBlock_CUDA) {
   KernelScope kernel_scope;
   int OUTER_SIZE = 10;

@@ -147,7 +147,7 @@ using ContextPtr = std::shared_ptr<DistAutogradContext>;
 // doesn't know the current context. It's just a util class.
 class TORCH_API ThreadLocalDistAutogradContext {
  public:
-  // Store 'new_context' to the thread local varaible maintained by this class.
+  // Store 'new_context' to the thread local variable maintained by this class.
   explicit ThreadLocalDistAutogradContext(ContextPtr&& new_context);
   ~ThreadLocalDistAutogradContext();
