diff --git a/caffe2/python/recurrent.py b/caffe2/python/recurrent.py
index d4762f08c683e3..8bb0d9cfd6d656 100644
--- a/caffe2/python/recurrent.py
+++ b/caffe2/python/recurrent.py
@@ -282,7 +282,7 @@ def map_to_dual_list(m):
     cell_net.Proto().type = 'simple'
 
     # The last output is a list of step workspaces,
-    # which is only needed internally for gradient propogation
+    # which is only needed internally for gradient propagation
     return results[:-1]
 
 
diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py
index b68765013e76d5..f41390157cd1d5 100644
--- a/test/distributed/test_c10d_nccl.py
+++ b/test/distributed/test_c10d_nccl.py
@@ -2183,7 +2183,7 @@ def div(fut):
             process_group, allreduce_with_then_hook
         )
 
-        # check whether the grads are equal to what allreduce returns multuplied by 5.
+        # check whether the grads are equal to what allreduce returns multiplied by 5.
         # without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
         self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
 
diff --git a/test/test_fx.py b/test/test_fx.py
index 8fb7c76aa28e83..3e3b12fb671480 100644
--- a/test/test_fx.py
+++ b/test/test_fx.py
@@ -1550,7 +1550,7 @@ def forward(self, x):
         self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
                                        'call_module', 'output']))
 
-        # Test shape propogation and make sure results match actual
+        # Test shape propagation and make sure results match actual
         self.assertEqual(output_shape, ref_out.shape)
         self.assertEqual(output_stride, ref_out.stride())
diff --git a/torch/csrc/jit/runtime/autodiff.cpp b/torch/csrc/jit/runtime/autodiff.cpp
index a8dc3cc7859a7d..7e1f6182534cc2 100644
--- a/torch/csrc/jit/runtime/autodiff.cpp
+++ b/torch/csrc/jit/runtime/autodiff.cpp
@@ -389,7 +389,7 @@ bool outputRequiresGrad(Value* output) {
 static ReverseDetails addReverseInline(Gradient& grad_desc) {
   auto& graph = *grad_desc.f;
   // note: reverse_node is intentionally not inserted to avoid
-  // accidentally acting on it (e.g. in elminate dead code),
+  // accidentally acting on it (e.g. in eliminate dead code),
   // std::cout << *reverse_node << to view its state.
   auto reverse_node = graph.create(prim::Reverse, 0);
   auto reverse_block = reverse_node->addBlock();
diff --git a/torchgen/gen.py b/torchgen/gen.py
index 2ff7773562da56..807fa092371abe 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -103,7 +103,7 @@
 # - 'api' has conversions for how to translate JIT schema into
 #   the various C++ APIs that the codegen interacts with. There
 #   are in fact THREE different C++ APIs: the public C++ API,
-#   the dispatcher API, and the legacy disaptcher API. See each
+#   the dispatcher API, and the legacy dispatcher API. See each
 #   of these respective files for more information
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #