Summary: This allows us to do in-place relu, and also corrects the previous inconsistency between the cudnn impl and the non-cudnn impl. This implementation butchers the cudnn interface, in the sense that we pass in the output instead of the input for the gradient pass. We do have a gradient checker to guard against this situation, so we should be safe.

Reviewed By: asaadaldien

Differential Revision: D4889426

fbshipit-source-id: 081f8fe06de78413b5786086bfd5ae6c8128cd6e
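The gradient trick in the summary is simple enough to show in isolation. Below is a minimal NumPy sketch (not Caffe2's actual kernel) of why the backward pass can take the output Y instead of the input X: since Y = max(X, 0), the mask Y > 0 is identical to X > 0, so dX = dY * (Y > 0), and the forward pass is then free to overwrite X in place because X is never needed again.

import numpy as np

def relu_forward(X):
    # In-place ReLU: the output may alias the input, since the
    # forward pass reads each element only once.
    return np.maximum(X, 0, out=X)

def relu_backward_from_output(Y, dY):
    # Y = max(X, 0), so Y > 0 exactly where X > 0 (the kink at
    # X == 0 gets gradient 0 either way). This is why the backward
    # pass can be fed the *output* instead of the input.
    return dY * (Y > 0)

X = np.random.randn(4).astype(np.float32)
dY = np.ones_like(X)
X_ref = X.copy()
Y = relu_forward(X)  # X is overwritten with Y
dX = relu_backward_from_output(Y, dY)
assert np.allclose(dX, (X_ref > 0) * dY)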
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest

import numpy as np
from hypothesis import given
import hypothesis.strategies as st

from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu


class TestRelu(hu.HypothesisTestCase):

    @given(X=hu.tensor(),
           engine=st.sampled_from(["", "CUDNN"]),
           **mu.gcs)
    def test_relu(self, X, gc, dc, engine):
        op = core.CreateOperator("Relu", ["X"], ["Y"], engine=engine)
        # Move values away from zero so the gradient checker never
        # samples points at the ReLU kink, where the derivative is
        # undefined and numeric/analytic gradients would disagree.
        X += 0.02 * np.sign(X)
        X[X == 0.0] += 0.02

        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertGradientChecks(gc, op, [X], 0, [0])


if __name__ == "__main__":
    unittest.main()
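For context on the 0.02 nudge in the test above: a central-difference gradient estimate with step h straddles the kink whenever |x| < h, so the numeric and analytic gradients disagree there. A self-contained sketch (plain Python, independent of Caffe2's gradient checker, with an assumed step h = 1e-2):

def numeric_grad(f, x, h=1e-2):
    # Central difference; when |x| < h the two evaluation points
    # straddle x == 0 and the estimate is biased.
    return (f(x + h) - f(x - h)) / (2 * h)

relu = lambda x: max(x, 0.0)
analytic = lambda x: 1.0 if x > 0 else 0.0

for x in (0.5, 0.005):  # far from / close to the kink
    print("x=%+.3f  numeric=%.3f  analytic=%.3f"
          % (x, numeric_grad(relu, x), analytic(x)))
# x=+0.500  numeric=1.000  analytic=1.000
# x=+0.005  numeric=0.750  analytic=1.000   <- kink straddled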