mirror of https://github.com/saymrwulf/pytorch.git, synced 2026-05-14 20:57:59 +00:00
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/54042
Pull Request resolved: https://github.com/pytorch/pytorch/pull/53881

1. Fix the position_weighted optimizer: the position-weighted layer uses the default optimizer, but its parameters are actually gradient slices, which causes problems if not handled properly in the new optimizer. The fix is to use SparseAdagrad when the gradient is a slice.
2. Implement optimizer variants v1 and v2: first momentum with/without bias correction.
3. Also implement decoupled weight decay in the new optimizer.

Test Plan:
buck test //caffe2/caffe2/fb/dper/layer_models/tests/split_1:sparse_nn_test_2 -- test_mlp_optimization
buck test //caffe2/caffe2/python:optimizer_test -- TestDecayAdagrad
buck test //caffe2/caffe2/python/operator_test:decay_adagrad_test

ctr_mbl_feed work flow: f255731660
oc work flow: f255739503

Reviewed By: 0x10cxR1

Differential Revision: D26839668

fbshipit-source-id: 2b6881c1a88540ef5766be40f5e80001257e2199
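For context on item 3, here is a minimal NumPy sketch contrasting coupled L2 regularization with decoupled weight decay. This is illustrative only, not the caffe2 implementation; the function names and default constants are hypothetical. In the decoupled form the decay term skips the adaptive rescaling, which is the shape of the grad_out line in the reference implementation in the test file below. Note that the test's reference computes param + LR * grad_out, a sign convention that differs from the textbook descent step used here.

import numpy as np

def coupled_l2_step(param, mom2, grad, lr=0.1, wd=0.01, eps=1e-8):
    # L2 regularization: the decay term enters the gradient and is then
    # rescaled by the adaptive denominator along with everything else.
    g = grad + wd * param
    mom2 = mom2 + np.square(g)
    return param - lr * g / (np.sqrt(mom2) + eps), mom2

def decoupled_step(param, mom2, grad, lr=0.1, wd=0.01, eps=1e-8):
    # Decoupled weight decay: the adaptive step is computed from the raw
    # gradient, and wd * param is added to the step afterwards.
    mom2 = mom2 + np.square(grad)
    step = grad / (np.sqrt(mom2) + eps) + wd * param
    return param - lr * step, mom2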
68 lines
2.6 KiB
Python
import functools

from hypothesis import given
import hypothesis.strategies as st
import numpy as np

from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu

class TestDecayAdagrad(hu.HypothesisTestCase):

    @staticmethod
    def ref_decay_adagrad(param, mom1, mom2, grad, LR, ITER,
                          beta1, beta2, epsilon, weight_decay,
                          bias_correction_first, output_grad=False):
        t = ITER + 1
        # First moment: exponential moving average of the gradient.
        mom1_out = (beta1 * mom1) + (1 - beta1) * grad
        # Second moment: plain Adagrad sum of squared gradients.
        mom2_out = mom2 + np.square(grad)
        if bias_correction_first:
            # Adam-style bias correction for the first moment.
            c = 1 - np.power(beta1, t)
        else:
            c = 1.0
        # Decoupled weight decay: weight_decay * param is added to the
        # adaptive step rather than folded into the raw gradient.
        grad_out = mom1_out / c / (np.sqrt(mom2_out) + epsilon) + weight_decay * param
        param_out = param + LR * grad_out

        return param_out, mom1_out, mom2_out
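    # Added commentary (not in the original file): a worked example of the
    # bias correction above. With a cold start (mom1 = mom2 = 0), grad = 1,
    # beta1 = 0.9 and ITER = 0:
    #   mom1_out = 0.1 and mom2_out = 1, so
    #   bias_correction_first=True  -> c = 1 - 0.9**1 = 0.1, grad_out ~= 1.0
    #   bias_correction_first=False -> c = 1.0,              grad_out ~= 0.1
    # i.e. the correction undoes the (1 - beta1) damping of early steps.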
    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           weight_decay=st.floats(min_value=0.01, max_value=0.99,
                                  allow_nan=False, allow_infinity=False),
           **hu.gcs_cpu_only)
    def test_decay_adagrad(self, inputs, ITER, LR, beta1, beta2, epsilon,
                           weight_decay, gc, dc):
        bias_correction_first = True

        param, mom1, mom2, grad = inputs
        # The squared-gradient accumulator must be non-negative.
        mom2 = np.abs(mom2)
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)

        op = core.CreateOperator(
            "DecayAdagrad",
            ["param", "mom1", "mom2", "grad", "lr", "iter"],
            ["output_param", "output_mom1", "output_mom2"],
            beta1=beta1, beta2=beta2, epsilon=epsilon,
            weight_decay=weight_decay,
            bias_correction_first=bias_correction_first)

        # Iter lives on the CPU.
        input_device_options = {'iter': hu.cpu_do}

        # Run the operator and compare its outputs against the NumPy
        # reference implementation above.
        self.assertReferenceChecks(
            gc, op,
            [param, mom1, mom2, grad, LR, ITER],
            functools.partial(
                self.ref_decay_adagrad,
                beta1=beta1, beta2=beta2, epsilon=epsilon,
                weight_decay=weight_decay,
                bias_correction_first=bias_correction_first),
            input_device_options=input_device_options)
if __name__ == "__main__":
    import unittest
    unittest.main()
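Since the module ends with a standard unittest entry point, the test can presumably also be run directly with Python (e.g. python caffe2/python/operator_test/decay_adagrad_test.py, path inferred from the buck target in the test plan above), in addition to the buck invocations listed in the commit message.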