tensorflow optimizers minimize the loss with gradients
================================ simple case

import tensorflow as tf
import numpy as np

def f(val):
    return 1 * val

inSz = 1
outSz = 1

x = tf.placeholder(tf.float32, [inSz], name="x")
y = tf.placeholder(tf.float32, [outSz], name="y")

init = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)

w1 = tf.get_variable("w1", [1], tf.float32, init, trainable=True)

output = tf.multiply(x, w1)

mse = tf.square(tf.subtract(output, y))        # squared-error loss
opt = tf.train.AdamOptimizer(0.1)
grads_vars = opt.compute_gradients(mse, [w1])  # list of (gradient, variable) pairs
trainOp = opt.apply_gradients(grads_vars)      # Adam update applied to w1

with tf.Session() as ss:
    ss.run(tf.global_variables_initializer())
    for i in range(40):
        invalue = 1.0 - 0.1 * i
        outvalue = f(invalue)

        returns = ss.run({"trainOp": trainOp, "grads_vars": grads_vars},
                         feed_dict={x: [invalue], y: [outvalue]})
        print("gv=", returns["grads_vars"])


------------------------ output
$ python3 grad1.py
gv= [(array([-1.96553004], dtype=float32), array([ 0.11723495], dtype=float32))]
gv= [(array([-1.43007934], dtype=float32), array([ 0.21520177], dtype=float32))]
gv= [(array([-1.00454175], dtype=float32), array([ 0.30959633], dtype=float32))]
gv= [(array([-0.67659557], dtype=float32), array([ 0.39896959], dtype=float32))]
gv= [(array([-0.43274194], dtype=float32), array([ 0.48216373], dtype=float32))]
gv= [(array([-0.25891814], dtype=float32), array([ 0.55845356], dtype=float32))]
gv= [(array([-0.14129487], dtype=float32), array([ 0.62758404], dtype=float32))]
gv= [(array([-0.06703488], dtype=float32), array([ 0.68971366], dtype=float32))]
gv= [(array([-0.02482291], dtype=float32), array([ 0.74530613], dtype=float32))]
gv= [(array([-0.00509388], dtype=float32), array([ 0.79501069], dtype=float32))]
gv= [(array([ 0.], dtype=float32), array([ 0.83955526], dtype=float32))]
gv= [(array([-0.00320889], dtype=float32), array([ 0.87966293], dtype=float32))]
gv= [(array([-0.00962697], dtype=float32), array([ 0.91598856], dtype=float32))]
gv= [(array([-0.01512206], dtype=float32), array([ 0.94907117], dtype=float32))]
gv= [(array([-0.01629722], dtype=float32), array([ 0.97929788], dtype=float32))]
gv= [(array([-0.01035106], dtype=float32), array([ 1.00687504], dtype=float32))]
gv= [(array([ 0.00495], dtype=float32), array([ 1.03180671], dtype=float32))]
gv= [(array([ 0.03117061], dtype=float32), array([ 1.05388248], dtype=float32))]
gv= [(array([ 0.06896954], dtype=float32), array([ 1.07267869], dtype=float32))]
gv= [(array([ 0.11773942], dtype=float32), array([ 1.0875833], dtype=float32))]
gv= [(array([ 0.17516661], dtype=float32), array([ 1.09785533], dtype=float32))]
gv= [(array([ 0.23680991], dtype=float32), array([ 1.10272753], dtype=float32))]
gv= [(array([ 0.29585525], dtype=float32), array([ 1.10155463], dtype=float32))]
gv= [(array([ 0.34325477], dtype=float32), array([ 1.09398866], dtype=float32))]
gv= [(array([ 0.36843565], dtype=float32), array([ 1.08014917], dtype=float32))]
gv= [(array([ 0.36067128], dtype=float32), array([ 1.0607568], dtype=float32))]
gv= [(array([ 0.31107482], dtype=float32), array([ 1.03721702], dtype=float32))]
gv= [(array([ 0.21511427], dtype=float32), array([ 1.01165771], dtype=float32))]
gv= [(array([ 0.07554216], dtype=float32), array([ 0.9868952], dtype=float32))]
gv= [(array([-0.09461667], dtype=float32), array([ 0.96622533], dtype=float32))]
gv= [(array([-0.27019739], dtype=float32), array([ 0.95288545], dtype=float32))]
gv= [(array([-0.41555056], dtype=float32), array([ 0.94919068], dtype=float32))]
gv= [(array([-0.49183381], dtype=float32), array([ 0.95571381], dtype=float32))]
gv= [(array([-0.46854743], dtype=float32), array([ 0.97095138], dtype=float32))]
gv= [(array([-0.33464015], dtype=float32), array([ 0.99147183], dtype=float32))]
gv= [(array([-0.10660172], dtype=float32), array([ 1.01231492], dtype=float32))]
gv= [(array([ 0.16649704], dtype=float32), array([ 1.02784395], dtype=float32))]
gv= [(array([ 0.40596458], dtype=float32), array([ 1.03349435], dtype=float32))]
gv= [(array([ 0.52519089], dtype=float32), array([ 1.0277853], dtype=float32))]
gv= [(array([ 0.46734878], dtype=float32), array([ 1.01306129], dtype=float32))]

-------------------------------- comment
The first value in each pair is the gradient and the second is the weight.
The weight approaches 1 and then stays around it.
Note that the gradient is d(loss)/d(weight).
When the gradient is negative, d(loss) and d(weight) have opposite signs, so the weight should be increased to reduce the loss.
When the gradient is positive, d(loss) and d(weight) have the same sign, so the weight should be decreased to reduce the loss.
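
To make the sign rule concrete, here is a small plain-Python sketch of one bare gradient-descent step on the same squared-error loss (illustrative values; Adam rescales the step size but follows the same sign):

# one gradient-descent step for loss = (w*x - y)^2
def sgd_step(w, x, y, lr=0.1):
    grad = 2.0 * (w * x - y) * x   # d(loss)/d(w)
    return w - lr * grad           # negative gradient -> w increases, positive -> w decreases

print(sgd_step(0.0, x=1.0, y=1.0)) # grad = -2.0, so w moves up from 0.0 to 0.2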


======================== case of reverse accumulation

import tensorflow as tf
import numpy as np


def f(val):
    return 1. * val

def g(val):
    return 1. * val

fInSz = 1
fOutSz = 1
gInSz = fOutSz
gOutSz = 1
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)

fX = tf.placeholder(tf.float32, [fInSz], name="fX")
fW = tf.get_variable("fW", [1], tf.float32, init, trainable=True)
fOut = tf.multiply(fX, fW)

# gX is declared as a variable only so that d(mse)/d(gX) can be computed;
# its value is always fed with f's output through feed_dict
gX = tf.get_variable("gX", [1], tf.float32, init, trainable=True)
gW = tf.get_variable("gW", [1], tf.float32, init, trainable=True)
gOut = tf.multiply(gX, gW)
gY = tf.placeholder(tf.float32, [gOutSz], name="gY")

mse = tf.square(tf.subtract(gOut, gY))
gOpt = tf.train.AdamOptimizer(0.1)
gGradsVars = gOpt.compute_gradients(mse, [gW, gX])  # gradients w.r.t. gW and gX
gTrainOp = gOpt.apply_gradients((gGradsVars[0],))   # update only gW; the gX gradient is passed back to f

raGrads = tf.placeholder(tf.float32, [gInSz], name="reverseAccumulatedGrads")
fOpt = tf.train.AdamOptimizer(0.1)
#fGradsVars = fOpt.compute_gradients(fOut, [fW], grad_loss=raGrads)
#fTrainOp = fOpt.apply_gradients(fGradsVars)
# grad_loss feeds the downstream gradient d(mse)/d(gX), so fW receives
# d(mse)/d(gX) * d(fOut)/d(fW) by the chain rule
fTrainOp = fOpt.minimize(fOut, var_list=[fW], grad_loss=raGrads)

with tf.Session() as ss:
    ss.run(tf.global_variables_initializer())
    for i in range(900):
        xValue = 1.0 - 0.1 * i
        target = g(f(xValue))

        # forward pass through f to get the value fed into g
        returns = ss.run({"fOut": fOut}, feed_dict={fX: [xValue]})
        fOutValue = returns["fOut"][0]

        # train g and read back d(mse)/d(gX), the reverse-accumulated gradient
        returns = ss.run({"gTrainOp": gTrainOp,
                          "gGradsVars": gGradsVars,
                          "gW": gW},
                         feed_dict={gX: [fOutValue], gY: [target]})
        raGradsValue = returns["gGradsVars"][1][0][0]
        print("g=", returns["gGradsVars"])
        print("gW=", returns["gW"])

        # train f, feeding the gradient accumulated from g through raGrads
        returns = ss.run({"fTrainOp": fTrainOp,
                          #"fGradsVars": fGradsVars,
                          "fW": fW},
                         feed_dict={fX: [xValue], raGrads: [raGradsValue]})
        #print("f=", returns["fGradsVars"])
        print("fW=", returns["fW"])


------------------------------------- output
$ python3 grad3.py
...
g= [(array([-61.3755188], dtype=float32), array([ 0.99749088], dtype=float32)), (array([ 0.68612206], dtype=float32), array([-89.00285339], dtype=float32))]
gW= [ 0.99749088]
fW= [ 1.00367808]

$ python3 grad3.py
...
g= [(array([-8.27773666], dtype=float32), array([-0.99956721], dtype=float32)), (array([ 0.09309305], dtype=float32), array([ 88.93274689], dtype=float32))]
gW= [-0.99956721]
fW= [-0.99978048]

------------------------------------- comment
Both fW and gW approach either 1 or -1.
Note that the target value constrains only the composition g(f()), not g() or f() individually.
So g(f(x)) = x can be obtained either with fW = 1 and gW = 1, i.e. {f(x) = x, g(x) = x}, or with fW = -1 and gW = -1, i.e. {f(x) = -x, g(x) = -x}.
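
The reverse-accumulated gradient fed through raGrads is just the chain rule split at gX = f(x): d(mse)/d(fW) = d(mse)/d(gX) * d(fOut)/d(fW) = d(mse)/d(gX) * x. A small plain-Python sketch of that factorisation (illustrative numbers, Adam's rescaling ignored):

# chain rule split for mse = (gW*gX - gY)^2 with gX = fW*fX
x, y, fW, gW = 2.0, 2.0, 0.5, 0.5
gX = fW * x                              # f's output, fed into g
up = 2.0 * (gW * gX - y) * gW            # d(mse)/d(gX), the value read back as raGrads
print(up * x)                            # gradient handed to Adam for fW; -3.0 here
print(2.0 * (gW * fW * x - y) * gW * x)  # direct d(mse)/d(fW) of the composition; same -3.0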
