Wednesday, 16 October 2019

TensorFlow: Single Neuron Linear Regression without Bias

A minimal TensorFlow 2 example: a single neuron (a 2x1 weight matrix, no bias term) fitted to one data point with log-cosh loss and plain SGD.

Source code:

%tensorflow_version 2.x
%reset -f

#libs
import tensorflow as tf

#constants
BSIZE = 1  #batch size: a single sample

#model: one neuron, 2 inputs -> 1 output, no bias
class model(tf.Module):
  def __init__(this):
    super().__init__()
    #2x1 weight matrix, initialised uniformly in [-1,1)
    this.W1 = tf.Variable(tf.random.uniform([2,1], -1,1))

  @tf.function(input_signature=[tf.TensorSpec([BSIZE,2])])
  def __call__(this, Inp):
    #forward pass: yhat = Inp x W1 (no bias added)
    return tf.matmul(Inp, this.W1)

#data: a single training pair, [1,2] --> 3
X = tf.convert_to_tensor([[1,2]], tf.float32)
Y = tf.convert_to_tensor([[3]],  tf.float32)

#train
Model = model()
Loss  = tf.losses.LogCosh()
Optim = tf.optimizers.SGD(1e-1)
Steps = 10

for I in range(Steps):
  #log the loss 10 times over the run
  if I % (Steps//10) == 0:
    Out       = Model(X)
    Lossvalue = Loss(Y, Out)
    print("Loss:", Lossvalue.numpy())

  #forward pass under the tape, then one SGD step on W1
  with tf.GradientTape() as T:
    Out       = Model(X)
    Lossvalue = Loss(Y, Out)

  Grads = T.gradient(Lossvalue, Model.trainable_variables)
  Optim.apply_gradients(zip(Grads, Model.trainable_variables))

Out       = Model(X)
Lossvalue = Loss(Y, Out)
print("Loss:", Lossvalue.numpy(), "(Last)")

#test
print("\nTest:")
print(X.numpy()[0], "-->", Y.numpy()[0])
print(Model(X).numpy()[0][0])

print("\nDone.")
#eof
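
Because the forward pass is a single matmul with no bias, the neuron computes yhat = x1*w1 + x2*w2, so the lone training pair [1,2] --> 3 is fitted exactly by any weights with w1 + 2*w2 = 3. A quick way to see where SGD ended up is to inspect W1 after the loop (a minimal sketch reusing the Model object from the listing above; not part of the original post):

#inspect the trained weights (run after the training loop)
W = Model.W1.numpy()             #shape [2,1]
w1, w2 = W[0,0], W[1,0]
print("w1:", w1, " w2:", w2)
print("w1 + 2*w2 =", w1 + 2*w2)  #approaches 3 as the loss falls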
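
The @tf.function wrapper with a fixed input_signature also makes the module easy to export: tf.saved_model can serialise the traced __call__ along with W1. A hedged sketch (the /tmp/neuron path and the export step are illustrative additions, not from the post):

#save the trained module, then reload and call it
tf.saved_model.save(Model, "/tmp/neuron")    #export path is an assumption
Restored = tf.saved_model.load("/tmp/neuron")
print(Restored(X).numpy()[0][0])             #matches Model(X)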