Friday, 27 September 2019

Sample TensorFlow 2 DNN: Tensor Maths Style

TensorFlow 2 is slightly different from TensorFlow 1 in that eager execution is enabled by default, so everything is a value instead of an op plus a value. The output of any `tf.*` function is now a concrete tensor value that can be used immediately, rather than a graph node that has to be run in a session.
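
For instance (a minimal sketch, separate from the listing below), a matrix multiplication now executes immediately and its result can be read with `.numpy()`, with no graph building or `Session.run` involved:

import tensorflow as tf;

A = tf.constant([[1.,2.],[3.,4.]]);
B = tf.matmul(A,A);  #executes eagerly, B already holds the result
print(B.numpy());    #prints [[ 7. 10.] [15. 22.]] right away, no session needed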

Source code:
#!pip install tensorflow==2.0.0rc2
#%tensorflow_version 2.x
%reset -f

#libs
import tensorflow as tf;

#data
X = [[0,0],[0,1],[1,0],[1,1]]; #the four XOR input pairs
Y = [[0],  [1],  [1],  [0]  ]; #their XOR targets
X = tf.convert_to_tensor(X,tf.float32);
Y = tf.convert_to_tensor(Y,tf.float32);

#model
W1 = tf.Variable(tf.random.uniform([2,20],-1,1)); #hidden layer: 2 inputs -> 20 units
B1 = tf.Variable(tf.random.uniform([  20],-1,1));

W2 = tf.Variable(tf.random.uniform([20,1],-1,1)); #output layer: 20 units -> 1 value, matching Y's shape
B2 = tf.Variable(tf.random.uniform([   1],-1,1));

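#@tf.function traces this Python function into a TensorFlow graph on the
#first call; later calls reuse the compiled graph instead of re-running
#the ops one by one in eager mode.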
@tf.function
def feedforward(X):
  H1  = tf.nn.leaky_relu(tf.matmul(X,W1) + B1); #hidden layer, leaky ReLU activation
  Out = tf.sigmoid(tf.matmul(H1,W2) + B2);      #output layer, sigmoid keeps outputs in (0,1)
  return Out;
#end def

#train
Optim = tf.keras.optimizers.SGD(1e-1); #plain SGD with learning rate 0.1
Steps = 1000;

for I in range(Steps):
  if I%(Steps//10)==0: #report the loss every 10% of training
    Out  = feedforward(X);
    Loss = tf.reduce_sum(tf.square(Y-Out)); #sum-of-squares loss
    print("Loss:",Loss.numpy());
  #end if

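  #GradientTape records the ops run inside the block so that T.gradient
  #can backpropagate the loss to the listed variables afterwards.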
  with tf.GradientTape() as T:
    Out  = feedforward(X);
    Loss = tf.reduce_sum(tf.square(Y-Out));
  #end with

  Grads = T.gradient(Loss,[W1,B1,W2,B2]);          #dLoss/dParam for every weight and bias
  Optim.apply_gradients(zip(Grads,[W1,B1,W2,B2])); #one SGD update step
#end for

Out  = feedforward(X);
Loss = tf.reduce_sum(tf.square(Y-Out));
print("Loss:",Loss.numpy(),"(Last)");

print("\nDone.");
#eof
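
To check what the trained network actually predicts for the four XOR inputs, one extra line can be appended after training (not part of the listing above):

print(feedforward(X).numpy()); #should approach [[0],[1],[1],[0]] once the loss is small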
