Wednesday, 11 September 2019

Case Study: 2-node Linear Separation

This is the case when a single line of linear separation won't do, but a hidden layer of at least 2 nodes (i.e. 2 separating lines) will.

Source code:
#see:
#https://i.imgur.com/RWCpYNw.png

#libs
import tensorflow        as tf;
import matplotlib.pyplot as pyplot;
import numpy             as np;

#mockup helper: lets layer sizes read as units(N) in the model code
def units(Num):
  """Return Num unchanged; exists purely to label unit counts."""
  return Num
#end def

#data: five 2-D input points and their scalar targets (parallel lists)
X = [[0,0],[0,1],[1,0],[1,1],[2,0]];
Y = [[0],  [1],  [1],  [2],  [0]  ];
Batch_Size = 5;
Max_X      = 2;
Max_Y      = 2;

#normalise features and targets into [0,1] in place
#(zip walks the two parallel lists together instead of indexing by position)
for Point, Target in zip(X, Y):
  Point[0]  /= Max_X;
  Point[1]  /= Max_X;
  Target[0] /= Max_Y;
#end for

#model: fully connected network, 2 inputs -> 20 hidden units -> 1 output
#placeholders have a fixed batch dimension: every run feeds all 5 samples
Input     = tf.placeholder(dtype=tf.float32, shape=[Batch_Size,2]);
Expected  = tf.placeholder(dtype=tf.float32, shape=[Batch_Size,1]);

#hidden layer; just units(2) would do, THEORETICALLY
#(20 units are used instead — presumably to make training easier; TODO confirm)
#weights and biases start uniform in [-1,1]
Weight1   = tf.Variable(tf.random_uniform(shape=[2,units(20)], minval=-1, maxval=1));
Bias1     = tf.Variable(tf.random_uniform(shape=[  units(20)], minval=-1, maxval=1));
Hidden1   = tf.nn.relu(tf.matmul(Input,Weight1) + Bias1);

#output of 1 neuron only, units(1):
#sigmoid squashes the output into (0,1), matching the normalised targets
Weight2   = tf.Variable(tf.random_uniform(shape=[20,units(1)], minval=-1, maxval=1));
Bias2     = tf.Variable(tf.random_uniform(shape=[   units(1)], minval=-1, maxval=1));
Output    = tf.sigmoid(tf.matmul(Hidden1,Weight2) + Bias2);

#sum-of-squared-error loss, minimised by plain gradient descent (lr = 0.1)
Loss      = tf.reduce_sum(tf.square(Expected-Output));
Optimiser = tf.train.GradientDescentOptimizer(1e-1);
Training  = Optimiser.minimize(Loss);

#train: create a TF1 session and initialise all variables
Sess = tf.Session();
Init = tf.global_variables_initializer();
Sess.run(Init);

#run 10000 gradient-descent steps on the full batch,
#sampling the loss every 1000 steps for the loss curve
Losses = [];
for I in range(10000):
  if (I%1000==0):
    Lossvalue = Sess.run(Loss, feed_dict={Input:X, Expected:Y});
    print("Loss:",round(Lossvalue,18));
    Losses += [Lossvalue];
  #end if
  
  Sess.run(Training, feed_dict={Input:X, Expected:Y});
#end for

#also record the loss after the final training step
Lastloss = Sess.run(Loss, feed_dict={Input:X, Expected:Y});
Losses  += [Lastloss];
print("Loss:",round(Lastloss,18),"(last)");

#eval: run the trained network on the training points
Evalresults = Sess.run(Output, feed_dict={Input:X, Expected:Y});
Evalresults = Evalresults.tolist();

#rescale each prediction back to the original Y range;
#each entry is [rounded class value, raw rescaled output]
#(a comprehension replaces the original index-based in-place loop)
Evalresults = [
  [round(Row[0]*Max_Y), round(Row[0]*Max_Y,18)]
  for Row in Evalresults
];

print("\nEval:");
print(Evalresults);

#plot the sampled loss values as a curve
print("\nLoss curve:");
pyplot.plot(Losses,"-bo");
#eof

Colab link:
https://colab.research.google.com/drive/1eZ1uEsq4TA0Qwe4_1moc-AGd08y26rVk

No comments:

Post a Comment