Source code (a minimal TensorFlow 1.x network that learns to predict the next word index in "the quick brown fox jumps over the lazy dog" from a sliding window of the previous three word indices):
#libs
# The original targets the TensorFlow 1.x API; under TensorFlow 2 the same
# graph code runs through the v1 compatibility layer used here.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as pyplot  # imported for plotting the losses (see the sketch below)

#data
Text = "the quick brown fox jumps over the lazy dog"
Data = [0, 1, 2, 3, 4, 5, 0, 6, 7]  # word indices: the=0, quick=1, ..., lazy=6, dog=7

#make training data: each x is a window of 3 indices, each y the index that follows
X = []
Y = []
Max = 7    # largest word index, used to scale x & y into [0, 1]
Bsize = 6  # batch size: all 6 windows, i.e. the whole epoch at once

for I in range(len(Data) - 3):
    X += [Data[I:I+3]]
    Y += [[Data[I+3]]]
#end for

print("X =", X)
print("Y =", Y)

#normalise to [0, 1] so the sigmoid output can match the targets
for I in range(len(X)):
    Y[I][0] /= Max
    for J in range(len(X[0])):
        X[I][J] /= Max
#end for

#model: 3 inputs -> 20 leaky-ReLU hidden units -> 1 sigmoid output
Input = tf.placeholder(dtype=tf.float32, shape=[Bsize, 3])
Expected = tf.placeholder(dtype=tf.float32, shape=[Bsize, 1])
Weight1 = tf.Variable(tf.random_uniform(shape=[3, 20], minval=-1, maxval=1))
Bias1 = tf.Variable(tf.random_uniform(shape=[20], minval=-1, maxval=1))
Hidden1 = tf.nn.leaky_relu(tf.matmul(Input, Weight1) + Bias1)
Weight2 = tf.Variable(tf.random_uniform(shape=[20, 1], minval=-1, maxval=1))
Bias2 = tf.Variable(tf.random_uniform(shape=[1], minval=-1, maxval=1))
Output = tf.sigmoid(tf.matmul(Hidden1, Weight2) + Bias2)
Loss = tf.reduce_sum(tf.square(Expected - Output))
Optimiser = tf.train.GradientDescentOptimizer(1e-1)
Training = Optimiser.minimize(Loss)

#training
Sess = tf.Session()
Init = tf.global_variables_initializer()
Sess.run(Init)
Epochs = 10000
Losses = []
Feed = {Input: X, Expected: Y}

for I in range(Epochs):
    if I % (Epochs//10) == 0:  # log the loss 10 times over the run
        Lossvalue = Sess.run(Loss, feed_dict=Feed)
        Losses += [Lossvalue]
        print("Loss:", Lossvalue)
    #end if
    Sess.run(Training, feed_dict=Feed)
#end for

Lastloss = Sess.run(Loss, feed_dict=Feed)
Losses += [Lastloss]
print("Loss:", Lastloss, "(last)")

#eval: denormalise and round the outputs back to word indices
print("\nEval:")
Evalresults = Sess.run(Output, feed_dict=Feed).tolist()
for I in range(len(Evalresults)):
    Evalresults[I][0] = round(Evalresults[I][0]*Max)
print(Evalresults)

Sess.close()
print("\nDone.")
#eof
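By construction the targets are Data[3:], so if training converges the eval step should print the six denormalised predictions as [[3], [4], [5], [0], [6], [7]].

The script imports matplotlib.pyplot and collects the loss checkpoints in Losses, but never plots them. A minimal sketch of that missing step, using only names defined in the script above; insert it just before Sess.close():

# plot the ten logged losses plus the final one
pyplot.plot(Losses, marker="o")
pyplot.xlabel("checkpoint (every Epochs/10 iterations)")
pyplot.ylabel("sum-of-squares loss")
pyplot.show()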
Colab link:
https://colab.research.google.com/drive/1caY8GUts-BOUjl-uQ1WXRrO94wx_n8uT