Save/load a model subclassed from tf.keras.Model (a minimal example follows the two steps below):
- Save: tf.saved_model.save(Model,Model_Dir);
- Load: tf.keras.models.load_model(Model_Dir);
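For example, a minimal sketch of the Keras-subclass case (the TinyXOR class, layer sizes, and directory path here are illustrative only, assuming a TF 2.x version whose tf.keras.models.load_model can read the SavedModel format):

import tensorflow as tf;

class TinyXOR(tf.keras.Model):
  def __init__(this):
    super().__init__();
    this.Hidden = tf.keras.layers.Dense(20, activation="relu");
    this.Out    = tf.keras.layers.Dense(1,  activation="sigmoid");

  def call(this,X):
    return this.Out(this.Hidden(X));

M = TinyXOR();
M(tf.zeros([4,2]));                              #build the model by calling it once before saving
tf.saved_model.save(M,"/tmp/test-keras-model");  #save
M2 = tf.keras.models.load_model("/tmp/test-keras-model");  #load back as a Keras model
print(M2(tf.zeros([4,2])).numpy().shape);        #(4, 1)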
In order to save a low-level model (built directly on tf.Variable), the tf.Variable(s) must be attributes of a tf.Module instead of global variables, so that the SavedModel can track them:
- Save: tf.saved_model.save(Model,Model_Dir);
- Load: tf.saved_model.load(Model_Dir);
The full source code below demonstrates this with a small XOR network.
Source code:
%tensorflow_version 2.x
%reset -f
#libs
import tensorflow as tf;
#constants
BSIZE = 4;  #batch size, baked into the exported input signature below
#model: a low-level 2-20-1 MLP; the variables are attributes of a tf.Module
#so that tf.saved_model.save can track and store them.
class model(tf.Module):
  def __init__(this):
    super().__init__();
    this.W1 = tf.Variable(tf.random.uniform([2,20], -1,1));
    this.B1 = tf.Variable(tf.random.uniform([ 20], -1,1));
    this.W2 = tf.Variable(tf.random.uniform([20,1], -1,1));
    this.B2 = tf.Variable(tf.random.uniform([ 1], -1,1));

  #fixed input signature so that saving exports a concrete function
  @tf.function(input_signature=[tf.TensorSpec([BSIZE,2],tf.float32)])
  def __call__(this,X):
    H1  = tf.nn.leaky_relu(tf.matmul(X,this.W1) + this.B1);
    Out = tf.sigmoid(tf.matmul(H1,this.W2) + this.B2);
    return Out;

#loss: sum of squared errors against the global labels Y
#(module-level function, since it is called below as get_loss(Out))
def get_loss(Out):
  return tf.reduce_sum(tf.square(Y-Out));
#PROGRAMME ENTRY POINT==========================================================
#data: the XOR truth table
X = tf.convert_to_tensor([[0,0],[0,1],[1,0],[1,1]],tf.float32);
Y = tf.convert_to_tensor([[0], [1], [1], [0] ],tf.float32);
#train
M = model();
Optim = tf.keras.optimizers.SGD(1e-1);
for I in range(1000):
  #report the loss every 100 iterations
  if I%100==0:
    Out  = M(X);
    Loss = get_loss(Out);
    print("Loss:",Loss.numpy());
  #forward pass under a tape, then one SGD step on all 4 variables
  with tf.GradientTape() as T:
    Out  = M(X);
    Loss = get_loss(Out);
  Grads = T.gradient(Loss, [M.W1,M.B1,M.W2,M.B2]);
  Optim.apply_gradients(zip(Grads, [M.W1,M.B1,M.W2,M.B2]));
#end for
Out = M(X);
Loss = get_loss(Out);
print("Loss:",Loss.numpy(),"(Last)");
print("\nSaving...");
tf.saved_model.save(M,"/tmp/test-model");
print("\nEval from previous save:");
M2 = tf.saved_model.load("/tmp/test-model");  #returns the restored tf.Module
print(tf.round(M2(X)).numpy());               #rounded XOR predictions
print("\nDone.");
#eof
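Note that __call__ was traced with the fixed input signature [BSIZE,2], so the restored M2 only accepts float32 batches of exactly BSIZE=4 rows. A quick check of this behaviour (an assumed follow-up to the run above, not part of the original programme):

try:
  M2(tf.zeros([2,2]));  #batch of 2 instead of BSIZE=4
except (ValueError, TypeError) as E:
  print("Rejected incompatible batch shape:", type(E).__name__);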