@@ -89,7 +89,7 @@ def test_dropout_partial_noise_shape(self):
         # Test that dropout mask is shared across second dim.
         self.assertAllClose(out_np[:, 0, :], out_np[:, 1, :])
 
-    def test_dropout_with_savemodel(self):
+    def test_dropout_with_saving(self):
         inputs = keras.Input(shape=(5, 10))
         layer = keras.layers.Dropout(0.5, force_generator=True)
         outputs = layer(inputs)
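
The rename reflects that the test now exercises several saving paths, not just SavedModel; the subtests added in the next hunk cover SavedModel, the `.keras` v3 format, and `tf.train.Checkpoint`. For reference, the model under test can be rebuilt standalone; a minimal sketch assuming TF 2.x Keras, where `force_generator=True` (visible in the context lines above) makes the layer keep its own stateful random generator instead of relying on the global seed:

    import numpy as np
    from tensorflow import keras

    inputs = keras.Input(shape=(5, 10))
    # force_generator=True gives the layer its own generator state, which
    # is what the saving tests below need to round-trip.
    layer = keras.layers.Dropout(0.5, force_generator=True)
    model = keras.Model(inputs, layer(inputs))

    train = model(np.ones((20, 5, 10)), training=True)  # mask applied
    predict = model(np.ones((20, 5, 10)))  # inference: identity pass-through
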
@@ -105,32 +105,52 @@ def test_dropout_with_savemodel(self):
         # Make sure the layer applies dropout when training
         self.assertNotAllClose(train, predict)
 
-        model.save(
-            os.path.join(self.get_temp_dir(), "savedmodel"), save_format="tf"
-        )
-        loaded_model = keras.models.load_model(
-            os.path.join(self.get_temp_dir(), "savedmodel")
-        )
-        predict2 = loaded_model(np.ones((20, 5, 10)))
-
-        self.assertAllClose(predict, predict2)
-        # Make sure the model applies different dropout after loading
-        train2 = loaded_model(np.ones((20, 5, 10)), training=True)
-        self.assertNotAllClose(train, train2)
-        self.assertIsNotNone(loaded_model.layers[1]._random_generator)
-
-        # Also make sure the checkpoint doesn't contain any variables from
-        # the dropout layer, to keep backward compatibility.
-        checkpoint = tf.train.Checkpoint(model)
-        save_path = checkpoint.save(
-            os.path.join(self.get_temp_dir(), "checkpoint")
-        )
-        checkpoint_var_names = [
-            name_value_tuple[0]
-            for name_value_tuple in tf.train.list_variables(save_path)
-        ]
-        for name in checkpoint_var_names:
-            self.assertNotIn("dropout", name)
+        with self.subTest("savedmodel"):
+            model.save(
+                os.path.join(self.get_temp_dir(), "savedmodel"),
+                save_format="tf",
+            )
+            loaded_model = keras.models.load_model(
+                os.path.join(self.get_temp_dir(), "savedmodel")
+            )
+            predict2 = loaded_model(np.ones((20, 5, 10)))
+
+            self.assertAllClose(predict, predict2)
+            # Make sure the model applies different dropout after loading
+            train2 = loaded_model(np.ones((20, 5, 10)), training=True)
+            self.assertNotAllClose(train, train2)
+            self.assertIsNotNone(loaded_model.layers[1]._random_generator)
+
+        with self.subTest("keras_v3"):
+            if not tf.__internal__.tf2.enabled():
+                self.skipTest(
+                    "TF2 must be enabled to use the new `.keras` saving."
+                )
+            model.save(os.path.join(self.get_temp_dir(), "model.keras"))
+            loaded_model = keras.models.load_model(
+                os.path.join(self.get_temp_dir(), "model.keras")
+            )
+            predict2 = loaded_model(np.ones((20, 5, 10)))
+
+            self.assertAllClose(predict, predict2)
+            # Make sure the model applies different dropout after loading
+            train2 = loaded_model(np.ones((20, 5, 10)), training=True)
+            self.assertNotAllClose(train, train2)
+            self.assertIsNotNone(loaded_model.layers[1]._random_generator)
+
+        with self.subTest("checkpoint"):
+            # Also make sure the checkpoint doesn't contain any variables
+            # from the dropout layer, to keep backward compatibility.
+            checkpoint = tf.train.Checkpoint(model)
+            save_path = checkpoint.save(
+                os.path.join(self.get_temp_dir(), "checkpoint")
+            )
+            checkpoint_var_names = [
+                name_value_tuple[0]
+                for name_value_tuple in tf.train.list_variables(save_path)
+            ]
+            for name in checkpoint_var_names:
+                self.assertNotIn("dropout", name)
 
 
 @test_combinations.run_all_keras_modes
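
Each saving format runs under `self.subTest`, so a failure in one path is reported without aborting the others. The checkpoint subtest leans on `tf.train.list_variables`, which returns a `(name, shape)` pair for every entry stored in a checkpoint; below is a condensed standalone sketch of that check, assuming TF 2.x (the temp path and the weightless model are illustrative):

    import os
    import tempfile
    import tensorflow as tf
    from tensorflow import keras

    inputs = keras.Input(shape=(5, 10))
    model = keras.Model(
        inputs, keras.layers.Dropout(0.5, force_generator=True)(inputs)
    )

    # Save an object-based checkpoint rooted at the model, then list what
    # was actually serialized.
    prefix = os.path.join(tempfile.mkdtemp(), "ckpt")
    save_path = tf.train.Checkpoint(model).save(prefix)
    names = [name for name, _ in tf.train.list_variables(save_path)]

    # The dropout layer's generator state is expected to stay out of the
    # checkpoint for backward compatibility, so no entry should mention it.
    assert all("dropout" not in name for name in names)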