[Fix][Github 2714] Ensures that the original model is kept untouched
Github issue: #2714

Signed-off-by: Gwena Cunha <[email protected]>
gcunhase authored and rajeevsrao committed Mar 17, 2023
1 parent a566665 commit c46089f
Showing 1 changed file with 11 additions and 5 deletions.
tools/tensorflow-quantization/tensorflow_quantization/quantize.py (16 changes: 11 additions & 5 deletions)
@@ -297,15 +297,21 @@ def _execute_quantize_model(
         cfg.is_config_object_created()
     ), "[E] Have you created the quantization config object before calling `quantize_model`?"
 
-    # Wrap quantizable layers
-    model = tf.keras.models.clone_model(
-        model, input_tensors=None, clone_function=_quantize_model_layer_clone_function
+    # 3. Ensure that the original model is kept untouched.
+    # This step is needed as `clone_model` with our custom `clone_function` wraps layers in a destructive manner.
+    # TODO: delete later if a better solution is found, most likely inside our custom `clone_function`.
+    cloned_model = tf.keras.models.clone_model(model)
+    cloned_model.set_weights(model.get_weights())
+
+    # 4. Wrap quantizable layers
+    quant_model = tf.keras.models.clone_model(
+        cloned_model, input_tensors=None, clone_function=_quantize_model_layer_clone_function
     )
 
-    # Clean global space afterwards
+    # 5. Clean global space afterwards
     q_config_object.clean()
 
-    return model
+    return quant_model
 
 
 def _recognize_config_class_id(
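For context on the "destructive manner" noted in the new comment: `tf.keras.models.clone_model` builds the clone out of whatever layer objects the custom `clone_function` returns, so a function that returns or wraps the original layer instances leaves those instances shared between the two models. A minimal illustration of that sharing, using a toy model and an identity `clone_function` rather than the repository's actual `_quantize_model_layer_clone_function`:

# Toy illustration (not the repository's code) of why a custom clone_function
# can leave the source model exposed: the "clone" is built from whatever layer
# object clone_function returns.
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(4, name="dense")(inputs)
original = tf.keras.Model(inputs, outputs)

# An identity clone_function stands in for any function that returns (or
# wraps) the original layer object instead of building a fresh one.
shared = tf.keras.models.clone_model(original, clone_function=lambda layer: layer)

# Both models now hold the very same Dense instance, so mutating the layer
# through one model is visible through the other.
assert shared.get_layer("dense") is original.get_layer("dense")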

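The fix itself relies on the standard Keras deep-copy pattern: a plain `clone_model` call rebuilds every layer from its config (fresh objects, re-initialized weights), and `set_weights(get_weights())` then copies the trained values across. A minimal sketch with an assumed toy model, not the repository's test code:

# Minimal sketch of the deep-copy step added in this commit (toy model,
# assumed names): clone first, copy weights, then hand only the clone to
# the destructive wrapping pass.
import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(4)(inputs)
model = tf.keras.Model(inputs, outputs)

cloned_model = tf.keras.models.clone_model(model)  # fresh layer objects, re-initialized weights
cloned_model.set_weights(model.get_weights())      # copy the weight values across

# The clone computes the same function but shares no layer instances,
# so wrapping its layers later cannot touch the original model.
x = np.random.rand(2, 8).astype("float32")
np.testing.assert_allclose(model(x), cloned_model(x), rtol=1e-5)
assert cloned_model.layers[1] is not model.layers[1]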