I'm using TensorFlow's quantization-aware training API and wish to deploy a model with an arbitrary bit-width. Since only 8-bit quantization is supported for TFLite deployment, I will deploy with a custom inference algorithm, but I still need to access the model's weights at the correct bit-width.
Currently, after quantization-aware training, my model is still in floating point, and as far as I've seen the only way to access the quantized weights is to convert the model to the TFLite format. However, that is impossible when using experimental functions.
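To be concrete, this is the route I mean: in the plain 8-bit case the int8 kernels can be read back through the TFLite interpreter after conversion, roughly as sketched below (converter settings and the int8 check are illustrative), but that path is not available to me with the custom bit-width configs.

import numpy as np
import tensorflow as tf

# The usual 8-bit route: convert the QAT model and read the quantized tensors
# back out of the TFLite interpreter. This is the step I cannot use with the
# experimental/custom bit-width quantize configs.
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()

interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
for detail in interpreter.get_tensor_details():
    if detail['dtype'] == np.int8:  # int8 weight tensors after conversion
        print(detail['name'], interpreter.get_tensor(detail['index']).shape)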
Here are my quantize config classes:
import tensorflow as tf
import tensorflow_model_optimization as tfmot


class Quantizer(tfmot.quantization.keras.QuantizeConfig):
    # Configure how to quantize weights.
    def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, tfmot.quantization.keras.quantizers.LastValueQuantizer(
            num_bits=8, symmetric=True, narrow_range=False, per_axis=False))]

    # Configure how to quantize activations.
    def get_activations_and_quantizers(self, layer):
        return [(layer.activation, tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=8, symmetric=False, narrow_range=False, per_axis=False))]

    def set_quantize_weights(self, layer, quantize_weights):
        # Add this line for each item returned in `get_weights_and_quantizers`,
        # in the same order.
        layer.kernel = quantize_weights[0]

    def set_quantize_activations(self, layer, quantize_activations):
        # Add this line for each item returned in `get_activations_and_quantizers`,
        # in the same order.
        layer.activation = quantize_activations[0]

    # Configure how to quantize outputs (may be equivalent to activations).
    def get_output_quantizers(self, layer):
        return []

    def get_config(self):
        return {}
# `quantizer`, `bits`, `symmetric`, `narrow_range`, and `per_axis` are defined
# elsewhere in my code (e.g. bits = 4).
class ModifiedQuantizer(Quantizer):
    # Configure weights to quantize with `bits` bits (e.g. 4) instead of 8.
    def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, quantizer(num_bits=bits, symmetric=symmetric,
                                         narrow_range=narrow_range, per_axis=per_axis))]

    # Configure how to quantize activations.
    def get_activations_and_quantizers(self, layer):
        return [(layer.activation, tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
            num_bits=bits, symmetric=False, narrow_range=False, per_axis=False))]
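My understanding is that these quantizers only fake-quantize the kernel in the forward pass, so the variables that actually get saved stay float32. Roughly (a sketch with made-up shapes and min/max, using TensorFlow's fake-quant op rather than TFMOT internals):

import tensorflow as tf

# Sketch of what happens to a kernel during QAT: it is quantized and immediately
# dequantized ("fake quantization"), so the result only takes discrete values
# but is still stored as float32. Shape and min/max here are made up.
kernel = tf.random.normal([3, 3, 16, 32])
fake_quant_kernel = tf.quantization.fake_quant_with_min_max_vars(
    kernel, min=-1.0, max=1.0, num_bits=4, narrow_range=False)
print(fake_quant_kernel.dtype)  # float32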
And here is how I quantize the model:
from tensorflow import keras

# Helpers from the TFMOT Keras quantization API.
clone_model = tf.keras.models.clone_model
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_apply = tfmot.quantization.keras.quantize_apply
quantize_scope = tfmot.quantization.keras.quantize_scope

supported_layers = [
    tf.keras.layers.Conv2D,
    tf.keras.layers.DepthwiseConv2D,
]
def quantize_all_layers(layer):
    # Annotate supported layer types with the ModifiedQuantizer config;
    # leave every other layer unchanged.
    for supported_layer in supported_layers:
        if isinstance(layer, supported_layer):
            return quantize_annotate_layer(layer, quantize_config=ModifiedQuantizer())
    return layer

annotated_model = clone_model(
    model,
    clone_function=quantize_all_layers
)

with quantize_scope(
        {'Quantizer': Quantizer},
        {'ModifiedQuantizer': ModifiedQuantizer},
        {'_relu6': models._relu6}):
    q_aware_model = quantize_apply(annotated_model)

optimizer = keras.optimizers.Adam(learning_rate=0.001)
q_aware_model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=optimizer,
    metrics=['sparse_categorical_accuracy'])

train_images, train_labels, val_images, val_labels, _, _ = cifar10.load()
q_aware_model.fit(train_images, train_labels, batch_size=64, epochs=1, verbose=1,
                  validation_data=(val_images, val_labels))
As previously said, when using e.g. bits=4 in the ModifiedQuantizer, the model is still saved in floating point, and I don't know how to access the quantized weights.
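For example, listing the trained model's variables only ever shows float32 tensors (the kernels, plus what look like the quantizers' min/max variables), never integer weights:

# Every variable in the QAT model reports dtype float32; there is no integer
# weight tensor to read out directly.
for w in q_aware_model.weights:
    print(w.name, w.shape, w.dtype)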
Thanks!