Fix comparison of module_type and MulLinear (#1671)
Kaihui-intel authored Mar 14, 2024
Signed-off-by: Kaihui-intel <kaihui.tang@intel.com>
1 parent 047560f commit ba3abac
Showing 1 changed file with 1 addition and 2 deletions.

neural_compressor/utils/pytorch.py
@@ -210,7 +210,6 @@ def load_weight_only(checkpoint_dir, model, layer_wise=False):
     Returns:
         (object): quantized model
     """
-    import neural_compressor  # for eval(config['module_type'])
     from neural_compressor.adaptor.torch_utils.model_wrapper import MulLinear
 
     weights_file = os.path.join(os.path.abspath(os.path.expanduser(checkpoint_dir)), "best_model.pt")
@@ -221,7 +220,7 @@ def load_weight_only(checkpoint_dir, model, layer_wise=False):
     for op_name, config in weight_only_config.items():
         if config["dtype"] == "fp32":
             continue
-        if eval(config["module_type"]) == MulLinear:
+        if config["module_type"] == MulLinear.__module__ + "." + MulLinear.__name__:
             # op should be replaced by MulLinear
             module = util.fetch_module(model, op_name)
             new_module = MulLinear(module)
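Why the change: eval(config["module_type"]) executes whatever string is stored in the checkpoint's config, which is unsafe for untrusted checkpoints and forces the top-level "import neural_compressor" seen in the removed line. Comparing against the class's fully qualified name is a plain string check with neither drawback. A minimal sketch of the idea, assuming neural_compressor is installed; the config dict below is a hypothetical stand-in for one entry of the checkpoint's weight-only config:

    # Sketch of the fixed comparison; not the library's actual load path.
    from neural_compressor.adaptor.torch_utils.model_wrapper import MulLinear

    # Build the fully qualified class name without eval(),
    # e.g. "neural_compressor.adaptor.torch_utils.model_wrapper.MulLinear".
    target = MulLinear.__module__ + "." + MulLinear.__name__

    # Hypothetical checkpoint config entry for illustration only.
    config = {"module_type": target, "dtype": "int4"}

    # Plain string equality: no arbitrary code execution, and no need to
    # import the top-level neural_compressor package just to resolve a name.
    if config["module_type"] == target:
        print("op should be replaced by MulLinear")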
