diff --git a/README.md b/README.md
index 10ea5c3..73b38ee 100644
--- a/README.md
+++ b/README.md
@@ -105,7 +105,7 @@ beartype = "^0.14.1"
 ### Tree Models with `feature_importances_` Attribute

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
@@ -163,7 +163,7 @@ Running 2 actual runs and 10 random runs
 ### Linear Models with `coef_` Attribute

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
@@ -214,7 +214,7 @@ Outputs:
 ### With `sklearn.multioutput`

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
@@ -328,11 +328,12 @@ Read the followings for details:
 `TargetPermutationImportancesWrapper` follows scikit-learn interfaces and support scikit-learn feature selection method such as `SelectFromModel`:

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
 import pandas as pd
+import numpy as np
 from sklearn.datasets import load_breast_cancer

 # Models
@@ -372,9 +373,26 @@ selector = SelectFromModel(
 ).fit(Xpd, data.target)
 selected_x = selector.transform(Xpd)
 print(selected_x.shape)
+print(selector.get_feature_names_out())
 ```
 Fork above code from [Kaggle](https://www.kaggle.com/code/kingychiu/target-permutation-importances-basic-usage/notebook).
+
+Outputs:
+```
+Running 2 actual runs and 10 random runs
+100%|██████████| 2/2 [00:01<00:00, 1.80it/s]
+100%|██████████| 10/10 [00:06<00:00, 1.55it/s]
+                 feature  importance
+22       worst perimeter    0.151953
+27  worst concave points    0.124407
+20          worst radius    0.119090
+7    mean concave points    0.098747
+23            worst area    0.096943
+(569, 5)
+['mean concave points' 'worst radius' 'worst perimeter' 'worst area'
+ 'worst concave points']
+```

 ---

 ## Feature Selection Examples
diff --git a/docs/index.md b/docs/index.md
index 10ea5c3..73b38ee 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -105,7 +105,7 @@ beartype = "^0.14.1"
 ### Tree Models with `feature_importances_` Attribute

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
@@ -163,7 +163,7 @@ Running 2 actual runs and 10 random runs
 ### Linear Models with `coef_` Attribute

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
@@ -214,7 +214,7 @@ Outputs:
 ### With `sklearn.multioutput`

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
@@ -328,11 +328,12 @@ Read the followings for details:
 `TargetPermutationImportancesWrapper` follows scikit-learn interfaces and support scikit-learn feature selection method such as `SelectFromModel`:

 ```python
-# Import the function
+# Import the package
 import target_permutation_importances as tpi

 # Prepare a dataset
 import pandas as pd
+import numpy as np
 from sklearn.datasets import load_breast_cancer

 # Models
@@ -372,9 +373,26 @@ selector = SelectFromModel(
 ).fit(Xpd, data.target)
 selected_x = selector.transform(Xpd)
 print(selected_x.shape)
+print(selector.get_feature_names_out())
 ```
 Fork above code from [Kaggle](https://www.kaggle.com/code/kingychiu/target-permutation-importances-basic-usage/notebook).
+
+Outputs:
+```
+Running 2 actual runs and 10 random runs
+100%|██████████| 2/2 [00:01<00:00, 1.80it/s]
+100%|██████████| 10/10 [00:06<00:00, 1.55it/s]
+                 feature  importance
+22       worst perimeter    0.151953
+27  worst concave points    0.124407
+20          worst radius    0.119090
+7    mean concave points    0.098747
+23            worst area    0.096943
+(569, 5)
+['mean concave points' 'worst radius' 'worst perimeter' 'worst area'
+ 'worst concave points']
+```

 ---

 ## Feature Selection Examples
diff --git a/pyproject.toml b/pyproject.toml
index 67be596..31177dd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "target-permutation-importances"
-version = "1.0.19a1"
+version = "1.0.19"
 description = "Compute (Target) Permutation Importances of a machine learning model"
 authors = [{name = "Anthony Chiu", email = "kingychiu@gmail.com"}]
 maintainers = [{name = "Anthony Chiu", email = "kingychiu@gmail.com"}]
@@ -25,7 +25,7 @@ classifiers = [

 [tool.poetry]
 name = "target-permutation-importances"
-version = "1.0.19a1"
+version = "1.0.19"
 description = "Compute (Target) Permutation Importances of a machine learning model"
 authors = ["Anthony Chiu <kingychiu@gmail.com>"]
 maintainers = ["Anthony Chiu <kingychiu@gmail.com>"]
diff --git a/target_permutation_importances/functional.py b/target_permutation_importances/functional.py
index 233f467..7289edb 100644
--- a/target_permutation_importances/functional.py
+++ b/target_permutation_importances/functional.py
@@ -357,7 +357,7 @@ def compute(

     Example:
         ```python
-        # Import the function
+        # Import the package
         import target_permutation_importances as tpi

         # Prepare a dataset
diff --git a/target_permutation_importances/sklearn_wrapper.py b/target_permutation_importances/sklearn_wrapper.py
index 13c0a5b..3e4d3e4 100644
--- a/target_permutation_importances/sklearn_wrapper.py
+++ b/target_permutation_importances/sklearn_wrapper.py
@@ -41,11 +41,12 @@ def __init__(

         Example:
             ```python
-            # Import the function
+            # Import the package
             import target_permutation_importances as tpi

             # Prepare a dataset
             import pandas as pd
+            import numpy as np
             from sklearn.datasets import load_breast_cancer

             # Models
@@ -85,6 +86,7 @@ def __init__(
             ).fit(Xpd, data.target)
             selected_x = selector.transform(Xpd)
             print(selected_x.shape)
+            print(selector.get_feature_names_out())
             ```
         """
         self.model_cls = model_cls
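For context, the hunks above show only fragments of the README example they edit. The sketch below assembles the full flow those fragments imply; it is not the verbatim README code. Only `model_cls`, the `SelectFromModel(...).fit(Xpd, data.target)` call, the added `import numpy as np`, and `get_feature_names_out()` are visible in the patch. The constructor arguments `model_cls_params`, `model_fit_params`, `num_actual_runs`, and `num_random_runs`, as well as `prefit=True` and `threshold=-np.inf`, are assumptions inferred from the elided lines and the "Running 2 actual runs and 10 random runs" output.

```python
# A sketch only: arguments marked "assumed" below are inferred from the elided
# parts of the README example, not confirmed by this patch.
import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

import target_permutation_importances as tpi

# Load the breast cancer dataset as a DataFrame so feature names are preserved
data = load_breast_cancer()
Xpd = pd.DataFrame(data.data, columns=data.feature_names)

# Wrap a tree model; per the README, the wrapper follows scikit-learn interfaces,
# so the corrected importances are exposed in a way SelectFromModel can consume
wrapped_model = tpi.TargetPermutationImportancesWrapper(
    model_cls=RandomForestClassifier,  # `model_cls` is confirmed by the patch
    model_cls_params={"n_jobs": -1},   # assumed argument name
    model_fit_params={},               # assumed argument name
    num_actual_runs=2,                 # assumed; matches "2 actual runs" in the output
    num_random_runs=10,                # assumed; matches "10 random runs" in the output
)
wrapped_model.fit(Xpd, data.target)

# Keep the 5 highest-ranked features; threshold=-np.inf makes max_features the
# only criterion (the likely reason the patch adds `import numpy as np`)
selector = SelectFromModel(
    estimator=wrapped_model,
    prefit=True,        # assumed; reuses the fitted wrapper (scikit-learn >= 1.2)
    max_features=5,
    threshold=-np.inf,
).fit(Xpd, data.target)

print(selector.transform(Xpd).shape)     # (569, 5) in the README output
print(selector.get_feature_names_out())  # the call this patch adds to the example
```

Printing `get_feature_names_out()` next to the `(569, 5)` shape lets the example show which five columns survive the selection, which is presumably why the patch adds that line and the corresponding `Outputs:` block.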