Module imodels.rule_set.fplasso
Expand source code
from typing import List
from sklearn.base import ClassifierMixin, RegressorMixin
from imodels.rule_set.rule_fit import RuleFit
from imodels.util.convert import itemsets_to_rules
from imodels.util.extract import extract_fpgrowth
class FPLasso(RuleFit):
    """RuleFit variant whose candidate rules are mined with FP-Growth
    (frequent itemsets over discretized features) instead of being extracted
    from boosted trees.

    Rather than using this class directly, use FPLassoRegressor or
    FPLassoClassifier.
    """

    def __init__(self,
                 minsupport=0.1,
                 maxcardinality=2,
                 disc_strategy='mdlp',
                 disc_kwargs=None,
                 verbose=False,
                 n_estimators=100,
                 tree_size=4,
                 sample_fract='default',
                 max_rules=2000,
                 memory_par=0.01,
                 tree_generator=None,
                 lin_trim_quantile=0.025,
                 lin_standardise=True,
                 exp_rand_tree_size=True,
                 include_linear=True,
                 alpha=None,
                 random_state=None):
        """
        Parameters
        ----------
        minsupport : float
            Minimum support threshold forwarded to FP-Growth itemset mining.
        maxcardinality : int
            Maximum number of items per mined itemset (max rule cardinality).
        disc_strategy : str
            Discretization strategy applied before mining ('mdlp' by default).
        disc_kwargs : dict or None
            Extra keyword arguments for the discretizer. ``None`` (the
            default) is treated as an empty dict at use time; a ``None``
            sentinel replaces the original mutable ``{}`` default, which was
            shared across all instances.
        verbose : bool
            Whether to emit progress output during rule extraction.

        The remaining parameters are forwarded unchanged to ``RuleFit``.
        """
        super().__init__(n_estimators,
                         tree_size,
                         sample_fract,
                         max_rules,
                         memory_par,
                         tree_generator,
                         lin_trim_quantile,
                         lin_standardise,
                         exp_rand_tree_size,
                         include_linear,
                         alpha,
                         random_state)
        # Store params exactly as passed (sklearn get_params/clone contract);
        # the None -> {} normalization for disc_kwargs happens in
        # _extract_rules, not here.
        self.disc_strategy = disc_strategy
        self.disc_kwargs = disc_kwargs
        self.minsupport = minsupport
        self.maxcardinality = maxcardinality
        self.verbose = verbose

    def fit(self, X, y=None, feature_names=None, undiscretized_features=None):
        """Fit the model.

        ``undiscretized_features`` names features that should bypass
        discretization; ``None`` (the default) means none. The ``None``
        sentinel replaces the original mutable ``[]`` default, which was
        shared across calls.
        """
        self.undiscretized_features = ([] if undiscretized_features is None
                                       else undiscretized_features)
        super().fit(X, y, feature_names=feature_names)
        return self

    def _extract_rules(self, X, y) -> List[str]:
        """Mine frequent itemsets from (X, y) via FP-Growth and convert them
        into rule strings."""
        itemsets = extract_fpgrowth(X, y,
                                    feature_names=self.feature_placeholders,
                                    minsupport=self.minsupport,
                                    maxcardinality=self.maxcardinality,
                                    undiscretized_features=self.undiscretized_features,
                                    disc_strategy=self.disc_strategy,
                                    # None stored in __init__ means "no extra kwargs"
                                    disc_kwargs=self.disc_kwargs or {},
                                    verbose=self.verbose)[0]
        return itemsets_to_rules(itemsets)
class FPLassoRegressor(FPLasso, RegressorMixin):
    """FPLasso configured for regression (continuous targets)."""

    def _init_prediction_task(self):
        # Hook consumed by the RuleFit/FPLasso base machinery to select
        # regression-specific fitting behavior.
        self.prediction_task = 'regression'
class FPLassoClassifier(FPLasso, ClassifierMixin):
    """FPLasso configured for classification (discrete targets)."""

    def _init_prediction_task(self):
        # Hook consumed by the RuleFit/FPLasso base machinery to select
        # classification-specific fitting behavior.
        self.prediction_task = 'classification'
Classes
class FPLasso (minsupport=0.1, maxcardinality=2, disc_strategy='mdlp', disc_kwargs={}, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None)-
Rulefit class. Rather than using this class directly, should use RuleFitRegressor or RuleFitClassifier
Parameters
tree_size: Number of terminal nodes in generated trees. If exp_rand_tree_size=True, this will be the mean number of terminal nodes.
sample_fract: Fraction of randomly chosen training observations used to produce each tree. FP 2004 (Sec. 2)
max_rules: Total number of terms included in the final model (both linear and rules). The approximate total number of candidate rules generated for fitting is also based on this. Note that the actual number of candidate rules will usually be lower than this due to duplicates.
memory_par: Scale multiplier (shrinkage factor) applied to each new tree when sequentially induced. FP 2004 (Sec. 2)
lin_standardise: If True, the linear terms will be standardised as per Friedman Sec 3.2 by multiplying the winsorised variable by 0.4/stdev.
lin_trim_quantile: If lin_standardise is True, this quantile will be used to trim linear terms before standardisation.
exp_rand_tree_size: If True, each boosted tree will have a different maximum number of terminal nodes based on an exponential distribution about tree_size. (Friedman Sec 3.3)
include_linear: Include linear terms as opposed to only rules.
random_state: Integer to initialise random objects and provide repeatability.
tree_generator: Optional: this object will be used as provided to generate the rules. This will override almost all the other properties above. Must be GradientBoostingRegressor or GradientBoostingClassifier, optional (default=None)
Attributes
rule_ensemble: RuleEnsemble — the rule ensemble
feature_names: list of strings, optional (default=None) — the names of the features (columns)
Expand source code
class FPLasso(RuleFit): def __init__(self, minsupport=0.1, maxcardinality=2, disc_strategy='mdlp', disc_kwargs={}, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None): super().__init__(n_estimators, tree_size, sample_fract, max_rules, memory_par, tree_generator, lin_trim_quantile, lin_standardise, exp_rand_tree_size, include_linear, alpha, random_state) self.disc_strategy = disc_strategy self.disc_kwargs = disc_kwargs self.minsupport = minsupport self.maxcardinality = maxcardinality self.verbose = verbose def fit(self, X, y=None, feature_names=None, undiscretized_features=[]): self.undiscretized_features = undiscretized_features super().fit(X, y, feature_names=feature_names) return self def _extract_rules(self, X, y) -> List[str]: itemsets = extract_fpgrowth(X, y, feature_names=self.feature_placeholders, minsupport=self.minsupport, maxcardinality=self.maxcardinality, undiscretized_features=self.undiscretized_features, disc_strategy=self.disc_strategy, disc_kwargs=self.disc_kwargs, verbose=self.verbose)[0] return itemsets_to_rules(itemsets)Ancestors
Subclasses
Inherited members
class FPLassoClassifier (minsupport=0.1, maxcardinality=2, disc_strategy='mdlp', disc_kwargs={}, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None)-
Rulefit class. Rather than using this class directly, should use RuleFitRegressor or RuleFitClassifier
Parameters
tree_size: Number of terminal nodes in generated trees. If exp_rand_tree_size=True, this will be the mean number of terminal nodes.
sample_fract: Fraction of randomly chosen training observations used to produce each tree. FP 2004 (Sec. 2)
max_rules: Total number of terms included in the final model (both linear and rules). The approximate total number of candidate rules generated for fitting is also based on this. Note that the actual number of candidate rules will usually be lower than this due to duplicates.
memory_par: Scale multiplier (shrinkage factor) applied to each new tree when sequentially induced. FP 2004 (Sec. 2)
lin_standardise: If True, the linear terms will be standardised as per Friedman Sec 3.2 by multiplying the winsorised variable by 0.4/stdev.
lin_trim_quantile: If lin_standardise is True, this quantile will be used to trim linear terms before standardisation.
exp_rand_tree_size: If True, each boosted tree will have a different maximum number of terminal nodes based on an exponential distribution about tree_size. (Friedman Sec 3.3)
include_linear: Include linear terms as opposed to only rules.
random_state: Integer to initialise random objects and provide repeatability.
tree_generator: Optional: this object will be used as provided to generate the rules. This will override almost all the other properties above. Must be GradientBoostingRegressor or GradientBoostingClassifier, optional (default=None)
Attributes
rule_ensemble: RuleEnsemble — the rule ensemble
feature_names: list of strings, optional (default=None) — the names of the features (columns)
Expand source code
class FPLassoClassifier(FPLasso, ClassifierMixin): def _init_prediction_task(self): self.prediction_task = 'classification'Ancestors
- FPLasso
- RuleFit
- sklearn.base.BaseEstimator
- sklearn.base.TransformerMixin
- RuleSet
- sklearn.base.ClassifierMixin
Inherited members
class FPLassoRegressor (minsupport=0.1, maxcardinality=2, disc_strategy='mdlp', disc_kwargs={}, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None)-
Rulefit class. Rather than using this class directly, should use RuleFitRegressor or RuleFitClassifier
Parameters
tree_size: Number of terminal nodes in generated trees. If exp_rand_tree_size=True, this will be the mean number of terminal nodes.
sample_fract: Fraction of randomly chosen training observations used to produce each tree. FP 2004 (Sec. 2)
max_rules: Total number of terms included in the final model (both linear and rules). The approximate total number of candidate rules generated for fitting is also based on this. Note that the actual number of candidate rules will usually be lower than this due to duplicates.
memory_par: Scale multiplier (shrinkage factor) applied to each new tree when sequentially induced. FP 2004 (Sec. 2)
lin_standardise: If True, the linear terms will be standardised as per Friedman Sec 3.2 by multiplying the winsorised variable by 0.4/stdev.
lin_trim_quantile: If lin_standardise is True, this quantile will be used to trim linear terms before standardisation.
exp_rand_tree_size: If True, each boosted tree will have a different maximum number of terminal nodes based on an exponential distribution about tree_size. (Friedman Sec 3.3)
include_linear: Include linear terms as opposed to only rules.
random_state: Integer to initialise random objects and provide repeatability.
tree_generator: Optional: this object will be used as provided to generate the rules. This will override almost all the other properties above. Must be GradientBoostingRegressor or GradientBoostingClassifier, optional (default=None)
Attributes
rule_ensemble: RuleEnsemble — the rule ensemble
feature_names: list of strings, optional (default=None) — the names of the features (columns)
Expand source code
class FPLassoRegressor(FPLasso, RegressorMixin): def _init_prediction_task(self): self.prediction_task = 'regression'Ancestors
- FPLasso
- RuleFit
- sklearn.base.BaseEstimator
- sklearn.base.TransformerMixin
- RuleSet
- sklearn.base.RegressorMixin
Inherited members