>>> from sklearn import tree
>>> X = [[0, 0], [1, 1]]
>>> Y = [0, 1]
>>> clf = tree.ExtraTreeClassifier()
>>> clf = clf.fit(X, Y)
>>> clf.predict([[2., 2.]])
array([1])
class sklearn.tree.ExtraTreeClassifier(
    criterion='gini',
    splitter='random',
    max_depth=None,
    min_samples_split=2,
    min_samples_leaf=1,
    min_weight_fraction_leaf=0.0,
    max_features='auto',
    random_state=None,
    max_leaf_nodes=None,
    min_impurity_decrease=0.0,
    min_impurity_split=None,
    class_weight=None
)
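A minimal sketch of how some of these constructor parameters might be set; the specific values (max_depth=3, min_samples_leaf=2, class_weight='balanced', random_state=0) are illustrative choices, not recommendations from the library.

from sklearn.tree import ExtraTreeClassifier

# Illustrative settings: limit tree depth, require at least two samples
# per leaf, balance class weights, and fix the random splitter's seed.
clf = ExtraTreeClassifier(
    criterion='gini',
    splitter='random',       # ExtraTree draws candidate split thresholds at random
    max_depth=3,
    min_samples_leaf=2,
    class_weight='balanced',
    random_state=0,
)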
# See DecisionTreeClassifier for details of the parameters.
Attributes
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance [R251].
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems), or a list containing the number of classes for each output (for multi-output problems).
n_features_ : int
The number of features when fit is performed.
n_outputs_ : int
The number of outputs when fit is performed.
tree_ : Tree object
The underlying Tree object.
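A short sketch, reusing toy data like the example above, of how these fitted attributes might be inspected after fit; the printed values depend on the random splitter and the data, so they are illustrative only.

from sklearn.tree import ExtraTreeClassifier

X = [[0, 0], [1, 1], [1, 0], [0, 1]]
Y = [0, 1, 1, 0]

clf = ExtraTreeClassifier(random_state=0).fit(X, Y)

print(clf.classes_)              # class labels, e.g. [0 1]
print(clf.n_classes_)            # number of classes
print(clf.n_features_)           # number of features seen during fit
print(clf.n_outputs_)            # number of outputs
print(clf.max_features_)         # inferred value of max_features
print(clf.feature_importances_)  # normalized impurity reduction per feature
print(clf.tree_.node_count)      # size of the underlying Tree object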
Methods
apply(X, check_input=True)
decision_path(X, check_input=True)
fit(X, y, sample_weight=None, check_input=True, X_idx_sorted=None)
get_params(deep=True)
predict(X, check_input=True)
predict_log_proba(X)
predict_proba(X, check_input=True)
score(X, y, sample_weight=None)
set_params(**params)
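A brief sketch of how a few of these methods could be called on the toy data above; the commented outputs are only indicative, since the random splitter affects the fitted tree.

from sklearn.tree import ExtraTreeClassifier

X = [[0, 0], [1, 1], [1, 0], [0, 1]]
Y = [0, 1, 1, 0]

clf = ExtraTreeClassifier(random_state=0).fit(X, Y)

clf.predict([[0.8, 0.9]])        # predicted class label for a new sample
clf.predict_proba([[0.8, 0.9]])  # per-class probability estimates
clf.score(X, Y)                  # mean accuracy on the given data and labels
clf.apply(X)                     # index of the leaf each sample ends up in
clf.decision_path(X)             # sparse node-indicator matrix of the paths
clf.get_params()['max_depth']    # read a parameter back
clf.set_params(max_depth=2)      # update parameters on the estimator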