COMP7404 Machine Learning: Decision Trees & Random Forests
Published: 2019-04-30


Decision Tree

One big difference from many other models is that a decision tree does not require feature standardization in the preprocessing stage.

This is because a tree does not depend on the absolute scale of the feature values; it only cares about where the split thresholds fall, and those thresholds adapt to whatever scale the features are on. The quick check below illustrates this.
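A quick check (my own minimal sketch, not part of the course code): fit the same tree on raw and standardized copies of the iris features and compare the predictions. Standardization is a monotonic transformation, so the tree finds equivalent splits and the predictions should match.

import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier

iris = datasets.load_iris()
X, y = iris.data[:, [2, 3]], iris.target

# Same tree on raw vs. standardized features
tree_raw = DecisionTreeClassifier(criterion='gini', max_depth=7, random_state=1).fit(X, y)
X_std = StandardScaler().fit_transform(X)
tree_std = DecisionTreeClassifier(criterion='gini', max_depth=7, random_state=1).fit(X_std, y)

# Scaling only moves the learned thresholds, not the partition of the data
print(np.array_equal(tree_raw.predict(X), tree_std.predict(X_std)))  # expect True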

The classic decision-tree algorithms are ID3, C4.5, and CART. The DecisionTreeClassifier in sklearn.tree implements the CART classification tree, but its criterion parameter is configurable: criterion='gini' gives standard CART behavior, while criterion='entropy' makes the splitting criterion resemble ID3/C4.5.
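For reference, the two impurity measures for a node with class proportions p_i are Gini = 1 - Σ p_i² and entropy = -Σ p_i log₂ p_i. A small sketch of the standard definitions (my own illustration, not from the course material):

import numpy as np

def gini(p):
    """Gini impurity: 1 - sum(p_i^2) for class proportions p."""
    p = np.asarray(p)
    return 1.0 - np.sum(p ** 2)

def entropy(p):
    """Shannon entropy: -sum(p_i * log2(p_i)), skipping zero proportions."""
    p = np.asarray(p)
    p = p[p > 0]
    return -np.sum(p * np.log2(p))

# Both measures are largest for an evenly mixed node and zero for a pure one
print(gini([0.5, 0.5]), entropy([0.5, 0.5]))  # 0.5 1.0
print(gini([1.0, 0.0]), entropy([1.0, 0.0]))  # 0.0 -0.0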

The CART regression counterpart is DecisionTreeRegressor; a minimal sketch follows below.
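A minimal DecisionTreeRegressor sketch on a toy dataset (my own illustration of the sklearn API; the toy data is not part of the original examples):

import numpy as np
from sklearn.tree import DecisionTreeRegressor

# Toy 1-D regression problem: a noisy sine wave
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(80)

# CART regression tree: splits minimize squared error by default
reg = DecisionTreeRegressor(max_depth=3, random_state=1)
reg.fit(X, y)
print(reg.predict([[2.5]]))  # piecewise-constant prediction, near sin(2.5)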

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]  # petal length and petal width only
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

tree = DecisionTreeClassifier(criterion='gini', max_depth=7, random_state=1)
tree.fit(X_train, y_train)

y_pred = tree.predict(X_train)
print('Misclassified training samples:', (y_train != y_pred).sum())
y_pred = tree.predict(X_test)
print('Misclassified samples:', (y_test != y_pred).sum())
print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))

# Visualization below
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt

def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Evaluate the classifier on a grid covering the feature space
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Scatter the samples of each class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8,
                    c=colors[idx], marker=markers[idx],
                    label=cl, edgecolor='black')
    # Circle the test-set samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], c='none', edgecolor='black',
                    alpha=1.0, linewidth=1, marker='o', s=100, label='test set')

X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined, classifier=tree, test_idx=range(105, 150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

tree = DecisionTreeClassifier(criterion='gini', max_depth=7, random_state=1)
tree.fit(X_train, y_train)

y_pred = tree.predict(X_train)
print('misclassified training samples: ', (y_pred != y_train).sum())
y_pred = tree.predict(X_test)
print('misclassified testing samples: ', (y_pred != y_test).sum())
print('Accuracy: %0.3f' % accuracy_score(y_test, y_pred))


Visualizing a Decision Tree

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

tree = DecisionTreeClassifier(criterion='gini', max_depth=7, random_state=1)
tree.fit(X_train, y_train)

# Export the fitted tree to a PNG image (requires pydotplus and Graphviz)
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz

dot_data = export_graphviz(tree, filled=True, rounded=True,
                           class_names=['Setosa', 'Versicolor', 'Virginica'],
                           feature_names=['petal length', 'petal width'],
                           out_file=None)
graph = graph_from_dot_data(dot_data)
graph.write_png('tree.png')


Random Forests

Combining multiple decision trees via random forests

Likewise, sklearn provides both RandomForestClassifier and RandomForestRegressor (a minimal regression sketch appears after the classifier examples below).

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

# An ensemble of 25 CART trees, trained in parallel on 2 cores
forest = RandomForestClassifier(criterion='gini', n_estimators=25, random_state=1, n_jobs=2)
forest.fit(X_train, y_train)

y_pred = forest.predict(X_train)
print('Misclassified training samples:', (y_train != y_pred).sum())
y_pred = forest.predict(X_test)
print('Misclassified samples:', (y_test != y_pred).sum())
print('Accuracy: %.3f' % accuracy_score(y_test, y_pred))

# Visualize the combined decision regions
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt

def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Evaluate the classifier on a grid covering the feature space
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Scatter the samples of each class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8,
                    c=colors[idx], marker=markers[idx],
                    label=cl, edgecolor='black')
    # Circle the test-set samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], c='none', edgecolor='black',
                    alpha=1.0, linewidth=1, marker='o', s=100, label='test set')

X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined, y_combined, classifier=forest, test_idx=range(105, 150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)

forest = RandomForestClassifier(criterion='gini', n_estimators=25, random_state=1, n_jobs=2)
forest.fit(X_train, y_train)

y_pred = forest.predict(X_train)
print('misclassified training samples: ', (y_pred != y_train).sum())
y_pred = forest.predict(X_test)
print('misclassified testing samples: ', (y_pred != y_test).sum())
print('Accuracy: %0.3f' % accuracy_score(y_test, y_pred))
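And for completeness, a minimal RandomForestRegressor sketch on a toy 1-D problem (my own illustration, not from the original post); averaging many trees smooths the piecewise-constant fit of a single regression tree:

import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Same noisy sine-wave toy data as the DecisionTreeRegressor sketch above
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(200, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(200)

# Average the predictions of 25 regression trees
forest_reg = RandomForestRegressor(n_estimators=25, random_state=1, n_jobs=2)
forest_reg.fit(X, y)
print(forest_reg.predict([[2.5]]))  # should be close to sin(2.5)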


