from collections import defaultdict
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from loguru import logger


class NaiveBayesScratch():
    """Naive Bayes implemented from scratch"""
    def __init__(self):
        # Prior probabilities P(Y=ck)
        self._prior_prob = defaultdict(float)
        # Likelihoods P(X|Y=ck)
        self._likelihood = defaultdict(defaultdict)
        # Number of training samples in each class
        self._ck_counter = defaultdict(float)
        # Number of distinct values of each feature in the training set (Sj)
        self._Sj = defaultdict(float)

    def fit(self, X, y):
        """
        Train the model; the parameters are estimated with Bayesian estimation
        (Laplace smoothing).
        X: training set, one sample per row and one feature (attribute) per column
        y: training labels
        """
        n_sample, n_feature = X.shape
        # Possible class labels and the number of samples per class
        ck, num_ck = np.unique(y, return_counts=True)
        self._ck_counter = dict(zip(ck, num_ck))
        for label, num_label in self._ck_counter.items():
            # Prior probability P(y), with Laplace smoothing
            self._prior_prob[label] = (num_label + 1) / (n_sample + ck.shape[0])

        # Number of distinct values Sj of each feature over the whole training set,
        # used in the smoothing denominators
        for i in range(n_feature):
            self._Sj[i] = np.unique(X[:, i]).shape[0]

        # Indices of the samples belonging to each class
        ck_idx = []
        for label in ck:
            label_idx = np.squeeze(np.argwhere(y == label))
            ck_idx.append(label_idx)

        # Iterate over the classes
        for label, idx in zip(ck, ck_idx):
            xdata = X[idx]
            # Probabilities of all feature values for this class
            label_likelihood = defaultdict(defaultdict)
            # Iterate over the features
            for i in range(n_feature):
                # Probability of each value of this feature
                feature_val_prob = defaultdict(float)
                # Values of this feature observed for this class and their counts
                feature_val, feature_cnt = np.unique(xdata[:, i], return_counts=True)
                feature_counter = dict(zip(feature_val, feature_cnt))
                for fea_val, cnt in feature_counter.items():
                    # Probability of each feature value, with Laplace smoothing,
                    # i.e. P(x|y)
                    feature_val_prob[fea_val] = (cnt + 1) / (self._ck_counter[label] + self._Sj[i])
                label_likelihood[i] = feature_val_prob
            self._likelihood[label] = label_likelihood

    def predict(self, x):
        """
        Predict the class of a sample, which amounts to computing posterior probabilities.
        Note that the probabilities are accumulated in log space: multiplying many
        probabilities can underflow floating point, so taking logs turns the product
        into a sum.
        """
        # Posterior probability of each class, i.e. P(y|x)
        post_prob = defaultdict(float)
        # Compute the posterior for every class
        for label, label_likelihood in self._likelihood.items():
            prob = np.log(self._prior_prob[label])
            # Iterate over every feature of the sample
            for i, fea_val in enumerate(x):
                feature_val_prob = label_likelihood[i]
                # If this feature value appeared in the training set, use its stored probability
                if fea_val in feature_val_prob:
                    prob += np.log(feature_val_prob[fea_val])
                else:
                    # Otherwise fall back to the Laplace-smoothed probability of an unseen value
                    laplace_prob = 1 / (self._ck_counter[label] + self._Sj[i])
                    prob += np.log(laplace_prob)
            post_prob[label] = prob
        prob_list = list(post_prob.items())
        prob_list.sort(key=lambda v: v[1], reverse=True)
        # Return the class with the largest posterior probability
        return prob_list[0][0]


def main():
    X, y = load_iris(return_X_y=True)
    xtrain, xtest, ytrain, ytest = train_test_split(X, y, train_size=0.8, shuffle=True)
    model = NaiveBayesScratch()
    model.fit(xtrain, ytrain)
    n_test = xtest.shape[0]
    n_right = 0
    for i in range(n_test):
        y_pred = model.predict(xtest[i])
        if y_pred == ytest[i]:
            n_right += 1
        else:
            logger.info("True label of this sample: {}, but the Scratch model predicted: {}".format(ytest[i], y_pred))
    logger.info("Accuracy of the Scratch model on the test set: {}%".format(n_right * 100 / n_test))


if __name__ == "__main__":
    main()
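The predict docstring stresses that posteriors are accumulated in log space rather than multiplied directly. The short sketch below is not part of the original article; the 500 made-up probabilities and the weather-style toy data are purely illustrative, and the second half assumes the NaiveBayesScratch class defined above is already in scope.

import numpy as np

# 1. Why log probabilities: multiplying many small conditional probabilities
#    underflows float64, while summing their logarithms stays representable.
probs = np.full(500, 1e-3)            # 500 hypothetical conditional probabilities
print(np.prod(probs))                 # 0.0 -- 1e-1500 is far below float64's ~1e-308 limit
print(np.sum(np.log(probs)))          # about -3453.9, still perfectly usable for argmax

# 2. A made-up categorical toy set, only to show how the class is called and
#    how the Laplace-smoothed priors can be inspected; NaiveBayesScratch is
#    assumed to be defined as above.
X_toy = np.array([["sunny", "hot"],
                  ["sunny", "mild"],
                  ["rainy", "mild"],
                  ["rainy", "cool"]])
y_toy = np.array(["no", "no", "yes", "yes"])

toy_model = NaiveBayesScratch()
toy_model.fit(X_toy, y_toy)
print(dict(toy_model._prior_prob))    # each prior is (2 + 1) / (4 + 2) = 0.5
print(toy_model.predict(np.array(["sunny", "cool"])))

The toy counts are small enough to check the smoothing denominators (class count plus Sj) by hand, which is the quickest way to convince yourself that fit matches the Bayesian-estimation formula it documents.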
That concludes this example of implementing the naive Bayes algorithm for machine learning in Python. For more material on implementing naive Bayes in Python, see the other related articles on 億速云.