使用TensorFlow實(shí)現(xiàn)SVM
較基礎(chǔ)的SVM,后續(xù)會(huì)加上多分類以及高斯核,供大家參考。
Talk is cheap, show me the code
import tensorflow as tf
from sklearn.base import BaseEstimator, ClassifierMixin
import numpy as np
class TFSVM(BaseEstimator, ClassifierMixin):
    """Linear soft-margin SVM built on the TensorFlow 1.x graph API.

    Minimizes ``mean(hinge_loss) + C * ||W||^2`` with mini-batch Adam and
    keeps the parameters of the best validation loss (early stopping).

    Expects binary labels encoded as -1/+1 with shape (n_samples, 1).
    Only the linear kernel is implemented; ``kernel`` is stored for later use.
    """

    def __init__(self,
                 C=1, kernel='linear',
                 learning_rate=0.01,
                 training_epoch=1000,
                 display_step=50,
                 batch_size=50,
                 random_state=42):
        # Hyper-parameters, stored verbatim so sklearn's get_params() works.
        self.svmC = C
        self.kernel = kernel
        self.learning_rate = learning_rate
        self.training_epoch = training_epoch
        self.display_step = display_step
        self.random_state = random_state
        self.batch_size = batch_size

    def reset_seed(self):
        # Re-seed both TF's and NumPy's RNGs for reproducible training runs.
        tf.set_random_seed(self.random_state)
        np.random.seed(self.random_state)

    def random_batch(self, X, y):
        """Draw one random mini-batch (sampling with replacement)."""
        # BUG FIX: the lower bound was 1, which silently excluded row 0 from
        # ever being sampled (np.random.randint's low bound is inclusive).
        indices = np.random.randint(0, X.shape[0], self.batch_size)
        return X[indices], y[indices]

    def _build_graph(self, X_train, y_train):
        """Build the computation graph: placeholders, loss, optimizer, eval ops."""
        self.reset_seed()
        n_instances, n_inputs = X_train.shape
        X = tf.placeholder(tf.float32, [None, n_inputs], name='X')
        y = tf.placeholder(tf.float32, [None, 1], name='y')
        with tf.name_scope('trainable_variables'):
            # Parameters of the decision boundary: f(x) = xW + b.
            W = tf.Variable(tf.truncated_normal(shape=[n_inputs, 1], stddev=0.1),
                            name='weights')
            b = tf.Variable(tf.truncated_normal([1]), name='bias')
        with tf.name_scope('training'):
            # Core of the algorithm: raw margin, L2 penalty, hinge loss.
            y_raw = tf.add(tf.matmul(X, W), b)
            l2_norm = tf.reduce_sum(tf.square(W))
            # BUG FIX: the original used tf.zeros(self.batch_size, 1), which
            # (a) passes 1 as the *dtype* argument of tf.zeros and (b) hard-wires
            # the batch size, so the loss could not be evaluated on a validation
            # set of a different size. A scalar 0. broadcasts to any batch size.
            hinge_loss = tf.reduce_mean(
                tf.maximum(0., tf.subtract(1., tf.multiply(y_raw, y))))
            svm_loss = tf.add(hinge_loss, tf.multiply(float(self.svmC), l2_norm))
            training_op = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(svm_loss)
        with tf.name_scope('eval'):
            # Prediction is the sign of the raw margin; accuracy vs -1/+1 labels.
            prediction_class = tf.sign(y_raw)
            correct_prediction = tf.equal(y, prediction_class)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        init = tf.global_variables_initializer()
        # Keep handles on every node fit()/predict() will need.
        self._X = X; self._y = y
        self._loss = svm_loss; self._training_op = training_op
        self._accuracy = accuracy; self.init = init
        self._prediction_class = prediction_class
        self._W = W; self._b = b

    def _get_model_params(self):
        """Snapshot all global variables so the best model can be restored later."""
        with self._graph.as_default():
            gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        return {gvar.op.name: value
                for gvar, value in zip(gvars, self._session.run(gvars))}

    def _restore_model_params(self, model_params):
        """Restore a parameter snapshot produced by _get_model_params()."""
        gvar_names = list(model_params.keys())
        # Each variable's initializer has an <name>/Assign op whose second
        # input is the value placeholder — feed it directly to overwrite.
        assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + '/Assign')
                      for gvar_name in gvar_names}
        init_values = {gvar_name: assign_op.inputs[1]
                       for gvar_name, assign_op in assign_ops.items()}
        feed_dict = {init_values[gvar_name]: model_params[gvar_name]
                     for gvar_name in gvar_names}
        self._session.run(assign_ops, feed_dict=feed_dict)

    def fit(self, X, y, X_val=None, y_val=None):
        """Train the SVM.

        A validation set (X_val, y_val) is required: it drives early stopping
        and the selection of the best parameters. Returns self.
        """
        n_batches = X.shape[0] // self.batch_size
        self._graph = tf.Graph()
        with self._graph.as_default():
            self._build_graph(X, y)
        best_loss = np.infty
        best_accuracy = 0
        best_params = None
        checks_without_progress = 0
        max_checks_without_progress = 20
        self._session = tf.Session(graph=self._graph)
        with self._session.as_default() as sess:
            self.init.run()
            for epoch in range(self.training_epoch):
                # One sweep of random mini-batches (mini-batch gradient descent).
                for batch_index in range(n_batches):
                    X_batch, y_batch = self.random_batch(X, y)
                    sess.run(self._training_op,
                             feed_dict={self._X: X_batch, self._y: y_batch})
                # Per-epoch evaluation on the validation set and the last batch.
                loss_val, accuracy_val = sess.run(
                    [self._loss, self._accuracy],
                    feed_dict={self._X: X_val, self._y: y_val})
                accuracy_train = self._accuracy.eval(
                    feed_dict={self._X: X_batch, self._y: y_batch})
                if loss_val < best_loss:
                    best_loss = loss_val
                    best_params = self._get_model_params()
                    checks_without_progress = 0
                else:
                    checks_without_progress += 1
                    if checks_without_progress > max_checks_without_progress:
                        break  # early stopping: no progress for 20 epochs
                if accuracy_val > best_accuracy:
                    best_accuracy = accuracy_val
                if epoch % self.display_step == 0:
                    # BUG FIX: corrected the 'Validaiton' typo in the message.
                    print('Epoch: {}\tValidation loss: {:.6f}\tValidation Accuracy: {:.4f}\tTraining Accuracy: {:.4f}'
                          .format(epoch, loss_val, accuracy_val, accuracy_train))
            print('Best Accuracy: {:.4f}\tBest Loss: {:.6f}'.format(best_accuracy, best_loss))
            if best_params:
                self._restore_model_params(best_params)
                # NOTE(review): kept the original storage names for backward
                # compatibility, but they are swapped relative to sklearn
                # convention — _intercept holds the weight vector W and _bias
                # holds b. (The original also defined _intercept/_bias *methods*
                # that these assignments shadowed; those dead methods are removed.)
                self._intercept = best_params['trainable_variables/weights']
                self._bias = best_params['trainable_variables/bias']
        return self

    def predict(self, X):
        """Return class predictions in {-1., 0., +1.} (sign of the raw margin)."""
        with self._session.as_default():
            return self._prediction_class.eval(feed_dict={self._X: X})
實(shí)際運(yùn)行效果如下(以Iris數(shù)據(jù)集為樣本):
畫出決策邊界來看看:
以上就是本文的全部?jī)?nèi)容,希望對(duì)大家的學(xué)習(xí)有所幫助,也希望大家多多支持腳本之家。
- win10下python3.5.2和tensorflow安裝環(huán)境搭建教程
- win10下tensorflow和matplotlib安裝教程
- python3.6.3安裝圖文教程 TensorFlow安裝配置方法
- tensorflow實(shí)現(xiàn)簡(jiǎn)單邏輯回歸
- Tensorflow使用支持向量機(jī)擬合線性回歸
- TensorFlow實(shí)現(xiàn)iris數(shù)據(jù)集線性回歸
- TensorFlow實(shí)現(xiàn)模型評(píng)估
- 使用tensorflow實(shí)現(xiàn)線性svm
- TensorFlow Session使用的兩種方法小結(jié)
- C++調(diào)用tensorflow教程
相關(guān)文章
Python 3.x 判斷 dict 是否包含某鍵值的實(shí)例講解
今天小編就為大家分享一篇Python 3.x 判斷 dict 是否包含某鍵值的實(shí)例講解,具有很好的參考價(jià)值,希望對(duì)大家有所幫助。一起跟隨小編過來看看吧2018-07-07
淺談python連續(xù)賦值可能引發(fā)的錯(cuò)誤
今天小編就為大家分享一篇淺談python連續(xù)賦值可能引發(fā)的錯(cuò)誤,具有很好的參考價(jià)值,希望對(duì)大家有所幫助。一起跟隨小編過來看看吧2018-11-11
jupyter 使用Pillow包顯示圖像時(shí)inline顯示方式
這篇文章主要介紹了jupyter 使用Pillow包顯示圖像時(shí)inline顯示方式,具有很好的參考價(jià)值,希望對(duì)大家有所幫助。一起跟隨小編過來看看吧2020-04-04
Python使用Pandas處理測(cè)試數(shù)據(jù)的方法
Pandas是一個(gè)功能極其強(qiáng)大的數(shù)據(jù)分析庫(kù),可以高效地操作各種數(shù)據(jù)集,這篇文章主要介紹了Python自動(dòng)化測(cè)試-使用Pandas來高效處理測(cè)試數(shù)據(jù),需要的朋友可以參考下2023-02-02
利用Pytorch實(shí)現(xiàn)簡(jiǎn)單的線性回歸算法
今天小編就為大家分享一篇利用Pytorch實(shí)現(xiàn)簡(jiǎn)單的線性回歸算法,具有很好的參考價(jià)值,希望對(duì)大家有所幫助。一起跟隨小編過來看看吧2020-01-01
關(guān)于AnacondaNavigator?Jupyter?Notebook更換Python內(nèi)核的問題
因?yàn)樾掳惭b的Anaconda?Navigator默認(rèn)安裝了一個(gè)Python,Jupyter?Notebook默認(rèn)使用的內(nèi)核就是這個(gè)Python,跟我系統(tǒng)安裝好的Python沖突了,下面小編給大家介紹AnacondaNavigator?Jupyter?Notebook更換Python內(nèi)核的問題,需要的朋友可以參考下2022-02-02
Pandas中運(yùn)行速度優(yōu)化的常用方法介紹
這篇文章主要為大家詳細(xì)介紹了幾種pandas中常用到的方法,對(duì)于這些方法使用存在哪些需要注意的問題,以及如何對(duì)它們進(jìn)行速度提升,需要的小伙伴可以參考下2025-03-03
Python實(shí)現(xiàn)ElGamal加密算法的示例代碼
ElGamal加密算法是一個(gè)基于迪菲-赫爾曼密鑰交換的非對(duì)稱加密算法。這篇文章通過示例代碼給大家介紹Python實(shí)現(xiàn)ElGamal加密算法的相關(guān)知識(shí),感興趣的朋友一起看看吧2020-06-06

