from __future__ import division, print_function
import os
import numpy as np
import random as rnd
filepath = os.path.dirname(os.path.abspath(__file__))
class SVM():
    """
    Support vector machine classifier trained with a simplified
    SMO (Sequential Minimal Optimization) algorithm.
    """
    def __init__(self, max_iter=10000, kernel_type='linear', C=1.0, epsilon=0.001):
        self.kernels = {
            'linear': self.kernel_linear,
            'quadratic': self.kernel_quadratic
        }
        self.max_iter = max_iter
        self.kernel_type = kernel_type
        self.C = C
        self.epsilon = epsilon
    def fit(self, X, y):
        # Initialization
        n, d = X.shape[0], X.shape[1]
        alpha = np.zeros(n)
        kernel = self.kernels[self.kernel_type]
        count = 0
        while True:
            count += 1
            alpha_prev = np.copy(alpha)
            for j in range(0, n):
                i = self.get_rnd_int(0, n-1, j)  # pick a random i != j
                x_i, x_j, y_i, y_j = X[i, :], X[j, :], y[i], y[j]
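                # k_ij below is the curvature (second derivative) of the dual
                # objective along the constraint line, often written as eta in
                # SMO write-ups; a zero-curvature pair is skipped to avoid
                # dividing by zero in the alpha update.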
                k_ij = kernel(x_i, x_i) + kernel(x_j, x_j) - 2 * kernel(x_i, x_j)
                if k_ij == 0:
                    continue
                alpha_prime_j, alpha_prime_i = alpha[j], alpha[i]
                L, H = self.compute_L_H(self.C, alpha_prime_j, alpha_prime_i, y_j, y_i)
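                # Recompute the model from the current alphas to obtain the
                # prediction errors E_i and E_j. Note this goes through an
                # explicit weight vector, which is only exact for the linear
                # kernel.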
                self.w = self.calc_w(alpha, y, X)
                self.b = self.calc_b(X, y, self.w)
                E_i = self.E(x_i, y_i, self.w, self.b)
                E_j = self.E(x_j, y_j, self.w, self.b)
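                # SMO update: move alpha_j along the constraint line by
                # y_j * (E_i - E_j) / k_ij, clip it to the box [L, H], then
                # adjust alpha_i to keep y_i*alpha_i + y_j*alpha_j constant.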
                alpha[j] = alpha_prime_j + float(y_j * (E_i - E_j)) / k_ij
                alpha[j] = max(alpha[j], L)
                alpha[j] = min(alpha[j], H)
                alpha[i] = alpha_prime_i + y_i * y_j * (alpha_prime_j - alpha[j])
            # Convergence check
            diff = np.linalg.norm(alpha - alpha_prev)
            if diff < self.epsilon:
                break
            if count >= self.max_iter:
                print("Iteration number exceeded the max of %d iterations" % (self.max_iter))
                break
        # Compute the final model parameters
        self.b = self.calc_b(X, y, self.w)
        if self.kernel_type == 'linear':
            self.w = self.calc_w(alpha, y, X)
        # Support vectors are the samples with non-zero multipliers
        alpha_idx = np.where(alpha > 0)[0]
        support_vectors = X[alpha_idx, :]
        return support_vectors, count
    def predict(self, X):
        return self.h(X, self.w, self.b)
    def calc_b(self, X, y, w):
        b_tmp = y - np.dot(w.T, X.T)
        return np.mean(b_tmp)
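    # Primal weights w = sum_i alpha_i * y_i * x_i (meaningful for the
    # linear kernel only)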
    def calc_w(self, alpha, y, X):
        return np.dot(alpha * y, X)
    # Decision function: sign(w.x + b)
    def h(self, X, w, b):
        return np.sign(np.dot(w.T, X.T) + b).astype(int)
    # Prediction error for a single sample
    def E(self, x_k, y_k, w, b):
        return self.h(x_k, w, b) - y_k
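    # Box bounds on the new alpha_j implied by 0 <= alpha <= C together with
    # the equality constraint y_i*alpha_i + y_j*alpha_j = constant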
    def compute_L_H(self, C, alpha_prime_j, alpha_prime_i, y_j, y_i):
        if y_i != y_j:
            return (max(0, alpha_prime_j - alpha_prime_i), min(C, C - alpha_prime_i + alpha_prime_j))
        else:
            return (max(0, alpha_prime_i + alpha_prime_j - C), min(C, alpha_prime_i + alpha_prime_j))
    def get_rnd_int(self, a, b, z):
        i = z
        while i == z:
            i = rnd.randint(a, b)
        return i
    # Define kernels
    def kernel_linear(self, x1, x2):
        return np.dot(x1, x2.T)
    def kernel_quadratic(self, x1, x2):
        return np.dot(x1, x2.T) ** 2
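

# A minimal usage sketch (not part of the original module). The toy data and
# parameter values below are illustrative: two separable Gaussian blobs with
# labels in {-1, +1}, which is the label convention fit() expects.
if __name__ == '__main__':
    np.random.seed(0)
    rnd.seed(0)
    # Build a linearly separable 2-D dataset
    X_pos = np.random.randn(20, 2) + np.array([2.0, 2.0])
    X_neg = np.random.randn(20, 2) + np.array([-2.0, -2.0])
    X = np.vstack((X_pos, X_neg))
    y = np.hstack((np.ones(20), -np.ones(20)))

    model = SVM(max_iter=1000, kernel_type='linear', C=1.0, epsilon=0.001)
    support_vectors, count = model.fit(X, y)
    accuracy = np.mean(model.predict(X) == y)
    print("Converged in %d passes; %d support vectors; train accuracy %.2f"
          % (count, len(support_vectors), accuracy))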