PCA: Implementing Principal Component Analysis by Hand

import numpy as np
import pandas as pd

# Note: iris.data has no header row. As written, read_csv treats the first
# sample as column names, so the frame below has 149 rows instead of 150.
df = pd.read_csv('iris.data')
df.head()
5.1 3.5 1.4 0.2 Iris-setosa
0 4.9 3.0 1.4 0.2 Iris-setosa
1 4.7 3.2 1.3 0.2 Iris-setosa
2 4.6 3.1 1.5 0.2 Iris-setosa
3 5.0 3.6 1.4 0.2 Iris-setosa
4 5.4 3.9 1.7 0.4 Iris-setosa
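Notice that the printed column names (5.1, 3.5, ...) are really the first sample. A minimal sketch of the fix, assuming the standard UCI iris.data file (the rest of this walkthrough keeps the original 149-row frame so the printed outputs match):

# Load without consuming the first row as a header; supply the same
# column names the next cell assigns. df_full is a hypothetical name.
cols = ['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'class']
df_full = pd.read_csv('iris.data', header=None, names=cols)
print(df_full.shape)  # (150, 5) -- all 150 samples retained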

1 Data Preprocessing

# Assign column names
df.columns = ['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'class']
df.head()
sepal_len sepal_wid petal_len petal_wid class
0 4.9 3.0 1.4 0.2 Iris-setosa
1 4.7 3.2 1.3 0.2 Iris-setosa
2 4.6 3.1 1.5 0.2 Iris-setosa
3 5.0 3.6 1.4 0.2 Iris-setosa
4 5.4 3.9 1.7 0.4 Iris-setosa

2 Plotting: Feature Analysis for Dimensionality Reduction

# Split the data table into feature matrix X and class labels y
X = df.iloc[:, 0:4].values
y = df.iloc[:, 4].values
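A quick shape check (note the 149 rows, one short of the full iris set, per the header issue above):

print(X.shape, y.shape)  # (149, 4) (149,)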
# Plot each feature as per-class histograms, to see which features
# separate the species best
from matplotlib import pyplot as plt

label_dict = {1: 'Iris-Setosa',
              2: 'Iris-Versicolor',
              3: 'Iris-Virginica'}

feature_dict = {0: 'sepal length [cm]',
                1: 'sepal width [cm]',
                2: 'petal length [cm]',
                3: 'petal width [cm]'}

plt.figure(figsize=(8, 6))
for cnt in range(4):
    plt.subplot(2, 2, cnt + 1)
    for lab in ('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'):
        plt.hist(X[y == lab, cnt],
                 label=lab,
                 bins=10,
                 alpha=0.3)
    plt.xlabel(feature_dict[cnt])
    plt.legend(loc='upper right', fancybox=True, fontsize=8)

plt.tight_layout()
plt.show()

[Figure: per-class histograms of each of the four features]

# Standardize the features (zero mean, unit variance)
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
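StandardScaler is just the per-column z-score; a minimal sketch of the equivalence, assuming X from above:

# Manual z-score; StandardScaler divides by the population std (ddof=0)
X_manual = (X - X.mean(axis=0)) / X.std(axis=0)
print(np.allclose(X_manual, X_std))  # True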

3 Covariance Analysis (two components turn out to carry most of the variance, so we reduce from 4 dimensions to 2)

1 Compute the covariance matrix of X (4 features, so it is 4x4)

# Compute the covariance matrix by hand.
# X_std is already zero-mean, so subtracting mean_vec is a no-op,
# but it mirrors the textbook formula.
mean_vec = np.mean(X_std, axis=0)
cov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0] - 1)
print('Covariance matrix \n%s' % cov_mat)
Covariance matrix 
[[ 1.00675676 -0.10448539  0.87716999  0.82249094]
 [-0.10448539  1.00675676 -0.41802325 -0.35310295]
 [ 0.87716999 -0.41802325  1.00675676  0.96881642]
 [ 0.82249094 -0.35310295  0.96881642  1.00675676]]
# Cross-check with NumPy's built-in covariance
print('NumPy covariance matrix: \n%s' % np.cov(X_std.T))
NumPy covariance matrix: 
[[ 1.00675676 -0.10448539  0.87716999  0.82249094]
 [-0.10448539  1.00675676 -0.41802325 -0.35310295]
 [ 0.87716999 -0.41802325  1.00675676  0.96881642]
 [ 0.82249094 -0.35310295  0.96881642  1.00675676]]
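Why is the diagonal 1.0068 rather than exactly 1? StandardScaler divides by the population standard deviation (ddof=0), while the covariance formula divides by n-1, so each diagonal entry equals n/(n-1). A quick check, assuming the 149-row X_std:

n = X_std.shape[0]      # 149 rows (one sample was lost to the header)
print(n / (n - 1))      # 1.0067567..., matching the diagonal above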

2 Eigendecomposition of the covariance matrix

cov_mat = np.cov(X_std.T)

eig_vals, eig_vecs = np.linalg.eig(cov_mat)

print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
Eigenvectors 
[[ 0.52308496 -0.36956962 -0.72154279  0.26301409]
 [-0.25956935 -0.92681168  0.2411952  -0.12437342]
 [ 0.58184289 -0.01912775  0.13962963 -0.80099722]
 [ 0.56609604 -0.06381646  0.63380158  0.52321917]]

Eigenvalues 
[ 2.92442837  0.93215233  0.14946373  0.02098259]
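A sanity check that each pair really satisfies the eigenvalue equation (for a symmetric matrix like cov_mat, np.linalg.eigh would also be a natural choice); a minimal sketch:

# Each eigenpair must satisfy cov_mat @ v = lambda * v
for lam, v in zip(eig_vals, eig_vecs.T):
    assert np.allclose(cov_mat.dot(v), lam * v)
print('all eigenpairs verified')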

3 Sort the eigenvalues from largest to smallest, keeping each paired with its eigenvector

# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
print(eig_pairs)
print('----------')

# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)

# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in descending order:')
for i in eig_pairs:
    print(i[0], "->", i[1])
[(2.9244283691111144, array([ 0.52308496, -0.25956935,  0.58184289,  0.56609604])), (0.93215233025350641, array([-0.36956962, -0.92681168, -0.01912775, -0.06381646])), (0.14946373489813314, array([-0.72154279,  0.2411952 ,  0.13962963,  0.63380158])), (0.020982592764270606, array([ 0.26301409, -0.12437342, -0.80099722,  0.52321917]))]
----------
Eigenvalues in descending order:
2.92442836911 -> [ 0.52308496 -0.25956935  0.58184289  0.56609604]
0.932152330254 -> [-0.36956962 -0.92681168 -0.01912775 -0.06381646]
0.149463734898 -> [-0.72154279  0.2411952   0.13962963  0.63380158]
0.0209825927643 -> [ 0.26301409 -0.12437342 -0.80099722  0.52321917]
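An equivalent NumPy-only way to get the same ordering, assuming the eig_vals/eig_vecs arrays from above:

# argsort gives ascending order; reverse it for descending eigenvalues
order = np.argsort(eig_vals)[::-1]
eig_vals_sorted = eig_vals[order]
eig_vecs_sorted = eig_vecs[:, order]   # columns reordered to match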

4 Use the cumulative share of the eigenvalues to decide how many components to keep, then build the projection matrix W

# Percentage of variance explained by each component, largest first
tot = sum(eig_vals)
var_exp = [(i / tot) * 100 for i in sorted(eig_vals, reverse=True)]
print(var_exp)
cum_var_exp = np.cumsum(var_exp)
cum_var_exp
[72.620033326920336, 23.147406858644135, 3.7115155645845164, 0.52104424985101538]
array([  72.62003333,   95.76744019,   99.47895575,  100.        ])
# Quick demo of what np.cumsum does
a = np.array([1, 2, 3, 4])
print(a)
print('-----------')
print(np.cumsum(a))
[1 2 3 4]
-----------
[ 1  3  6 10]
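With the cumulative percentages in hand, the cutoff can also be picked programmatically; a small sketch, assuming cum_var_exp from above:

# Smallest k whose cumulative explained variance reaches 95%
k = int(np.searchsorted(cum_var_exp, 95)) + 1
print(k)  # 2 -- the first two components already explain ~95.8%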
plt.figure(figsize=(6, 4))

plt.bar(range(4), var_exp, alpha=0.5, align='center',
        label='individual explained variance')
plt.step(range(4), cum_var_exp, where='mid',
         label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.tight_layout()
plt.show()

[Figure: bar chart of individual explained variance, with cumulative step curve]

# Stack the top two eigenvectors column-wise into the 4x2 projection matrix W
matrix_w = np.hstack((eig_pairs[0][1].reshape(4, 1),
                      eig_pairs[1][1].reshape(4, 1)))

print('Matrix W:\n', matrix_w)
Matrix W:
 [[ 0.52308496 -0.36956962]
 [-0.25956935 -0.92681168]
 [ 0.58184289 -0.01912775]
 [ 0.56609604 -0.06381646]]

4 Dimensionality Reduction: project the sample matrix X with W

Y = X_std.dot(matrix_w)   # (149, 4) x (4, 2) -> (149, 2)
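This matches what sklearn's PCA computes, up to the sign of each component (eigenvectors are only defined up to sign); a quick cross-check, assuming X_std and Y from above:

from sklearn.decomposition import PCA

# Compare absolute values to ignore per-component sign flips
Y_sk = PCA(n_components=2).fit_transform(X_std)
print(np.allclose(np.abs(Y), np.abs(Y_sk)))  # True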

5 Plot the sample distribution before and after the reduction

# Before: samples on the first two original features
plt.figure(figsize=(6, 4))
for lab, col in zip(('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'),
                    ('blue', 'red', 'green')):
    plt.scatter(X[y == lab, 0],
                X[y == lab, 1],
                label=lab,
                c=col)
plt.xlabel('sepal_len')
plt.ylabel('sepal_wid')
plt.legend(loc='best')
plt.tight_layout()
plt.show()

[Figure: samples on the two original sepal features]

# After: samples on the first two principal components
plt.figure(figsize=(6, 4))
for lab, col in zip(('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'),
                    ('blue', 'red', 'green')):
    plt.scatter(Y[y == lab, 0],
                Y[y == lab, 1],
                label=lab,
                c=col)
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(loc='lower center')
plt.tight_layout()
plt.show()

[Figure: samples projected onto the first two principal components]

꧁༺The༒End༻꧂