PCA.py
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

def load_data(file_path):
    """Parse the whitespace-separated feature file into a feature matrix and per-frame digit labels.

    Blocks of frames are separated by blank lines; every 660 blocks belong to the next digit.
    """
    with open(file_path, 'r') as file:
        lines = file.readlines()
    data = []
    labels = []
    digit = 0
    block_count = 0
    for line in lines:
        if line.strip():  # Non-blank line: one frame of features
            features = list(map(float, line.strip().split()))
            data.append(features)
            labels.append(digit)
        else:  # Blank line: end of a block
            block_count += 1
            if block_count == 660:  # 660 blocks per digit
                digit += 1
                block_count = 0
    return np.array(data), np.array(labels)

# Load the dataset
data, labels = load_data('Train_Arabic_Digit.txt')
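
# Optional sanity check (not part of the original script): for the UCI Spoken Arabic Digit
# training data one would expect 13 MFCC features per frame and 10 digit classes, but the
# exact shape depends on the file, so this is only a hedged sketch of a quick verification.
print('Feature matrix shape:', data.shape)
print('Distinct labels:', np.unique(labels))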
# Standardize the data
scaler = StandardScaler()
data_std = scaler.fit_transform(data)
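
# Optional check (an assumption, not in the original script): after StandardScaler each
# feature should have roughly zero mean and unit variance on the training data.
print('Per-feature mean (first 5, approx. 0):', np.round(data_std.mean(axis=0), 3)[:5])
print('Per-feature std  (first 5, approx. 1):', np.round(data_std.std(axis=0), 3)[:5])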
# Initialize PCA
pca = PCA()
# Fit PCA on the standardized data
pca.fit(data_std)
# Calculate the cumulative sum of explained variance ratio
cumulative_variance = np.cumsum(pca.explained_variance_ratio_)
# Output the cumulative variance
print(cumulative_variance)
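
# A common follow-up (not in the original script; the 0.95 threshold is an assumption):
# pick the smallest number of components whose cumulative explained variance reaches 95%.
n_components_95 = int(np.argmax(cumulative_variance >= 0.95) + 1)
print(f'Components needed for 95% of the variance: {n_components_95}')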
plt.figure(figsize=(10, 6))
# Plot against the component count (1-based) so the x-axis matches "Number of Components"
plt.plot(range(1, len(cumulative_variance) + 1), cumulative_variance, marker='o')
plt.title('Cumulative Explained Variance by Number of Components')
plt.xlabel('Number of Components')
plt.ylabel('Cumulative Explained Variance')
plt.grid(True)
# Save the plot to a file
plt.savefig('.png')
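
# Sketch of a possible next step (not in the original script): re-fit PCA with the chosen
# number of components and project the standardized data onto them. n_components_95 comes
# from the hedged follow-up above; any downstream use of data_reduced is an assumption.
pca_reduced = PCA(n_components=n_components_95)
data_reduced = pca_reduced.fit_transform(data_std)
print('Reduced data shape:', data_reduced.shape)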