lung_data.py
import logging
from typing import Tuple

import scipy.io as sio
from numpy import ndarray
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler

DATA_PATH = 'data/'
FILE_NAME = 'lung.mat'
TEST_SIZE = 1 / 3

def load_lung_data() -> Tuple[ndarray, ndarray]:
    """
    Load the lung.mat data set from: https://jundongl.github.io/scikit-feature/datasets.html

    Dataset characteristics:
        # samples: 203
        # features: 3312
        # output classes: 5

    With few samples and many features, expect high variance.
    """
    lung_data = sio.loadmat(DATA_PATH + FILE_NAME)
    X_: ndarray = lung_data['X']
    y_: ndarray = lung_data['Y']
    # One-hot encode the integer class labels: shape (n_samples, 1) -> (n_samples, 5).
    enc = OneHotEncoder().fit(y_)
    y_ = enc.transform(y_).astype('uint8').toarray()
    logging.info("Available output categories:")
    logging.info(enc.categories_)
    return X_, y_

def train_test_split_normalize(X_: ndarray, y_: ndarray, test_size=TEST_SIZE, random_state=42) \
        -> Tuple[ndarray, ndarray, ndarray, ndarray]:
    X_train_, X_test_, y_train_, y_test_ = train_test_split(X_, y_, test_size=test_size, random_state=random_state)
    # Fit the scaler on the training split only, then apply it to both splits,
    # so no test-set statistics leak into the normalization.
    normalize = StandardScaler()
    normalize.fit(X_train_)
    X_train_ = normalize.transform(X_train_)
    X_test_ = normalize.transform(X_test_)
    return X_train_, X_test_, y_train_, y_test_
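

if __name__ == '__main__':
    # Minimal usage sketch (assumption: data/lung.mat has been downloaded locally
    # from the scikit-feature dataset page referenced in load_lung_data).
    # Load features and one-hot labels, then split into train/test and standardize.
    logging.basicConfig(level=logging.INFO)
    X, y = load_lung_data()
    X_train, X_test, y_train, y_test = train_test_split_normalize(X, y)
    logging.info("Train shape: %s, test shape: %s", X_train.shape, X_test.shape)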