

import pandas as pd
from io import StringIO

csv_data = '''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
0.0,11.0,12.0,
'''
df = pd.read_csv(StringIO(csv_data))
print('df:\n ', df)
print('df.isnull().sum():\n ', df.isnull().sum())

# Eliminating samples or features with missing values
print('df.dropna():\n ', df.dropna()) # drop rows that contain missing values
print('df.dropna(axis=1):\n ', df.dropna(axis=1)) # drop columns that contain missing values
print("df.dropna(how='all'):\n", df.dropna(how='all')) # only drop rows where all columns are NaN
print('df.dropna(thresh=4):\n', df.dropna(thresh=4)) # drop rows that have fewer than 4 non-NaN values
print("df.dropna(subset=['C']):\n", df.dropna(subset=['C'])) # only drop rows where NaN appears in column 'C'

# Imputing missing values
# Mean imputation: simply replace each missing value with the mean of the entire feature column
from sklearn.preprocessing import Imputer
imr = Imputer(missing_values='NaN', strategy='mean', axis=0)
imr = imr.fit(df)
imputed_data = imr.transform(df.values)
print('imputed_data:\n', imputed_data)
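
# Note: the Imputer class used above was removed in scikit-learn 0.22. A minimal sketch of the
# same mean imputation with the newer API (assuming scikit-learn >= 0.20 is available):
import numpy as np
from sklearn.impute import SimpleImputer
imr_new = SimpleImputer(missing_values=np.nan, strategy='mean')
print('SimpleImputer result:\n', imr_new.fit_transform(df.values))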


import pandas as pd
df = pd.DataFrame([
    ['green', 'M', 10.1, 'class1'],
    ['red', 'L', 13.5, 'class2'],
    ['blue', 'XL', 15.3, 'class1']
])
df.columns = ['color', 'size', 'price', 'classlabel']
print('df:\n', df)

# Mapping ordinal features
# e.g. XL = L + 1 = M + 2
size_mapping = {'XL': 3, 'L': 2, 'M': 1}
df['size'] = df['size'].map(size_mapping)
print('df:\n', df)

# Inverse mapping
inv_size_mapping = {v: k for k, v in size_mapping.items()}
print('inv_size_mapping:\n', inv_size_mapping)
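
# For illustration, applying the inverse mapping restores the original size labels:
print("df['size'].map(inv_size_mapping):\n", df['size'].map(inv_size_mapping))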


# Encoding class labels
import numpy as np
class_mapping = {label:idx for idx, label in enumerate(np.unique(df['classlabel']))}
print('class_mapping:\n', class_mapping)

df['classlabel'] = df['classlabel'].map(class_mapping)
print('df:\n', df)
# Inverse mapping
inv_class_mapping = {v: k for k, v in class_mapping.items()}
df['classlabel'] = df['classlabel'].map(inv_class_mapping)
print('df:\n', df)

from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
print('y:\n', y)

print('class_le.inverse_transform(y):\n', class_le.inverse_transform(y))


# Performing one-hot encoding on nominal features
X = df[['color', 'size', 'price']].values
color_le = LabelEncoder()
X[:, 0] = color_le.fit_transform(X[:, 0])
print('X:\n', X)


# one hot encoding
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(categorical_features=[0])
print('ohe.fit_transform(X).toarray():\n', ohe.fit_transform(X).toarray())
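
# Note: the categorical_features parameter was removed from OneHotEncoder in scikit-learn 0.22.
# A minimal sketch of the same step with the newer API, one-hot encoding only the first column
# and passing the rest through (assuming scikit-learn >= 0.20):
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer([('onehot', OneHotEncoder(), [0])], remainder='passthrough', sparse_threshold=0)
print('ct.fit_transform(X):\n', ct.fit_transform(X))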

print('df:\n', df)
print("pd.get_dummies(df[['price', 'color', 'size']]):\n", pd.get_dummies(df[['price', 'color', 'size']]))


# Partitioning the dataset into separate training and test sets
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol',
                   'Malic acid', 'Ash',
                   'Alcalinity of ash', 'Magnesium',
                   'Total phenols', 'Flavanoids',
                   'Nonflavanoid phenols', 'Proanthocyanins',
                   'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines', 'Proline']
print('Class labels', np.unique(df_wine['Class label']))
print('df_wine.head():\n', df_wine.head())

# A convenient way to randomly partition the data into separate training and test sets
# is the train_test_split function from scikit-learn's cross_validation submodule
from sklearn.cross_validation import train_test_split
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
print('X:\n', X)  # feature matrix (all columns except the class label)
print('y:\n', y)  # class labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
print('X_train:\n', X_train)
print('X_test:\n', X_test)
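
# Note: the cross_validation submodule was deprecated; since scikit-learn 0.18 train_test_split
# lives in model_selection. The newer import below is a drop-in replacement (shown for reference):
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)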

# Min-max scaling (normalization) procedure
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train)
X_test_norm = mms.transform(X_test)
print('X_train_norm:\n', X_train_norm)
print('X_test_norm:\n', X_test_norm)
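
# Min-max scaling maps each value to x_norm = (x - x_min) / (x_max - x_min), computed per column
# on the training data. A quick sanity check against MinMaxScaler (illustrative only):
X_min, X_max = X_train.min(axis=0), X_train.max(axis=0)
print('manual min-max matches MinMaxScaler:', np.allclose((X_train - X_min) / (X_max - X_min), X_train_norm))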

# Standardization procedure
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
print('X_train:\n', X_train)
print('X_train_std:\n', X_train_std)
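
# Standardization maps each value to z = (x - mean) / std, using the column means and standard
# deviations of the training data. A quick sanity check against StandardScaler (illustrative only):
print('manual standardization matches StandardScaler:',
      np.allclose((X_train - X_train.mean(axis=0)) / X_train.std(axis=0), X_train_std))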


############ Common approaches to reduce overfitting

from sklearn.linear_model import LogisticRegression
LogisticRegression(penalty='l1')
print("LogistincRegression(penality='l1'):\n", LogisticRegression(penalty='l1'))
lr = LogisticRegression(penalty='l1', C=0.1)
lr.fit(X_train_std, y_train)
print('Training accuracy:', lr.score(X_train_std, y_train))
print('Test accuracy:', lr.score(X_test_std, y_test))
lr.intercept_
print('lr.intercept_: ', lr.intercept_)
lr.coef_
print('lr.coef_: ', lr.coef_)
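
# Note: in newer scikit-learn versions the default solver ('lbfgs') does not support the L1 penalty,
# so an explicit solver must be passed. A minimal sketch of the same model for scikit-learn >= 0.22:
lr_l1 = LogisticRegression(penalty='l1', C=0.1, solver='liblinear')
lr_l1.fit(X_train_std, y_train)
print('Training accuracy (liblinear, L1):', lr_l1.score(X_train_std, y_train))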


# Plot the regularization path: the weight coefficients of the different features for varying regularization strengths
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.subplot(111)
colors = ['blue', 'green', 'red', 'cyan',
          'magenta', 'yellow', 'black',
          'pink', 'lightgreen', 'lightblue',
          'gray', 'indigo', 'orange']
weights, params = [], []
for c in np.arange(-4, 6):
    lr = LogisticRegression(penalty='l1', C=10**np.float32(c), random_state=0)
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])
    params.append(10**np.float32(c))

weights = np.array(weights)
for column, color in zip(range(weights.shape[1]), colors):
    plt.plot(params, weights[:, column], label=df_wine.columns[column+1], color=color)

plt.axhline(0, color='black', linestyle='--', linewidth=3)
plt.xlim([10**(-5), 10**5])
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.xscale('log')
plt.legend(loc='upper left')
ax.legend(loc='upper center', bbox_to_anchor=(1.38, 1.03), ncol=1, fancybox=True)
plt.show()
# Sequential feature selection algorithms
'''
SBS (Sequential Backward Selection): aims to reduce the dimensionality of the initial feature
subspace to improve computational efficiency, with a minimum decay in classifier performance.
'''
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
                   'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids',
                   'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines', 'Proline']

from sklearn.cross_validation import train_test_split
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values

from sklearn.base import clone
from itertools import combinations
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
class SBS():
    def __init__(self, estimator, k_features, scoring=accuracy_score,
                 test_size=0.25, random_state=1):
        self.scoring = scoring
        self.estimator = clone(estimator)
        self.k_features = k_features
        self.test_size = test_size
        self.random_state = random_state

    def fit(self, X, y):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size,
                                                            random_state=self.random_state)
        dim = X_train.shape[1]
        #print('dim:\n', dim)
        self.indices_ = tuple(range(dim))
        #print('self.indices:\n', self.indices_)
        self.subsets_ = [self.indices_]
        #print('self.subsets:\n', self.subsets_)
        score = self._calc_score(X_train, y_train, X_test, y_test, self.indices_)
        #print('score:\n', score)
        self.scores_ = [score]

        while dim > self.k_features:
            scores = []
            subsets = []

            for p in combinations(self.indices_, r=dim-1):
                score = self._calc_score(X_train, y_train, X_test, y_test, p)
                scores.append(score)
                subsets.append(p)

            best = np.argmax(scores)
            self.indices_ = subsets[best]
            self.subsets_.append(self.indices_)
            dim -= 1

            self.scores_.append(scores[best])
            #print('self.scores_:\n', self.scores_)
        self.k_score_ = self.scores_[-1]

        return self

    def transform(self, X):
        return X[:, self.indices_]

    def _calc_score(self, X_train, y_train, X_test, y_test, indices):
        self.estimator.fit(X_train[:, indices], y_train)
        y_pred = self.estimator.predict(X_test[:, indices])
        score = self.scoring(y_test, y_pred)
        return score

from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
knn = KNeighborsClassifier(n_neighbors=2)
sbs = SBS(knn, k_features=1)
sbs.fit(X_train_std, y_train)
#print('sbs.subsets_:\n', sbs.subsets_)
k_feat = [len(k) for k in sbs.subsets_]
plt.plot(k_feat, sbs.scores_, marker='o')
plt.ylim([0.7, 1.1])
plt.ylabel('Accuracy')
plt.xlabel('Number of features')
plt.grid()
plt.show()

k5 = list(sbs.subsets_[8])
#print('k5:\n', k5)
print('df_wine.columns[1:][k5]:\n', df_wine.columns[1:][k5])

knn.fit(X_train_std, y_train)
print('Training accuracy:', knn.score(X_train_std, y_train))
print('Test accuracy:', knn.score(X_test_std, y_test))

knn.fit(X_train_std[:, k5], y_train)
print('Training accuracy:', knn.score(X_train_std[:, k5], y_train))
print('Test accuracy:', knn.score(X_test_std[:, k5], y_test))
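
# Aside: newer scikit-learn (0.24+) ships a built-in SequentialFeatureSelector. It is not identical
# to the SBS class above (it scores candidate subsets with cross-validation rather than a held-out
# validation split), but a rough backward-selection sketch looks like this:
from sklearn.feature_selection import SequentialFeatureSelector
sfs = SequentialFeatureSelector(knn, n_features_to_select=5, direction='backward')
sfs.fit(X_train_std, y_train)
print('selected feature indices:', np.where(sfs.get_support())[0])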

#### Assessing feature importance with random forests
'''
Using a random forest, we can measure feature importance as the averaged impurity decrease
computed from all decision trees in the forest, regardless of whether the data is linearly
separable or not. Remember that tree-based models do not require standardization or normalization.
'''
from sklearn.ensemble import RandomForestClassifier
feat_labels = df_wine.columns[1:]
forest = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
    print("%2d) %-*s %f" % (f+1, 30, feat_labels[indices[f]], importances[indices[f]]))

plt.title('Feature Importances')
plt.bar(range(X_train.shape[1]), importances[indices], color='lightblue', align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.show()

X_selected = forest.transform(X_train, threshold=0.15)
print('X_selected.shape:\n', X_selected.shape)
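
# Note: the transform method on fitted estimators shown above was removed in later scikit-learn
# versions. The same selection step can be expressed with SelectFromModel (a sketch, assuming
# scikit-learn >= 0.19):
from sklearn.feature_selection import SelectFromModel
sfm = SelectFromModel(forest, threshold=0.15, prefit=True)
print('SelectFromModel output shape:', sfm.transform(X_train).shape)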

