Let's implement an FNN (feedforward neural network) using numpy!
config
import numpy as np
import pandas as pd


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


config = AttrDict()
config.lr = 0.001
config.bias = 0.7
config.epochs = 500
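AttrDict simply exposes its dictionary keys as attributes, so the same entry can be read or written either way. A quick illustrative check (not part of the original code), using a throwaway instance:

demo = AttrDict()
demo.lr = 0.01        # attribute-style write
print(demo['lr'])     # 0.01 -- the same entry is visible as a dict key
demo['epochs'] = 3    # dict-style write
print(demo.epochs)    # 3  -- and readable as an attribute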
Data
The class 0 features are multiplied by 10 to make the two classes clearly different.
def generate_binary_dataset(num_samples, num_features, random_state=50):
    np.random.seed(random_state)
    # generate class 0 samples (scaled by 10 to separate the classes)
    num_samples_class0 = num_samples // 2
    class0_features = np.random.normal(loc=2, scale=1, size=(num_samples_class0, num_features)) * 10
    class0_labels = np.zeros((num_samples_class0, 1))
    # generate class 1 samples
    num_samples_class1 = num_samples - num_samples_class0
    class1_features = np.random.normal(loc=-2, scale=1, size=(num_samples_class1, num_features))
    class1_labels = np.ones((num_samples_class1, 1))
    # stack the two classes into one dataset
    features = np.vstack((class0_features, class1_features))
    labels = np.vstack((class0_labels, class1_labels))
    # shuffle the sample order
    random_indices = np.random.permutation(num_samples)
    features = features[random_indices]
    labels = labels[random_indices]
    return (features, labels)
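A quick shape and class-balance check on the generator (my own illustration, not in the original post):

X, y = generate_binary_dataset(10, 4)
print(X.shape, y.shape)   # (10, 4) (10, 1)
print(y.sum())            # 5.0 -> exactly half of the samples are class 1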
Activation function + loss
To keep backpropagation simple, the update does not use the derivative.
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    sig = sigmoid(x)
    return sig * (1 - sig)

def cross_entropy_loss(target, prediction):
    epsilon = 1e-7
    loss = -np.mean(target * np.log(prediction + epsilon) + (1 - target) * np.log(1 - prediction + epsilon))
    return loss
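As a small sanity check (illustrative, not from the post): the loss should be near zero for a confident correct prediction and large for a confident wrong one, and the sigmoid derivative peaks at 0.25 when its input is 0.

print(cross_entropy_loss(np.array([1.0]), np.array([0.99])))   # ~0.01  (confident and correct)
print(cross_entropy_loss(np.array([1.0]), np.array([0.01])))   # ~4.6   (confident and wrong)
print(sigmoid_derivative(0.0))                                  # 0.25, the maximum slope of the sigmoid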
Network
Network setup; implements add_layout, forward, backward, and summary.
class Network():
    def __init__(self, config):
        self.config = config
        self.W = []   # weight matrices, one per layer
        self.B = []   # bias vectors, one per layer
        self.Z = []   # pre-activations from the last forward pass
        self.A = []   # activations from the last forward pass

    def add_layout(self, input_size, output_size):
        w = np.random.randn(output_size, input_size)
        b = np.random.randn(output_size, 1)
        self.W.append(w)
        self.B.append(b)

    def forward(self, x):
        self.Z = []
        self.A = [x]
        for i in range(len(self.W)):
            z = np.dot(self.W[i], self.A[-1]) + self.B[i]
            self.Z.append(z)
            a = sigmoid(z)
            self.A.append(a)
        return self.A[-1]

    def backward(self, x, y, y_hat):
        # simplified update: every weight and bias is nudged by lr * (y - y_hat)
        # instead of taking a true gradient step
        dW = []
        dB = []
        for w in reversed(self.W):
            W = w + self.config.lr * (y - y_hat)
            dW.append(W)
        for b in reversed(self.B):
            B = b + self.config.lr * (y - y_hat)
            dB.append(B)
        dW.reverse()
        dB.reverse()
        self.W = dW
        self.B = dB

    def summary(self):
        print('-------------------------------------------------')
        print('{:^15} {:^15} {:^15}'.format('Layer', 'W shape', 'Output shape'))
        print('=================================================')
        for i in range(len(self.W)):
            print('{:^15} {:^15} {:^15}'.format(
                i, str(np.array(self.W[i]).shape), str(np.array(self.W[i]).shape[0])))
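For comparison, a proper gradient-based backward pass for this architecture would look roughly like the sketch below. This is my own sketch under the post's sigmoid + cross-entropy setup, not part of the original code; it relies on the fact that for a sigmoid output with binary cross-entropy, the output-layer error simplifies to y_hat - y.

def backward_with_gradients(net, y, lr):
    # output-layer error for sigmoid + binary cross-entropy: dL/dz = y_hat - y
    delta = net.A[-1] - y
    for i in reversed(range(len(net.W))):
        dW = np.dot(delta, net.A[i].T)   # gradient w.r.t. W[i]
        dB = delta                       # gradient w.r.t. B[i]
        if i > 0:
            # push the error back through W[i] and the previous layer's sigmoid
            delta = np.dot(net.W[i].T, delta) * sigmoid_derivative(net.Z[i - 1])
        net.W[i] -= lr * dW
        net.B[i] -= lr * dB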
Creating the model
model = Network(config)
model.add_layout(4,3)
model.add_layout(3,2)
model.add_layout(2,1)
model.summary()
-------------------------------------------------
Layer W shape Output shape
=================================================
0 (3, 4) 3
1 (2, 3) 2
2 (1, 2) 1
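The layer shapes above also make it easy to count the trainable parameters (illustrative snippet, not in the original):

n_params = sum(w.size + b.size for w, b in zip(model.W, model.B))
print(n_params)   # (3*4 + 3) + (2*3 + 2) + (1*2 + 1) = 26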
Training
datax, datay = generate_binary_dataset(100, 4)
loss = []
for i in range(config.epochs):
    for j, (x, y) in enumerate(zip(datax, datay)):
        x = np.array(x.reshape(-1, 1))
        y = np.array(y)
        pre = model.forward(x)
        model.backward(x, y, pre)
        # print(pre, y)
        if j == 0 and i % 10 == 0:
            loss.append(cross_entropy_loss(y, pre))   # target first, prediction second
            print('[{}/{}] {}'.format(i, config.epochs, cross_entropy_loss(y, pre)))
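After training, the model can be evaluated by thresholding the sigmoid output at 0.5. This accuracy check is my own addition, not part of the original post:

correct = 0
for x, y in zip(datax, datay):
    prob = model.forward(x.reshape(-1, 1))[0, 0]     # scalar probability of class 1
    correct += int((prob >= 0.5) == (y[0] == 1))
print('accuracy:', correct / len(datay))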
loss
Because no real gradient-based optimization is performed, the loss does not drop sharply, but it still shows a decreasing trend.
df = pd.DataFrame(loss)
df_plot = df.plot(kind="line", grid=True).get_figure()
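If you want to keep the plot as a file, the matplotlib Figure returned by get_figure() can be saved directly (optional; the file name is just an example):

df_plot.savefig('loss_curve.png')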