# -*- coding: utf-8 -*-
"""Day 21_Diabetes_MultiClass.ipynb

Logistic-regression exercises written against the TensorFlow 1.x graph API:

  1. A toy 2-feature binary classifier on 6 hand-made samples.
  2. Binary classification of the Pima diabetes data set (8 features).
  3. k-fold cross-validation of the diabetes model.

NOTE(review): this script uses tf.placeholder / tf.Session and therefore
requires TF 1.x (or tf.compat.v1 with eager execution disabled) — it will
not run as-is under TF 2.x. It also assumes a Colab runtime with Google
Drive mounted.
"""

from google.colab import drive

drive.mount('/gdrive')
PATH = "/gdrive/My Drive/Colab Notebooks/resources/"

import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
import numpy as np

print(tf.__version__)

# ---------------------------------------------------------------------------
# 1) Toy logistic regression.
#    Data is stored column-major: X has shape (n_features, m) so the model
#    is Z = W @ X + B with W of shape (1, n_features).
# ---------------------------------------------------------------------------
xdata = np.transpose([[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]])  # (2, 6)
ydata = np.transpose([[0], [0], [0], [1], [1], [1]])                    # (1, 6)

X = tf.placeholder(tf.float32, shape=[2, None])
y = tf.placeholder(tf.float32, shape=[1, None])

W1 = tf.Variable(tf.random.normal([1, 2]))
B1 = tf.Variable(tf.random.normal([1]))

Z1 = W1 @ X + B1        # linear part, shape (1, m)
A1 = tf.sigmoid(Z1)     # predicted probability of class 1

# Binary cross-entropy loss.
loss = -tf.reduce_mean(y * tf.log(A1) + (1 - y) * tf.log(1 - A1))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Cast the boolean decision (p > 0.5) to float so it can be compared to y.
predicted = tf.cast(A1 > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10001):
        _, cv = sess.run([train, loss], feed_dict={X: xdata, y: ydata})
        if step % 200 == 0:
            print(cv)
    hv, pv, av = sess.run([A1, predicted, accuracy],
                          feed_dict={X: xdata, y: ydata})
    print("hf = ", hv, "pred = ", pv, "acc : ", av)

# ---------------------------------------------------------------------------
# 2) Diabetes data set: 8 features per row, binary label in the last column.
# ---------------------------------------------------------------------------
df = np.loadtxt(PATH + "data/diabetes.csv", delimiter=",")
xdata = df[:, :-1].T    # (8, m)
ydata = df[:, -1]       # (m,)
print(xdata.shape, ydata.shape)  # was a bare expression (notebook cell echo)

n_x = 8                 # number of input features
n_y = 1                 # number of output units

X = tf.placeholder(tf.float32, shape=[n_x, None])
y = tf.placeholder(tf.float32, shape=[None])

W1 = tf.Variable(tf.random.normal([1, n_x]))
B1 = tf.Variable(tf.random.normal([1]))

Z1 = W1 @ X + B1
A1 = tf.sigmoid(Z1)

cost = -tf.reduce_mean(y * tf.log(A1) + (1 - y) * tf.log(1 - A1))
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

predicted = tf.cast(A1 > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(20001):
        # BUG FIX: the original ran the train op only when step % 2000 == 0,
        # i.e. 11 optimizer updates in 20001 iterations. Train every step and
        # only log every 2000.
        cv, _ = sess.run([cost, train], feed_dict={X: xdata, y: ydata})
        if step % 2000 == 0:
            print(step, cv)
    hv, pv, av = sess.run([A1, predicted, accuracy],
                          feed_dict={X: xdata, y: ydata})
    print("hv : ", hv, "pv : ", pv, "acc : ", av)

# ---------------------------------------------------------------------------
# 3) k-fold cross-validation of the same graph.
# ---------------------------------------------------------------------------
m = xdata.shape[1]
indices = np.random.permutation(m)
k = 3
total_acc = 0.0
for i in range(k):
    # BUG FIX: the original sliced up to int((i + 1) * (m / k) - 1), silently
    # dropping the last sample of every test fold (it leaked into the train
    # split via setdiff1d). Use the full half-open range instead.
    test_indices = indices[int(i * (m / k)): int((i + 1) * (m / k))]
    train_indices = np.setdiff1d(indices, test_indices)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(20001):
            # Same fix as above: optimize every step, log every 2000.
            cv, _ = sess.run([cost, train],
                             feed_dict={X: xdata[:, train_indices],
                                        y: ydata[train_indices]})
            if step % 2000 == 0:
                print(step, cv)
        fold_acc = sess.run(accuracy,
                            feed_dict={X: xdata[:, test_indices],
                                       y: ydata[test_indices]})
        print(fold_acc)
        total_acc += fold_acc

# BUG FIX: the accumulated quantity is accuracy, not error — label it so.
print('mean test accuracy : ', total_acc / k)

# Sigmoid note (translated from the original Korean markdown cell):
# For sigmoid(x), the output approaches 0 when x < 0 and approaches 1 when
# x > 0. Classifying with a sigmoid means searching for the decision boundary
# at which the prediction y-hat agrees with the label y.
'딥러닝 모델 설계 > Machine Learning' 카테고리의 다른 글
Day 16_PCA_HR_DataSet (0) | 2019.08.01 |
---|---|
Day 15_Diabetes_House_XGBoost (0) | 2019.07.31 |
Day 14_house_price (0) | 2019.07.30 |
Day 13_bikeShare_DataSet (0) | 2019.07.25 |
Day 12_RandomForest_MushroomDataset (0) | 2019.07.19 |