```python
# -*- coding: utf-8 -*-
"""DL_Day 01_introduction.ipynb

Automatically generated by Colaboratory.
"""

from google.colab import drive
drive.mount('/gdrive')
PATH = "/gdrive/My Drive/Colab Notebooks/resources/"

import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
import numpy as np

print(tf.__version__)

## Dense => a layer of fully connected neurons.
## A model with one layer holding one unit, taking a single input value.
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])

## Loss function and optimizer:
## the loss function measures the difference between each prediction and its
## target, and hands that measurement to the optimizer, which uses it to make
## the next guess. This repeats until the algorithm converges.
## loss => mean squared error, optimizer => stochastic gradient descent.
model.compile(optimizer='sgd', loss='mean_squared_error')

X = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
y = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)

## run the training loop for 1000 epochs
model.fit(X, y, epochs=1000)

model.predict([10.0])
model.get_weights()
```

In this exercise you'll try to build a neural network that predicts the price of a house according to a simple formula. Imagine house pricing were as easy as: a house costs 50k plus 50k per bedroom, so a 1-bedroom house costs 100k, a 2-bedroom house costs 150k, and so on. How would you create a neural network that learns this relationship, so that it predicts a 7-bedroom house as costing close to 400k? Hint: your network might work better if you scale the house price down. You don't have to make it output 400; it might be better to create something that predicts the number 4, and then read the answer in hundreds of thousands.

```python
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')

## scaled-down rule: price in tens of thousands = 5 + 5 * bedrooms
f = lambda x: 5 + 5 * x
X = np.linspace(0, 10, 11)   # 0, 1, ..., 10 (the original uint32 cast truncated non-integer points)
y = f(X)
model.fit(X, y, epochs=500)
print(model.predict([7.0]) * 10)   # scale back up: roughly 400 (thousands)
```

# Fashion MNIST

```python
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```

**Sequential**: defines a SEQUENCE of layers in the neural network.

**Flatten**: takes a square matrix and turns it into a 1-dimensional array.

**Dense**: adds a layer of neurons.

Each layer of neurons needs an **activation function** to tell it what to do. There are lots of options, but just use these two for now.

**ReLU** effectively means "if x > 0 return x, else return 0", so it only passes values 0 or greater on to the next layer in the network.
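As a quick numeric check of that rule (a minimal NumPy sketch added here for illustration; it is not part of the original notebook):

```python
import numpy as np

def relu(x):
    # element-wise max(0, x): negative inputs become 0, the rest pass through
    return np.maximum(0, x)

print(relu(np.array([-2.0, -0.5, 0.0, 1.5, 3.0])))
# -> [0.  0.  0.  1.5 3. ]
```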
**Softmax** takes a set of values and scales them into probabilities that sum to 1, so the biggest input ends up with by far the highest probability. For example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], softmax saves you from fishing through it for the biggest value by turning it into something very close to [0, 0, 0, 0, 1, 0, 0, 0, 0]. The goal is to save a lot of coding!

```python
## Now try a model with three layers. Focus on the last layer, which has
## 10 neurons because there are ten classes of clothing in the dataset.
## The first layer is a Flatten layer that takes the 28x28 square and turns
## it into a simple linear array.
## The middle layer has 128 neurons. Think of them as variables in a function:
## each neuron has its own weight w, and the weights are adjusted until the
## classifications come out correct.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

plt.imshow(train_images[0], cmap=plt.cm.gray)
print(train_labels[0])

## normalize the pixel values from 0-255 down to 0-1
train_images = train_images / 255.0
test_images = test_images / 255.0

## (this cell originally started with the %%time magic, which the Colab
## export turned into a string; restored here so the code actually runs)
model.compile(optimizer='adam',   # 'adam' replaces the deprecated TF1 tf.train.AdamOptimizer()
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)

model.evaluate(test_images, test_labels)

## the result is a list of per-class probabilities; the highest one is the model's prediction
classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])

## The middle layer has been increased to 1024 neurons. What is the effect of that?
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(1024, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)

model.evaluate(test_images, test_labels)

classifications = model.predict(test_images)
print(classifications[0])
print(test_labels[0])

## a callback can stop training as soon as a condition is satisfied
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('loss') < 0.4:
            print("\nLoss fell below 0.4, so cancelling training!")
            self.model.stop_training = True

callbacks = myCallback()
## the original called fit with undefined names (training_images/training_labels)
model.fit(train_images, train_labels, epochs=5, callbacks=[callbacks])
```

# MNIST

```python
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

## normalize (the original normalized train_images/test_images here by mistake)
x_train = x_train / 255.0
x_test = x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(1024, activation=tf.nn.relu),
    tf.keras.layers.Dense(1024, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        ## note: under TF 2.x this metric is logged as 'accuracy', not 'acc'
        if logs.get('acc') > 0.99:
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True

callbacks = myCallback()
model.fit(x_train, y_train, epochs=15, callbacks=[callbacks])
model.evaluate(x_test, y_test)
```
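Two small follow-up sketches (added for illustration; they assume the cells above have already run in the same session). First, the probability vector in `classifications[0]` can be reduced to a human-readable label with `np.argmax`; the list below is the standard Fashion MNIST class order.

```python
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

pred = np.argmax(classifications[0])          # index of the highest probability
print("predicted:", class_names[pred])
print("actual:   ", class_names[test_labels[0]])
print("confidence:", classifications[0][pred])
```

Second, run directly after the very first `model.fit(X, y, epochs=1000)` cell, `get_weights()` should show the model recovering the line y = 2x - 1:

```python
w, b = model.get_weights()                  # only valid right after the single-unit model is trained
print("weight:", w[0][0], "bias:", b[0])    # expect roughly 2.0 and -1.0
```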
References and source: Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning, Week 1 and 2 exercises