
Commit 04ba809: Add files via upload
1 parent 3ae9834

16 files changed: 21812 additions & 0 deletions

tf_2.x/data_in/ChatBotData.csv: 11824 additions & 0 deletions (large diff not rendered by default)

tf_2.x/lab-11-0-cnn-basics-keras-eager.ipynb: 462 additions & 0 deletions (large diff not rendered by default)

tf_2.x/lab-11-1-mnist-cnn-keras-sequential-eager.ipynb: 426 additions & 0 deletions (large diff not rendered by default)

tf_2.x/lab-11-2-mnist-cnn-keras-functional-eager.ipynb: 427 additions & 0 deletions (large diff not rendered by default)

tf_2.x/lab-11-3-mnist-cnn-keras-subclassing-eager.ipynb: 439 additions & 0 deletions (large diff not rendered by default)
Lines changed: 397 additions & 0 deletions
@@ -0,0 +1,397 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import absolute_import\n",
"from __future__ import division\n",
"from __future__ import print_function"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Importing Libraries"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"2.1.0\n",
"2.2.4-tf\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"from tensorflow.keras.utils import to_categorical\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import os\n",
"\n",
"print(tf.__version__)\n",
"print(keras.__version__)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Hyper Parameters"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"learning_rate = 0.001\n",
"training_epochs = 15\n",
"batch_size = 100\n",
"\n",
"tf.random.set_seed(777)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating Checkpoint Directory"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"cur_dir = os.getcwd()\n",
"ckpt_dir_name = 'checkpoints'\n",
"model_dir_name = 'mnist_cnn_ensemble'\n",
"\n",
"checkpoint_dir = os.path.join(cur_dir, ckpt_dir_name, model_dir_name)\n",
"os.makedirs(checkpoint_dir, exist_ok=True)\n",
"\n",
"checkpoint_prefix = os.path.join(checkpoint_dir, model_dir_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## MNIST/Fashion MNIST Data"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"## MNIST Dataset #########################################################\n",
"mnist = keras.datasets.mnist\n",
"class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n",
"##########################################################################\n",
"\n",
"## Fashion MNIST Dataset #################################################\n",
"#mnist = keras.datasets.fashion_mnist\n",
"#class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n",
"##########################################################################"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Datasets"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n",
"\n",
"train_images = train_images.astype(np.float32) / 255.\n",
"test_images = test_images.astype(np.float32) / 255.\n",
"train_images = np.expand_dims(train_images, axis=-1)\n",
"test_images = np.expand_dims(test_images, axis=-1)\n",
"\n",
"train_labels = to_categorical(train_labels, 10)\n",
"test_labels = to_categorical(test_labels, 10)\n",
"\n",
"train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(\n",
"    buffer_size=100000).batch(batch_size)\n",
"test_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(batch_size)"
]
},
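{
"cell_type": "markdown",
"metadata": {},
"source": [
"*Optional sanity check (a minimal sketch; it only assumes the variables defined above).* The pipeline should yield float32 images of shape `(N, 28, 28, 1)` scaled into `[0, 1]`, one-hot labels of length 10, and batches of `batch_size` examples."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# expected: (60000, 28, 28, 1) (60000, 10) for the MNIST train split\n",
"print(train_images.shape, train_labels.shape)\n",
"print(test_images.shape, test_labels.shape)\n",
"\n",
"# a single batch from the tf.data pipeline: (100, 28, 28, 1) (100, 10)\n",
"for images, labels in train_dataset.take(1):\n",
"    print(images.shape, labels.shape)"
]
},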
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Model Class"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"class MNISTModel(tf.keras.Model):\n",
"    def __init__(self):\n",
"        super(MNISTModel, self).__init__()\n",
"        self.conv1 = keras.layers.Conv2D(filters=32, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)\n",
"        self.pool1 = keras.layers.MaxPool2D(padding='SAME')\n",
"        self.conv2 = keras.layers.Conv2D(filters=64, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)\n",
"        self.pool2 = keras.layers.MaxPool2D(padding='SAME')\n",
"        self.conv3 = keras.layers.Conv2D(filters=128, kernel_size=[3, 3], padding='SAME', activation=tf.nn.relu)\n",
"        self.pool3 = keras.layers.MaxPool2D(padding='SAME')\n",
"        self.pool3_flat = keras.layers.Flatten()\n",
"        self.dense4 = keras.layers.Dense(units=256, activation=tf.nn.relu)\n",
"        self.drop4 = keras.layers.Dropout(rate=0.4)\n",
"        self.dense5 = keras.layers.Dense(units=10)\n",
"\n",
"    def call(self, inputs, training=False):\n",
"        net = self.conv1(inputs)\n",
"        net = self.pool1(net)\n",
"        net = self.conv2(net)\n",
"        net = self.pool2(net)\n",
"        net = self.conv3(net)\n",
"        net = self.pool3(net)\n",
"        net = self.pool3_flat(net)\n",
"        net = self.dense4(net)\n",
"        net = self.drop4(net, training=training)\n",
"        net = self.dense5(net)\n",
"        return net"
]
},
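{
"cell_type": "markdown",
"metadata": {},
"source": [
"*A minimal wiring check (a sketch; it assumes a `(1, 28, 28, 1)` probe input).* Three stride-2 `MaxPool2D` layers with `'SAME'` padding shrink 28 to 14 to 7 to 4, so `Flatten` hands 4 x 4 x 128 = 2048 features to the dense layers and the final `Dense` emits 10 raw logits."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"probe = MNISTModel()\n",
"_ = probe(tf.zeros((1, 28, 28, 1)))  # one forward pass creates the weights\n",
"probe.summary()  # the last Dense layer should report 10 output units"
]
},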
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"models = []\n",
"num_models = 3\n",
"for m in range(num_models):\n",
"    models.append(MNISTModel())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Loss Function"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def loss_fn(model, images, labels):\n",
"    logits = model(images, training=True)\n",
"    loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(\n",
"        y_pred=logits, y_true=labels, from_logits=True))\n",
"    return loss"
]
},
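{
"cell_type": "markdown",
"metadata": {},
"source": [
"The final `Dense` layer has no activation, so the model returns raw logits and `from_logits=True` lets the loss apply a numerically stable softmax internally. A minimal equivalent sketch with the class-based loss API (an alternative formulation, not the one used in training below):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# the default reduction averages over the batch, matching the\n",
"# tf.reduce_mean in loss_fn above\n",
"cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\n",
"\n",
"def loss_fn_v2(model, images, labels):\n",
"    logits = model(images, training=True)\n",
"    return cce(y_true=labels, y_pred=logits)"
]
},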
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Calculating Gradient"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def grad(model, images, labels):\n",
"    with tf.GradientTape() as tape:\n",
"        loss = loss_fn(model, images, labels)\n",
"    return tape.gradient(loss, model.variables)"
]
},
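{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal variant for comparison: `model.trainable_variables` is the more common differentiation target; `MNISTModel` has no non-trainable weights, so both forms yield the same update here."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def grad_v2(model, images, labels):\n",
"    with tf.GradientTape() as tape:\n",
"        loss = loss_fn(model, images, labels)\n",
"    # trainable_variables skips frozen weights (none in this model)\n",
"    return tape.gradient(loss, model.trainable_variables)"
]
},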
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Calculating Model's Accuracy"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def evaluate(models, images, labels):\n",
"    predictions = np.zeros_like(labels)\n",
"    for model in models:\n",
"        logits = model(images, training=False)\n",
"        predictions += logits\n",
"    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))\n",
"    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
"    return accuracy"
]
},
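{
"cell_type": "markdown",
"metadata": {},
"source": [
"`evaluate` sums raw logits over the ensemble members before the argmax, a simple form of soft voting. A minimal alternative sketch that averages softmax probabilities instead (a common variant, not what the accuracy numbers below use):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def evaluate_softmax(models, images, labels):\n",
"    # average class probabilities across the ensemble members\n",
"    probs = tf.add_n([tf.nn.softmax(model(images, training=False))\n",
"                      for model in models]) / len(models)\n",
"    correct = tf.equal(tf.argmax(probs, 1), tf.argmax(labels, 1))\n",
"    return tf.reduce_mean(tf.cast(correct, tf.float32))"
]
},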
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Optimizer"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creating Checkpoints"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"checkpoints = []\n",
"for m in range(num_models):\n",
"    checkpoints.append(tf.train.Checkpoint(cnn=models[m]))"
]
},
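{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each `tf.train.Checkpoint` tracks one ensemble member under the key `cnn`. A minimal restore sketch, assuming the save paths returned by `checkpoint.save()` are kept so each model maps back to its own files:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# checkpoint.save() returns the path it wrote; keeping those paths makes\n",
"# per-model restore unambiguous when several prefixes share one directory\n",
"save_paths = [ckpt.save(file_prefix=checkpoint_prefix + '-{}'.format(idx))\n",
"              for idx, ckpt in enumerate(checkpoints)]\n",
"\n",
"for ckpt, path in zip(checkpoints, save_paths):\n",
"    ckpt.restore(path)"
]
},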
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Training"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Learning started. It takes sometime.\n",
"Epoch: 1 loss = 0.16236770 train accuracy = 0.9656 test accuracy = 0.9900\n",
"Epoch: 2 loss = 0.04053748 train accuracy = 0.9928 test accuracy = 0.9925\n",
"Epoch: 3 loss = 0.02740762 train accuracy = 0.9956 test accuracy = 0.9938\n",
"Epoch: 4 loss = 0.01959979 train accuracy = 0.9970 test accuracy = 0.9943\n",
"Epoch: 5 loss = 0.01581027 train accuracy = 0.9980 test accuracy = 0.9938\n",
"Epoch: 6 loss = 0.01319993 train accuracy = 0.9986 test accuracy = 0.9935\n",
"Epoch: 7 loss = 0.01084083 train accuracy = 0.9990 test accuracy = 0.9943\n",
"Epoch: 8 loss = 0.00893507 train accuracy = 0.9992 test accuracy = 0.9945\n",
"Epoch: 9 loss = 0.00811294 train accuracy = 0.9993 test accuracy = 0.9940\n",
"Epoch: 10 loss = 0.00708519 train accuracy = 0.9997 test accuracy = 0.9946\n",
"Epoch: 11 loss = 0.00574807 train accuracy = 0.9996 test accuracy = 0.9953\n",
"Epoch: 12 loss = 0.00582443 train accuracy = 0.9997 test accuracy = 0.9950\n",
"Epoch: 13 loss = 0.00497008 train accuracy = 0.9998 test accuracy = 0.9949\n",
"Epoch: 14 loss = 0.00471057 train accuracy = 0.9999 test accuracy = 0.9952\n",
"Epoch: 15 loss = 0.00387729 train accuracy = 0.9999 test accuracy = 0.9953\n",
"Learning Finished!\n"
]
}
],
"source": [
"# train my model\n",
"print('Learning started. It takes sometime.')\n",
"for epoch in range(training_epochs):\n",
"    avg_loss = 0.\n",
"    avg_train_acc = 0.\n",
"    avg_test_acc = 0.\n",
"    train_step = 0\n",
"    test_step = 0\n",
"\n",
"    for images, labels in train_dataset:\n",
"        for model in models:\n",
"            grads = grad(model, images, labels)\n",
"            optimizer.apply_gradients(zip(grads, model.variables))\n",
"            loss = loss_fn(model, images, labels)\n",
"            avg_loss += loss / num_models\n",
"        acc = evaluate(models, images, labels)\n",
"        avg_train_acc += acc\n",
"        train_step += 1\n",
"    avg_loss = avg_loss / train_step\n",
"    avg_train_acc = avg_train_acc / train_step\n",
"\n",
"    for images, labels in test_dataset:\n",
"        acc = evaluate(models, images, labels)\n",
"        avg_test_acc += acc\n",
"        test_step += 1\n",
"    avg_test_acc = avg_test_acc / test_step\n",
"\n",
"    print('Epoch:', '{}'.format(epoch + 1), 'loss =', '{:.8f}'.format(avg_loss),\n",
"          'train accuracy = ', '{:.4f}'.format(avg_train_acc),\n",
"          'test accuracy = ', '{:.4f}'.format(avg_test_acc))\n",
"\n",
"    for idx, checkpoint in enumerate(checkpoints):\n",
"        checkpoint.save(file_prefix=checkpoint_prefix + '-{}'.format(idx))\n",
"\n",
"print('Learning Finished!')"
]
},
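{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal inference sketch on a single held-out image, reusing the summed-logits voting from `evaluate` and the `matplotlib` import above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# classify one test image with the summed-logits ensemble\n",
"image = test_images[:1]  # shape (1, 28, 28, 1)\n",
"logits = tf.add_n([m(image, training=False) for m in models])\n",
"pred = int(tf.argmax(logits, 1).numpy()[0])\n",
"\n",
"plt.imshow(image[0, :, :, 0], cmap='gray')\n",
"plt.title('predicted: ' + class_names[pred])\n",
"plt.show()"
]
},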
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
