
[CNN] The CNN we were working on yesterday finally trains~!~!! #27

Open
17011813 opened this issue Jun 1, 2019 · 4 comments

Comments

@17011813

17011813 commented Jun 1, 2019

import os
import numpy as np
import tensorflow as tf
import cv2

from random import shuffle
from tqdm import tqdm

train_dir = '/content/gdrive/My Drive/Colab Notebooks/flowers'


img_size = 28

def label_folder(folders):
    # One-hot label for each of the 10 flower classes
    if folders == 'tulip': return [1,0,0,0,0,0,0,0,0,0]
    elif folders == 'rose': return [0,1,0,0,0,0,0,0,0,0]
    elif folders == 'cosmos': return [0,0,1,0,0,0,0,0,0,0]
    elif folders == 'cherryblossom': return [0,0,0,1,0,0,0,0,0,0]
    elif folders == 'sunflower': return [0,0,0,0,1,0,0,0,0,0]
    elif folders == 'koreaflower': return [0,0,0,0,0,1,0,0,0,0]
    elif folders == 'lily': return [0,0,0,0,0,0,1,0,0,0]
    elif folders == 'buckwheat': return [0,0,0,0,0,0,0,1,0,0]        # buckwheat flower
    elif folders == 'korearosebay': return [0,0,0,0,0,0,0,0,1,0]     # Korean rosebay (azalea)
    elif folders == 'forsythia': return [0,0,0,0,0,0,0,0,0,1]        # forsythia

def create_train_data():
    training_data = []
    dirs = os.listdir(train_dir)
    for folders in dirs:
        label = label_folder(folders)
        req_train_dir = os.path.join(train_dir, folders)
        for img_name in tqdm(os.listdir(req_train_dir)):
            path = os.path.join(req_train_dir, img_name)
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            if img is None:        # skip unreadable files
                continue
            img = cv2.resize(img, (img_size, img_size))
            training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('flower_train_data.npy', training_data)
    return training_data

train_data = create_train_data()

This is where we build the .npy file and attach the labels.
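A quick sanity check on the saved file (a minimal sketch, assuming the save above succeeded):

data = np.load('flower_train_data.npy', allow_pickle=True)
print(len(data))           # total number of labeled samples
print(data[0][0].shape)    # (28, 28) grayscale image
print(data[0][1].shape)    # (10,) one-hot label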
@17011813

17011813 commented Jun 1, 2019

import numpy as np
import tensorflow as tf

# Convolutional Layer 1.
filter_size1 = 4 
num_filters1 = 32
# Convolutional Layer 2.
filter_size2 = 4
num_filters2 = 64
# Convolutional Layer 3.
filter_size3 = 4
num_filters3 = 128
# Convolutional Layer 4
filter_size4 = 4
num_filters4 = 256
# Convolutional Layer 5
filter_size5 = 4
num_filters5 = 128
# Fully-connected layer.
fc_size = 1024         
# Number of color channels for the images: 1 channel for gray-scale (training in grayscale for now...?)
num_channels = 1
# Image dimensions (only squares for now)
img_size = 28
# Size of image when flattened to a single dimension
img_size_flat = img_size * img_size * num_channels
# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)
# Class names, written in the same order as the one-hot labels above
classes = ['tulip', 'rose','cosmos', 'cherryblossom','sunflower','koreaflower','lily', 'buckwheat', 'korearosebay','forsythia']
num_classes = len(classes)
# batch size
batch_size = 50


# Load the data saved by create_train_data()

train_data = np.load('flower_train_data.npy',allow_pickle=True)

def new_weights(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def new_biases(length):
    return tf.Variable(tf.constant(0.05, shape=[length]))

def new_conv_layer(input,num_input_channels,filter_size,num_filters,use_pooling=True):  

    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=shape)
    biases = new_biases(length=num_filters)

    layer = tf.nn.conv2d(input=input,filter=weights,strides=[1, 1, 1, 1],padding='SAME')
    layer += biases

    if use_pooling:
        layer = tf.nn.max_pool(value=layer,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')

    layer = tf.nn.relu(layer)

    return layer, weights

def new_fc_layer(input,num_inputs,num_outputs,use_relu=True): 

    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)

    layer = tf.matmul(input, weights) + biases
    
    if use_relu:
        layer = tf.nn.relu(layer)

    return layer

def flatten_layer(layer):
    # After five 2x2 max-pools, the 28x28 input is down to 1x1x128, so num_features = 128
    layer_shape = layer.get_shape()
    num_features = layer_shape[1:4].num_elements()
    layer_flat = tf.reshape(layer, [-1, num_features])

    return layer_flat, num_features

x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)

layer_conv1, weights_conv1 = new_conv_layer(input=x_image,num_input_channels=num_channels,filter_size=filter_size1,
                                            num_filters=num_filters1,use_pooling=True)
layer_conv2, weights_conv2 = new_conv_layer(input=layer_conv1,num_input_channels=num_filters1,filter_size=filter_size2,
                                            num_filters=num_filters2,use_pooling=True)
layer_conv3, weights_conv3 = new_conv_layer(input=layer_conv2,num_input_channels=num_filters2,filter_size=filter_size3,
                                            num_filters=num_filters3,use_pooling=True)
layer_conv4, weights_conv4 = new_conv_layer(input=layer_conv3,num_input_channels=num_filters3,filter_size=filter_size4,
                                            num_filters=num_filters4,use_pooling=True)
layer_conv5, weights_conv5 = new_conv_layer(input=layer_conv4,num_input_channels=num_filters4,filter_size=filter_size5,
                                            num_filters=num_filters5,use_pooling=True)

layer_flat, num_features = flatten_layer(layer_conv5)
layer_fc1 = new_fc_layer(input=layer_flat,num_inputs=num_features,num_outputs=fc_size,use_relu=True)
layer_fc2 = new_fc_layer(input=layer_fc1,num_inputs=fc_size,num_outputs=num_classes,use_relu=False)

y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,labels=y_true)
cost = tf.reduce_mean(cross_entropy)

optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train = train_data[0:-500]      # hold out the last 500 samples for testing

x_batch = np.array([i[0] for i in train]).reshape(len(train),img_size_flat)
y_true_batch = [i[1] for i in train]

session = tf.Session()
session.run(tf.global_variables_initializer())
total_iterations = 0

def optimize(num_iterations):

    global total_iterations

    for i in range(total_iterations, total_iterations + num_iterations):
        a = 0
        for _ in range(int(len(train) / batch_size)):
            feed_dict_train = {x: x_batch[a:a+batch_size, :], y_true: y_true_batch[a:a+batch_size]}
            session.run(optimizer, feed_dict=feed_dict_train)
            a = a + batch_size

        if i % 2 == 0:
            # Evaluate on the last full batch of this epoch (a may have run past the end of the data)
            feed_dict_eval = {x: x_batch[a-batch_size:a, :], y_true: y_true_batch[a-batch_size:a]}
            print("Iteration = ", i,
                  "Loss = ", session.run(cost, feed_dict=feed_dict_eval),
                  "Train Accuracy = ", session.run(accuracy, feed_dict=feed_dict_eval),
                  )

    total_iterations += num_iterations

optimize(num_iterations=50)

Still fixing the code while watching only cost and accuracy~!~!

Glad it runs at all. If we add test code here (see the sketch below) and train on 60,000 more of our images, it should go up a lot.
I'll keep digging!!!! Woohooo!^______^
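For the test code mentioned above, a minimal sketch (assuming the 500 samples held out by train = train_data[0:-500] serve as the test set):

# Hypothetical test split: the 500 samples not used for training
test = train_data[-500:]
x_test = np.array([i[0] for i in test]).reshape(len(test), img_size_flat)
y_test_batch = [i[1] for i in test]

# Accuracy on the held-out set, using the same graph and session
print("Test Accuracy =", session.run(accuracy, feed_dict={x: x_test, y_true: y_test_batch}))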

[screenshot of the training output]

@17011813

17011813 commented Jun 1, 2019

entropy thresholding
Batch Normalization
https://www.slideshare.net/JeongYeonwoo/mnist-classification
I'll work on boosting performance after I eat, I'm hungry hahahaha
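A minimal sketch of how the Batch Normalization idea above could be wired into the conv layers (TF1-style; new_conv_layer_bn and the is_training placeholder are names introduced here for illustration, not part of the original code):

is_training = tf.placeholder(tf.bool, name='is_training')

def new_conv_layer_bn(input, num_input_channels, filter_size, num_filters, use_pooling=True):
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=shape)

    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
    # Batch norm normalizes activations per batch and supplies its own learned bias (beta),
    # so the separate bias variable is dropped
    layer = tf.layers.batch_normalization(layer, training=is_training)

    if use_pooling:
        layer = tf.nn.max_pool(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    return tf.nn.relu(layer), weights

# Batch norm keeps moving mean/variance via update ops, which must run alongside the optimizer
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)

Every feed_dict would then also need is_training: True during optimization and is_training: False when computing test accuracy.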

@Eomdangyeong

Hahahahahahahahahaha..... it was seriously worth the wait.....

@ghost

ghost commented Jun 1, 2019

Iteration = 0 Loss = 2.2915351 Train Accuracy = 0.18 Test Accuracy = 0.22433333
Iteration = 2 Loss = 1.627243 Train Accuracy = 0.48 Test Accuracy = 0.292
Iteration = 4 Loss = 1.3869513 Train Accuracy = 0.52 Test Accuracy = 0.34633332
Iteration = 6 Loss = 1.2535464 Train Accuracy = 0.56 Test Accuracy = 0.34966666
Iteration = 8 Loss = 0.9723007 Train Accuracy = 0.68 Test Accuracy = 0.37233335
Iteration = 10 Loss = 0.82327163 Train Accuracy = 0.7 Test Accuracy = 0.387
Iteration = 12 Loss = 0.6151759 Train Accuracy = 0.78 Test Accuracy = 0.40466666
Iteration = 14 Loss = 0.48329315 Train Accuracy = 0.88 Test Accuracy = 0.41966668
Iteration = 16 Loss = 0.8776555 Train Accuracy = 0.78 Test Accuracy = 0.40666667
Iteration = 18 Loss = 0.47148696 Train Accuracy = 0.9 Test Accuracy = 0.45066667
Iteration = 20 Loss = 0.39670077 Train Accuracy = 0.9 Test Accuracy = 0.43
Iteration = 22 Loss = 0.5305547 Train Accuracy = 0.84 Test Accuracy = 0.384
Iteration = 24 Loss = 0.48535004 Train Accuracy = 0.8 Test Accuracy = 0.41766667
Iteration = 26 Loss = 0.22956605 Train Accuracy = 0.94 Test Accuracy = 0.43133333
Iteration = 28 Loss = 0.31709966 Train Accuracy = 0.92 Test Accuracy = 0.42433333
Iteration = 30 Loss = 0.13685715 Train Accuracy = 0.98 Test Accuracy = 0.46833333
Iteration = 32 Loss = 0.0816638 Train Accuracy = 0.98 Test Accuracy = 0.46033335
Iteration = 34 Loss = 0.14826852 Train Accuracy = 0.96 Test Accuracy = 0.41166666
Iteration = 36 Loss = 0.08975853 Train Accuracy = 0.96 Test Accuracy = 0.49433333
Iteration = 38 Loss = 0.046634417 Train Accuracy = 1.0 Test Accuracy = 0.458
Iteration = 40 Loss = 0.04983877 Train Accuracy = 1.0 Test Accuracy = 0.449
Iteration = 42 Loss = 0.04196478 Train Accuracy = 1.0 Test Accuracy = 0.44166666
Iteration = 44 Loss = 0.022288075 Train Accuracy = 1.0 Test Accuracy = 0.454
Iteration = 46 Loss = 0.01955667 Train Accuracy = 1.0 Test Accuracy = 0.46966666
Iteration = 48 Loss = 0.028818855 Train Accuracy = 1.0 Test Accuracy = 0.451

@PMH2906 PMH2906 changed the title The CNN we were working on yesterday finally trains~!~!! [CNN] The CNN we were working on yesterday finally trains~!~!! Jun 2, 2019