James Jeon committed 2 years ago
commit 43c89e595d
54 changed files with 1417 additions and 3 deletions
 1. +80   -0   collect_raw_data.py
 2. +12   -1   ver_0.3/collect_raw_data.py
 3. BIN        ver_0.5/__pycache__/get_train_data.cpython-35.pyc
 4. BIN        ver_0.5/__pycache__/get_val_data.cpython-35.pyc
 5. +1    -1   ver_0.5/result.txt
 6. +1    -1   ver_0.6/result.txt
 7. BIN        ver_0.7.2/.DS_Store
 8. BIN        ver_0.7.2/._.DS_Store
 9. BIN        ver_0.7.2/__pycache__/get_train_data.cpython-35.pyc
10. BIN        ver_0.7.2/__pycache__/get_val_data.cpython-35.pyc
11. BIN        ver_0.7.2/__pycache__/model.cpython-35.pyc
12. BIN        ver_0.7.2/__pycache__/params.cpython-35.pyc
13. +2    -0   ver_0.7.2/can.sh
14. +33   -0   ver_0.7.2/find_bad_data.py
15. +61   -0   ver_0.7.2/get_train_data.py
16. +78   -0   ver_0.7.2/get_val_data.py
17. +103  -0   ver_0.7.2/model.py
18. +97   -0   ver_0.7.2/model_backup.py
19. +1    -0   ver_0.7.2/mount.sh
20. +12   -0   ver_0.7.2/params.py
21. +5    -0   ver_0.7.2/result.txt
22. +57   -0   ver_0.7.2/train.py
23. BIN        ver_a_0.1/.DS_Store
24. BIN        ver_a_0.1/._.DS_Store
25. BIN        ver_a_0.1/__pycache__/get_train_data.cpython-35.pyc
26. BIN        ver_a_0.1/__pycache__/get_val_data.cpython-35.pyc
27. BIN        ver_a_0.1/__pycache__/model.cpython-35.pyc
28. BIN        ver_a_0.1/__pycache__/params.cpython-35.pyc
29. +2    -0   ver_a_0.1/can.sh
30. +33   -0   ver_a_0.1/find_bad_data.py
31. +61   -0   ver_a_0.1/get_train_data.py
32. +78   -0   ver_a_0.1/get_val_data.py
33. +103  -0   ver_a_0.1/model.py
34. +97   -0   ver_a_0.1/model_backup.py
35. +1    -0   ver_a_0.1/mount.sh
36. +12   -0   ver_a_0.1/params.py
37. +5    -0   ver_a_0.1/result.txt
38. +57   -0   ver_a_0.1/train.py
39. BIN        ver_a_0.5.1/.DS_Store
40. BIN        ver_a_0.5.1/._.DS_Store
41. BIN        ver_a_0.5.1/__pycache__/get_train_data.cpython-35.pyc
42. BIN        ver_a_0.5.1/__pycache__/get_val_data.cpython-35.pyc
43. BIN        ver_a_0.5.1/__pycache__/model.cpython-35.pyc
44. BIN        ver_a_0.5.1/__pycache__/params.cpython-35.pyc
45. +2    -0   ver_a_0.5.1/can.sh
46. +33   -0   ver_a_0.5.1/find_bad_data.py
47. +46   -0   ver_a_0.5.1/get_train_data.py
48. +55   -0   ver_a_0.5.1/get_val_data.py
49. +103  -0   ver_a_0.5.1/model.py
50. +94   -0   ver_a_0.5.1/model.py_bak
51. +1    -0   ver_a_0.5.1/mount.sh
52. +10   -0   ver_a_0.5.1/params.py
53. +5    -0   ver_a_0.5.1/result.txt
54. +76   -0   ver_a_0.5.1/train.py

+ 80  - 0   collect_raw_data.py

@@ -0,0 +1,80 @@
+import numpy as np
+import cv2
+import time
+import os
+import can
+import signal
+import sys
+
+bus = can.interface.Bus(channel='can0', bustype='socketcan_native')
+
+cap = cv2.VideoCapture(0)
+cap.set(3,300) # set frame width
+cap.set(4,200) # set frame height
+
+raw_data = []
+
+def signal_handler(signal, frame):
+    name = str(time.time())
+    file_name = "/raw_data/" + name + ".npy"
+    np.save(file_name,raw_data)
+    print("saved {} frames, name : {}".format(len(raw_data),name))
+    sys.exit(0)
+
+signal.signal(signal.SIGINT, signal_handler)
+
+def get_frame():
+    # read a frame from the webcam
+    _, frame = cap.read()
+
+    # convert to grayscale (currently disabled)
+    #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    # downscale the frame (to roughly 256*141); the sky is cropped later
+    frame = cv2.resize(frame,(0,0),fx=0.8, fy=0.8)
+
+    return frame
+
+def get_angle():
+    while(True):
+        # read one CAN message
+        can_data = str(bus.recv())
+        # stop once it is a steering-angle message (ID 0025)
+        if(can_data.find("ID: 0025") > 0):
+            break
+
+    return can_data
+
+def main():
+    global raw_data
+
+    # discard the first 40 frames to give the webcam time to settle on a proper exposure
+    for i in range(40):
+        get_angle()
+        get_frame()
+        print(i, "discarded")
+
+    while True:
+        angle = get_angle()
+        frame = get_frame()
+        raw_data.append([frame, angle])
+
+        if len(raw_data) % 1000 == 0:
+            print(len(raw_data))
+        '''
+        if len(raw_data) == 5000:
+            pid = os.fork()
+            if pid == 0:
+                name = str(time.time())
+                file_name = "/raw_data/" + name + ".npy"
+                np.save(file_name,raw_data)
+                print("saved {} frames, name : {}".format(len(raw_data),name))
+                exit(0)
+            else:
+                raw_data = []
+        '''
+
+main()
+
+cap.release()
+# cv2.destroyAllWindows()
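Note on the angle decoding used throughout this commit: get_angle() stores the stringified python-can Message, and the slices tmp[-23:-21] + tmp[-20:-18] and tmp[-3:-1] later pull the angle bytes back out of that string. Below is a rough standalone sketch of the same conversion working from the frame's data bytes instead of its string form; the byte positions are an assumption inferred from those slices (and note that find_bad_data.py offsets by 4096 where get_train_data.py uses 4095), so verify against the vehicle's CAN spec before reusing it:

    def decode_steering_angle(data):
        # data: bytes of the ID 0x025 frame (assumed layout)
        int_part = (data[0] << 8) | data[1]   # two big-endian bytes
        frac_part = data[-1] / 256            # last byte holds the fraction
        # values above 550 encode angles right of center (wrap-around)
        if int_part > 550:
            return (int_part - 4095) - (1 - frac_part)
        return int_part + frac_part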

+ 12  - 1   ver_0.3/collect_raw_data.py

@@ -59,7 +59,18 @@ def main():
         raw_data.append([frame, angle])
 
         print(frame.shape[0], frame.shape[1], angle)
-
+
+        if len(raw_data) > 5000:
+            pid = os.fork()
+            if pid == 0:
+                name = str(time.time())
+                file_name = "/raw_data/" + name + ".npy"
+                np.save(file_name,raw_data)
+                print("saved {} frames, name : {}".format(len(raw_data),name))
+                exit(0)
+            else:
+                raw_data = []
+
 
 main()
 
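This hunk saves asynchronously: once 5000 frames are buffered, os.fork() lets the child process run the slow np.save while the parent keeps capturing without dropping frames. A minimal sketch of the pattern with a hypothetical save_async helper; in a forked child, os._exit(0) is generally safer than exit(0) because it skips cleanup handlers shared with the parent, and without a wait() the children linger as zombies until the parent exits:

    import os, time
    import numpy as np

    def save_async(buffer):
        pid = os.fork()
        if pid == 0:                 # child: persist the buffer, then exit
            np.save("/raw_data/%s.npy" % time.time(), buffer)
            os._exit(0)
        return []                    # parent: continue with an empty buffer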

BIN   ver_0.5/__pycache__/get_train_data.cpython-35.pyc

BIN   ver_0.5/__pycache__/get_val_data.cpython-35.pyc


+ 1  - 1   ver_0.5/result.txt

@@ -1,3 +1,3 @@
 66*256*1
 
-loss : 31.9
+loss : 39.2

+ 1  - 1   ver_0.6/result.txt

@@ -3,4 +3,4 @@ Came out with same result as the ver_0.5
 
 66*256*3
 
-loss : 31.6
+loss : 39.6

BIN   ver_0.7.2/.DS_Store

BIN   ver_0.7.2/._.DS_Store

BIN   ver_0.7.2/__pycache__/get_train_data.cpython-35.pyc

BIN   ver_0.7.2/__pycache__/get_val_data.cpython-35.pyc

BIN   ver_0.7.2/__pycache__/model.cpython-35.pyc

BIN   ver_0.7.2/__pycache__/params.cpython-35.pyc


+ 2  - 0   ver_0.7.2/can.sh

@@ -0,0 +1,2 @@
+sudo ip link set can0 type can bitrate 500000
+sudo ip link set up can0
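can.sh configures the can0 interface for 500 kbit/s and brings it up; the collection scripts then open it through python-can. A quick sanity check that frames are actually arriving might look like the sketch below (an assumption: bustype='socketcan' is the current python-can name for the interface this commit opens as 'socketcan_native'; the filter narrows reads to the steering-angle ID used elsewhere in this commit):

    import can

    bus = can.interface.Bus(channel='can0', bustype='socketcan',
                            can_filters=[{"can_id": 0x025, "can_mask": 0x7FF}])
    msg = bus.recv(timeout=1.0)   # None if nothing arrives within 1 s
    print("no traffic on can0" if msg is None else msg)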

+ 33  - 0   ver_0.7.2/find_bad_data.py

@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+import os
+import numpy as np
+
+file_list = os.listdir('/raw_data')
+for file_name in file_list:
+    if file_name.endswith('.npy'):
+        file_location = "/raw_data/" + file_name
+        loaded_data = np.load(file_location)
+
+        for data in loaded_data:
+            # convert the CAN data (hex) to a numerical angle
+            tmp = data[1]
+            hex_data = tmp[-23:-21] + tmp[-20:-18]
+            hex_decimal = tmp[-3:-1]
+            int_data = int(hex_data, 16)
+            int_decimal = int(hex_decimal, 16) / 256
+
+            # if the steering wheel angle is right of center
+            if(int_data > 550):
+                int_data = int_data - 4096
+                int_decimal = 1 - int_decimal
+
+            # put the integer and fractional parts together
+            final_data = int_data + int_decimal
+
+            if final_data > 20 or final_data < -20:
+                print("Bad data : ", file_name)
+                break
+
+
+

+ 61  - 0   ver_0.7.2/get_train_data.py

@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+import params
+import cv2
+import numpy as np
+
+def train_data(file_name):
+    file_location = "/raw_data/" + file_name
+    loaded_data = np.load(file_location)
+
+    X = []
+    Y = []
+    temp_x = []
+
+    # convert the saved data into a form that both humans and the NN can understand
+    for data in loaded_data:
+        # flip the frame (the webcam is mounted upside down on the front windshield) and cut out the sky portion; the image becomes 256*66
+        tmp = cv2.flip(data[0],0)
+        tmp = cv2.flip(tmp,1)
+        if params.img_channels != 3:
+            tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+        frame = (tmp[70:-5,::]).reshape(params.img_height,params.img_width,1)
+
+        # convert the CAN data (hex) to a numerical angle
+        tmp = data[1]
+        hex_data = tmp[-23:-21] + tmp[-20:-18]
+        hex_decimal = tmp[-3:-1]
+        int_data = int(hex_data, 16)
+        int_decimal = int(hex_decimal, 16) / 256
+
+        # if the steering wheel angle is right of center
+        if(int_data > 550):
+            int_data = int_data - 4095
+            int_decimal = 1 - int_decimal
+            final_data = int_data - int_decimal
+        else:
+            # put the integer and fractional parts together
+            final_data = int_data + int_decimal
+
+        if len(temp_x) == 0:
+            temp_x = frame
+        elif temp_x.shape[0] < 66*4:
+            temp_x = np.concatenate((temp_x, frame), axis=0)
+        elif temp_x.shape[0] == 66*4:
+            temp_x = np.concatenate((temp_x, frame), axis=0)
+            X.append([temp_x])
+            Y.append([final_data])
+        else:
+            temp_x = temp_x[66:,::,::]
+            temp_x = np.concatenate((temp_x, frame), axis=0)
+            X.append([temp_x])
+            Y.append([final_data])
+
+    X = X[50:]
+    Y = Y[50:]
+    X = np.array(X).reshape([-1, params.network_height, params.img_width, params.img_channels])
+    Y = np.array(Y).reshape([-1,1])
+
+    return X, Y
+
+    # finished making the training data
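The if/elif ladder above builds a sliding stack of five 66-row frames along the height axis, so each training sample is 330x256x1 and consecutive samples overlap by four frames (matching network_height = 66*5 in params.py). The same windowing can be written more directly; a sketch assumed equivalent to the temp_x logic, minus the X = X[50:] warm-up trim:

    import numpy as np

    def sliding_stack(frames, depth=5):
        # frames: iterable of (66, 256, 1) arrays
        # yields a (66*depth, 256, 1) array per new frame once the window is full
        buf = []
        for f in frames:
            buf.append(f)
            if len(buf) > depth:
                buf.pop(0)                    # drop the oldest frame
            if len(buf) == depth:
                yield np.concatenate(buf, axis=0)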

+ 78  - 0   ver_0.7.2/get_val_data.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+import os
+import params
+import cv2
+import numpy as np
+
+# make the validation data
+
+def val_data():
+    val_x = []
+    val_y = []
+    file_list = os.listdir('/raw_data/eval_data')
+    for file in file_list:
+        if file.endswith('.npy'):
+            file_location = "/raw_data/eval_data/" + file
+            loaded_data = np.load(file_location)
+
+            X = []
+            Y = []
+            temp_x = []
+
+            for data in loaded_data:
+                tmp = cv2.flip(data[0],0)
+                tmp = cv2.flip(tmp,1)
+                if params.img_channels != 3:
+                    tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+                frame = (tmp[70:-5,::]).reshape(params.img_height, params.img_width, 1)
+
+                # convert the CAN data (hex) to a numerical angle
+                tmp = data[1]
+                hex_data = tmp[-23:-21] + tmp[-20:-18]
+                hex_decimal = tmp[-3:-1]
+                int_data = int(hex_data, 16)
+                int_decimal = int(hex_decimal, 16) / 256
+
+                # if the steering wheel angle is right of center
+                if(int_data > 550):
+                    int_data = int_data - 4095
+                    int_decimal = 1 - int_decimal
+                    final_data = int_data - int_decimal
+                else:
+                    # put the integer and fractional parts together
+                    final_data = int_data + int_decimal
+
+                # stack up the images
+                if len(temp_x) == 0:
+                    temp_x = frame
+                    # print(np.array(temp_x).shape)
+                elif temp_x.shape[0] < 66*4:
+                    temp_x = np.concatenate((temp_x, frame), axis=0)
+                elif temp_x.shape[0] == 66*4:
+                    temp_x = np.concatenate((temp_x, frame), axis=0)
+                    # print(np.array(temp_x).shape)
+                    X.append([temp_x])
+                    Y.append([final_data])
+                else:
+                    temp_x = temp_x[66:,::,::]
+                    temp_x = np.concatenate((temp_x, frame), axis=0)
+                    # print(np.array(temp_x).shape)
+                    X.append([temp_x])
+                    Y.append([final_data])
+
+            X = X[50:]
+            Y = Y[50:]
+            # print(np.array(X).shape)
+            X = np.array(X).reshape([-1, params.network_height, params.img_width, params.img_channels])
+            Y = np.array(Y).reshape([-1,1])
+
+            val_x.extend(X)
+            val_y.extend(Y)
+
+    return val_x, val_y
+
+
+    # finished making the validation data
+
+

+ 103  - 0   ver_0.7.2/model.py

@@ -0,0 +1,103 @@
+import tensorflow as tf
+import params
+
+print_layer = True
+
+x = tf.placeholder(tf.float32, shape=[None, params.network_height, params.img_width, params.img_channels])
+y_ = tf.placeholder(tf.float32, shape=[None, 1])
+
+keep_prob = tf.placeholder(tf.float32)
+
+x_image = tf.reshape(x, [-1, params.network_height, params.img_width, params.img_channels])
+
+network = tf.layers.batch_normalization(x_image, name="norm_0")
+if print_layer:
+    print(network)
+
+# Conv Layer #1
+network = tf.layers.conv2d(network, filters=96, kernel_size=(11,11), strides=(4,4),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_1")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_1")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_1")
+if print_layer:
+    print(network)
+
+# Conv Layer #2
+
+network = tf.layers.conv2d(network, filters=256, kernel_size=(5,5), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_2")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_2")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_2")
+if print_layer:
+    print(network)
+
+# Conv Layer #3
+
+network = tf.layers.conv2d(network, filters=384, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_1")
+if print_layer:
+    print(network)
+network = tf.layers.conv2d(network, filters=256, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_2")
+if print_layer:
+    print(network)
+network = tf.layers.conv2d(network, filters=32, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_3")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_3")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_3")
+if print_layer:
+    print(network)
+
+# Flatten
+network = tf.reshape(network, [-1, 2816], name="flatten")
+if print_layer:
+    print(network)
+
+# Dense Layer 1
+
+network = tf.layers.dense(network, 4096, activation=tf.nn.relu, name="dense_1")
+if print_layer:
+    print(network)
+network = tf.layers.dropout(network, rate=keep_prob, name="dropout_1")
+if print_layer:
+    print(network)
+
+# Dense Layer 2
+
+network = tf.layers.dense(network, 4096, activation=tf.nn.relu, name="dense_2")
+if print_layer:
+    print(network)
+network = tf.layers.dropout(network, rate=keep_prob, name="dropout_2")
+if print_layer:
+    print(network)
+
+# # Dense Layer 3
+
+# network = tf.layers.dense(network, 1000, activation=tf.nn.relu, name="dense_3")
+# if print_layer:
+#     print(network)
+# network = tf.layers.dropout(network, rate=keep_prob, name="dropout_3")
+# if print_layer:
+#     print(network)
+
+# output layer
+
+# activation=None => linear
+y = tf.layers.dense(network, 1, activation=None, name="output")
+if print_layer:
+    print(y)
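The hard-coded 2816 in the flatten reshape follows from the 'same'-padding shape rule (ceiling division by the stride) applied to the 330x256 input. A sketch that recomputes it, so the constant can be re-checked whenever params change; the same arithmetic gives 768 for the 66-row input of ver_a_0.5.1/model.py:

    from math import ceil

    h, w = 66*5, 256              # network_height, img_width
    h, w = ceil(h/4), ceil(w/4)   # conv_1, stride 4, 'same'   -> 83 x 64
    h, w = ceil(h/2), ceil(w/2)   # m_pooling_1, stride 2      -> 42 x 32
    h, w = ceil(h/2), ceil(w/2)   # m_pooling_2                -> 21 x 16
    h, w = ceil(h/2), ceil(w/2)   # m_pooling_3                -> 11 x 8
    print(h * w * 32)             # conv_3_3 has 32 filters    -> 2816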

+ 97  - 0   ver_0.7.2/model_backup.py

@@ -0,0 +1,97 @@
+import tensorflow as tf
+import params
+
+def weight_variable(shape):
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+def bias_variable(shape):
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+def conv2d(x, W, stride):
+    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='VALID')
+
+x = tf.placeholder(tf.float32, shape=[None, params.img_height, params.img_width, params.img_channels])
+y_ = tf.placeholder(tf.float32, shape=[None, 1])
+keep_prob = tf.placeholder(tf.float32)
+
+x_image = tf.reshape(x, [-1, params.img_height, params.img_width, params.img_channels])
+# print(x_image)
+
+# first convolutional layer
+W_conv1 = weight_variable([5, 5, params.img_channels, 24])
+b_conv1 = bias_variable([24])
+
+h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 2) + b_conv1)
+print(h_conv1)
+
+# second convolutional layer
+W_conv2 = weight_variable([5, 5, 24, 36])
+b_conv2 = bias_variable([36])
+
+h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)
+print(h_conv2)
+
+# third convolutional layer
+W_conv3 = weight_variable([5, 5, 36, 48])
+b_conv3 = bias_variable([48])
+
+h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 2) + b_conv3)
+print(h_conv3)
+
+# fourth convolutional layer
+W_conv4 = weight_variable([3, 3, 48, 64])
+b_conv4 = bias_variable([64])
+
+h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4, 1) + b_conv4)
+print(h_conv4)
+
+# fifth convolutional layer
+W_conv5 = weight_variable([3, 3, 64, 64])
+b_conv5 = bias_variable([64])
+
+h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5, 1) + b_conv5)
+print(h_conv5)
+
+# fully connected layer 1
+W_fc1 = weight_variable([1600, 1164])
+b_fc1 = bias_variable([1164])
+
+h_conv5_flat = tf.reshape(h_conv5, [-1, 1600])
+h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
+
+h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
+
+print(h_conv5_flat)
+print(h_fc1)
+
+# fully connected layer 2
+W_fc2 = weight_variable([1164, 100])
+b_fc2 = bias_variable([100])
+
+h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
+
+h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
+
+# fully connected layer 3
+W_fc3 = weight_variable([100, 50])
+b_fc3 = bias_variable([50])
+
+h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
+
+h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)
+
+# fully connected layer 4
+W_fc4 = weight_variable([50, 10])
+b_fc4 = bias_variable([10])
+
+h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)
+
+h_fc4_drop = tf.nn.dropout(h_fc4, keep_prob)
+
+# output
+W_fc5 = weight_variable([10, 1])
+b_fc5 = bias_variable([1])
+
+y = tf.matmul(h_fc4_drop, W_fc5) + b_fc5

+ 1  - 0   ver_0.7.2/mount.sh

@@ -0,0 +1 @@
+sudo mount /dev/mmcblk1p1 /raw_data

+ 12  - 0   ver_0.7.2/params.py

@@ -0,0 +1,12 @@
+import os
+
+# network input height is 66*5 = 330 (five stacked 66-pixel frames)
+img_height = 66
+img_width = 256
+img_channels = 1
+network_height = 66*5
+network_width = 256
+batch = 16
+epoch = 1000
+write_summary = True
+save_dir = os.path.abspath('models')

+ 5  - 0   ver_0.7.2/result.txt

@@ -0,0 +1,5 @@
+Compared to ver_0.7.1, the output layer has been changed
+
+66*5,256,1
+
+loss : 31.9

+ 57  - 0   ver_0.7.2/train.py

@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+import os
+import tensorflow as tf
+import model
+import params
+import time
+import cv2
+import numpy as np
+
+from get_val_data import val_data
+from get_train_data import train_data
+
+sess = tf.InteractiveSession()
+
+loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y)))
+train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
+sess.run(tf.global_variables_initializer())
+
+# get the validation data
+val_X, val_Y = val_data()
+val_X = val_X[600:]
+val_Y = val_Y[600:]
+# print(np.array(val_Y).shape)
+# finished getting the validation data
+
+file_list = os.listdir('/raw_data')
+for i in range(params.epoch):
+    # prepare data for training
+    for file in file_list:
+        if file.endswith('.npy'):
+            train_X, train_Y = train_data(file)
+            # finished getting the training data
+            print("Start train on file : ", file)
+            # start training on the data
+            batch_iteration = int(train_X.shape[0] / params.batch)
+            # print(train_X.shape[0])
+            if train_X.shape[0] / params.batch > batch_iteration:
+                batch_iteration = batch_iteration + 1
+
+            for iteration in range(batch_iteration):
+                # print(iteration)
+                batch_X = train_X[iteration*params.batch:(iteration+1)*params.batch]
+                batch_Y = train_Y[iteration*params.batch:(iteration+1)*params.batch]
+                # print(np.array(batch_X).shape)
+                # print(np.array(batch_Y).shape)
+
+
+                train_step.run(feed_dict={model.x: batch_X, model.y_: batch_Y, model.keep_prob: 0.5})
+                # print("train done for batch")
+
+                t_loss = loss.eval(feed_dict={model.x: batch_X, model.y_: batch_Y, model.keep_prob: 1.0})
+                v_loss = loss.eval(feed_dict={model.x: val_X, model.y_: val_Y, model.keep_prob: 1.0})
+                print("epoch {} of {}, batch {} of {}, batch loss {}, val loss {}".format(i, params.epoch, iteration, batch_iteration, t_loss, v_loss))
+
+
+
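One caution on the dropout wiring (an observation, not part of the commit): model.py passes keep_prob into tf.layers.dropout's rate argument, but rate is the fraction to drop, and the layer is a no-op unless training=True (its default is False). So feeding 0.5 here would drop half the units if dropout were active, and the 1.0 fed at evaluation would drop everything. A minimal sketch of the conventional TF 1.x wiring, assuming that intent:

    import tensorflow as tf

    net = tf.placeholder(tf.float32, [None, 4096])
    is_training = tf.placeholder(tf.bool, name="is_training")
    # rate is the DROP probability; the layer is active only while training
    net = tf.layers.dropout(net, rate=0.5, training=is_training, name="dropout_1")

    # train: feed_dict={..., is_training: True}
    # eval:  feed_dict={..., is_training: False}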

BIN   ver_a_0.1/.DS_Store

BIN   ver_a_0.1/._.DS_Store

BIN   ver_a_0.1/__pycache__/get_train_data.cpython-35.pyc

BIN   ver_a_0.1/__pycache__/get_val_data.cpython-35.pyc

BIN   ver_a_0.1/__pycache__/model.cpython-35.pyc

BIN   ver_a_0.1/__pycache__/params.cpython-35.pyc


+ 2  - 0   ver_a_0.1/can.sh

@@ -0,0 +1,2 @@
+sudo ip link set can0 type can bitrate 500000
+sudo ip link set up can0

+ 33  - 0   ver_a_0.1/find_bad_data.py

@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+import os
+import numpy as np
+
+file_list = os.listdir('/raw_data')
+for file_name in file_list:
+    if file_name.endswith('.npy'):
+        file_location = "/raw_data/" + file_name
+        loaded_data = np.load(file_location)
+
+        for data in loaded_data:
+            # convert the CAN data (hex) to a numerical angle
+            tmp = data[1]
+            hex_data = tmp[-23:-21] + tmp[-20:-18]
+            hex_decimal = tmp[-3:-1]
+            int_data = int(hex_data, 16)
+            int_decimal = int(hex_decimal, 16) / 256
+
+            # if the steering wheel angle is right of center
+            if(int_data > 550):
+                int_data = int_data - 4096
+                int_decimal = 1 - int_decimal
+
+            # put the integer and fractional parts together
+            final_data = int_data + int_decimal
+
+            if final_data > 20 or final_data < -20:
+                print("Bad data : ", file_name)
+                break
+
+
+

+ 61  - 0   ver_a_0.1/get_train_data.py

@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+import params
+import cv2
+import numpy as np
+
+def train_data(file_name):
+    file_location = "/raw_data/" + file_name
+    loaded_data = np.load(file_location)
+
+    X = []
+    Y = []
+    temp_x = []
+
+    # convert the saved data into a form that both humans and the NN can understand
+    for data in loaded_data:
+        # flip the frame (the webcam is mounted upside down on the front windshield) and cut out the sky portion; the image becomes 256*66
+        tmp = cv2.flip(data[0],0)
+        tmp = cv2.flip(tmp,1)
+        if params.img_channels != 3:
+            tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+        frame = (tmp[70:-5,::]).reshape(params.img_height,params.img_width,1)
+
+        # convert the CAN data (hex) to a numerical angle
+        tmp = data[1]
+        hex_data = tmp[-23:-21] + tmp[-20:-18]
+        hex_decimal = tmp[-3:-1]
+        int_data = int(hex_data, 16)
+        int_decimal = int(hex_decimal, 16) / 256
+
+        # if the steering wheel angle is right of center
+        if(int_data > 550):
+            int_data = int_data - 4095
+            int_decimal = 1 - int_decimal
+            final_data = int_data - int_decimal
+        else:
+            # put the integer and fractional parts together
+            final_data = int_data + int_decimal
+
+        if len(temp_x) == 0:
+            temp_x = frame
+        elif temp_x.shape[0] < 66*4:
+            temp_x = np.concatenate((temp_x, frame), axis=0)
+        elif temp_x.shape[0] == 66*4:
+            temp_x = np.concatenate((temp_x, frame), axis=0)
+            X.append([temp_x])
+            Y.append([final_data])
+        else:
+            temp_x = temp_x[66:,::,::]
+            temp_x = np.concatenate((temp_x, frame), axis=0)
+            X.append([temp_x])
+            Y.append([final_data])
+
+    X = X[50:]
+    Y = Y[50:]
+    X = np.array(X).reshape([-1, params.network_height, params.img_width, params.img_channels])
+    Y = np.array(Y).reshape([-1,1])
+
+    return X, Y
+
+    # finished making the training data

+ 78  - 0   ver_a_0.1/get_val_data.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+import os
+import params
+import cv2
+import numpy as np
+
+# make the validation data
+
+def val_data():
+    val_x = []
+    val_y = []
+    file_list = os.listdir('/raw_data/eval_data')
+    for file in file_list:
+        if file.endswith('.npy'):
+            file_location = "/raw_data/eval_data/" + file
+            loaded_data = np.load(file_location)
+
+            X = []
+            Y = []
+            temp_x = []
+
+            for data in loaded_data:
+                tmp = cv2.flip(data[0],0)
+                tmp = cv2.flip(tmp,1)
+                if params.img_channels != 3:
+                    tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+                frame = (tmp[70:-5,::]).reshape(params.img_height, params.img_width, 1)
+
+                # convert the CAN data (hex) to a numerical angle
+                tmp = data[1]
+                hex_data = tmp[-23:-21] + tmp[-20:-18]
+                hex_decimal = tmp[-3:-1]
+                int_data = int(hex_data, 16)
+                int_decimal = int(hex_decimal, 16) / 256
+
+                # if the steering wheel angle is right of center
+                if(int_data > 550):
+                    int_data = int_data - 4095
+                    int_decimal = 1 - int_decimal
+                    final_data = int_data - int_decimal
+                else:
+                    # put the integer and fractional parts together
+                    final_data = int_data + int_decimal
+
+                # stack up the images
+                if len(temp_x) == 0:
+                    temp_x = frame
+                    # print(np.array(temp_x).shape)
+                elif temp_x.shape[0] < 66*4:
+                    temp_x = np.concatenate((temp_x, frame), axis=0)
+                elif temp_x.shape[0] == 66*4:
+                    temp_x = np.concatenate((temp_x, frame), axis=0)
+                    # print(np.array(temp_x).shape)
+                    X.append([temp_x])
+                    Y.append([final_data])
+                else:
+                    temp_x = temp_x[66:,::,::]
+                    temp_x = np.concatenate((temp_x, frame), axis=0)
+                    # print(np.array(temp_x).shape)
+                    X.append([temp_x])
+                    Y.append([final_data])
+
+            X = X[50:]
+            Y = Y[50:]
+            # print(np.array(X).shape)
+            X = np.array(X).reshape([-1, params.network_height, params.img_width, params.img_channels])
+            Y = np.array(Y).reshape([-1,1])
+
+            val_x.extend(X)
+            val_y.extend(Y)
+
+    return val_x, val_y
+
+
+    # finished making the validation data
+
+

+ 103  - 0   ver_a_0.1/model.py

@@ -0,0 +1,103 @@
+import tensorflow as tf
+import params
+
+print_layer = True
+
+x = tf.placeholder(tf.float32, shape=[None, params.network_height, params.img_width, params.img_channels])
+y_ = tf.placeholder(tf.float32, shape=[None, 1])
+
+keep_prob = tf.placeholder(tf.float32)
+
+x_image = tf.reshape(x, [-1, params.network_height, params.img_width, params.img_channels])
+
+network = tf.layers.batch_normalization(x_image, name="norm_0")
+if print_layer:
+    print(network)
+
+# Conv Layer #1
+network = tf.layers.conv2d(network, filters=96, kernel_size=(11,11), strides=(4,4),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_1")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_1")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_1")
+if print_layer:
+    print(network)
+
+# Conv Layer #2
+
+network = tf.layers.conv2d(network, filters=256, kernel_size=(5,5), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_2")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_2")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_2")
+if print_layer:
+    print(network)
+
+# Conv Layer #3
+
+network = tf.layers.conv2d(network, filters=384, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_1")
+if print_layer:
+    print(network)
+network = tf.layers.conv2d(network, filters=256, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_2")
+if print_layer:
+    print(network)
+network = tf.layers.conv2d(network, filters=32, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_3")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_3")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_3")
+if print_layer:
+    print(network)
+
+# Flatten
+network = tf.reshape(network, [-1, 2816], name="flatten")
+if print_layer:
+    print(network)
+
+# Dense Layer 1
+
+network = tf.layers.dense(network, 4096, activation=tf.nn.relu, name="dense_1")
+if print_layer:
+    print(network)
+network = tf.layers.dropout(network, rate=keep_prob, name="dropout_1")
+if print_layer:
+    print(network)
+
+# Dense Layer 2
+
+network = tf.layers.dense(network, 4096, activation=tf.nn.relu, name="dense_2")
+if print_layer:
+    print(network)
+network = tf.layers.dropout(network, rate=keep_prob, name="dropout_2")
+if print_layer:
+    print(network)
+
+# # Dense Layer 3
+
+# network = tf.layers.dense(network, 1000, activation=tf.nn.relu, name="dense_3")
+# if print_layer:
+#     print(network)
+# network = tf.layers.dropout(network, rate=keep_prob, name="dropout_3")
+# if print_layer:
+#     print(network)
+
+# output layer
+
+# activation=None => linear
+y = tf.layers.dense(network, 1, activation=None, name="output")
+if print_layer:
+    print(y)

+ 97  - 0   ver_a_0.1/model_backup.py

@@ -0,0 +1,97 @@
+import tensorflow as tf
+import params
+
+def weight_variable(shape):
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+def bias_variable(shape):
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+def conv2d(x, W, stride):
+    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='VALID')
+
+x = tf.placeholder(tf.float32, shape=[None, params.img_height, params.img_width, params.img_channels])
+y_ = tf.placeholder(tf.float32, shape=[None, 1])
+keep_prob = tf.placeholder(tf.float32)
+
+x_image = tf.reshape(x, [-1, params.img_height, params.img_width, params.img_channels])
+# print(x_image)
+
+# first convolutional layer
+W_conv1 = weight_variable([5, 5, params.img_channels, 24])
+b_conv1 = bias_variable([24])
+
+h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 2) + b_conv1)
+print(h_conv1)
+
+# second convolutional layer
+W_conv2 = weight_variable([5, 5, 24, 36])
+b_conv2 = bias_variable([36])
+
+h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)
+print(h_conv2)
+
+# third convolutional layer
+W_conv3 = weight_variable([5, 5, 36, 48])
+b_conv3 = bias_variable([48])
+
+h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 2) + b_conv3)
+print(h_conv3)
+
+# fourth convolutional layer
+W_conv4 = weight_variable([3, 3, 48, 64])
+b_conv4 = bias_variable([64])
+
+h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4, 1) + b_conv4)
+print(h_conv4)
+
+# fifth convolutional layer
+W_conv5 = weight_variable([3, 3, 64, 64])
+b_conv5 = bias_variable([64])
+
+h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5, 1) + b_conv5)
+print(h_conv5)
+
+# fully connected layer 1
+W_fc1 = weight_variable([1600, 1164])
+b_fc1 = bias_variable([1164])
+
+h_conv5_flat = tf.reshape(h_conv5, [-1, 1600])
+h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
+
+h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
+
+print(h_conv5_flat)
+print(h_fc1)
+
+# fully connected layer 2
+W_fc2 = weight_variable([1164, 100])
+b_fc2 = bias_variable([100])
+
+h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
+
+h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
+
+# fully connected layer 3
+W_fc3 = weight_variable([100, 50])
+b_fc3 = bias_variable([50])
+
+h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
+
+h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)
+
+# fully connected layer 4
+W_fc4 = weight_variable([50, 10])
+b_fc4 = bias_variable([10])
+
+h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)
+
+h_fc4_drop = tf.nn.dropout(h_fc4, keep_prob)
+
+# output
+W_fc5 = weight_variable([10, 1])
+b_fc5 = bias_variable([1])
+
+y = tf.matmul(h_fc4_drop, W_fc5) + b_fc5

+ 1  - 0   ver_a_0.1/mount.sh

@@ -0,0 +1 @@
+sudo mount /dev/mmcblk1p1 /raw_data

+ 12  - 0   ver_a_0.1/params.py

@@ -0,0 +1,12 @@
+import os
+
+# network input height is 66*5 = 330 (five stacked 66-pixel frames)
+img_height = 66
+img_width = 256
+img_channels = 1
+network_height = 66*5
+network_width = 256
+batch = 16
+epoch = 1000
+write_summary = True
+save_dir = os.path.abspath('models')

+ 5  - 0   ver_a_0.1/result.txt

@@ -0,0 +1,5 @@
+Compared to ver_0.7.1, the output layer has been changed
+
+66*5,256,1
+
+loss : 31.9

+ 57  - 0   ver_a_0.1/train.py

@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+import os
+import tensorflow as tf
+import model
+import params
+import time
+import cv2
+import numpy as np
+
+from get_val_data import val_data
+from get_train_data import train_data
+
+sess = tf.InteractiveSession()
+
+loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y)))
+train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
+sess.run(tf.global_variables_initializer())
+
+# get the validation data
+val_X, val_Y = val_data()
+val_X = val_X[600:]
+val_Y = val_Y[600:]
+# print(np.array(val_Y).shape)
+# finished getting the validation data
+
+file_list = os.listdir('/raw_data')
+for i in range(params.epoch):
+    # prepare data for training
+    for file in file_list:
+        if file.endswith('.npy'):
+            train_X, train_Y = train_data(file)
+            # finished getting the training data
+            print("Start train on file : ", file)
+            # start training on the data
+            batch_iteration = int(train_X.shape[0] / params.batch)
+            # print(train_X.shape[0])
+            if train_X.shape[0] / params.batch > batch_iteration:
+                batch_iteration = batch_iteration + 1
+
+            for iteration in range(batch_iteration):
+                # print(iteration)
+                batch_X = train_X[iteration*params.batch:(iteration+1)*params.batch]
+                batch_Y = train_Y[iteration*params.batch:(iteration+1)*params.batch]
+                # print(np.array(batch_X).shape)
+                # print(np.array(batch_Y).shape)
+
+
+                train_step.run(feed_dict={model.x: batch_X, model.y_: batch_Y, model.keep_prob: 0.5})
+                # print("train done for batch")
+
+                t_loss = loss.eval(feed_dict={model.x: batch_X, model.y_: batch_Y, model.keep_prob: 1.0})
+                v_loss = loss.eval(feed_dict={model.x: val_X, model.y_: val_Y, model.keep_prob: 1.0})
+                print("epoch {} of {}, batch {} of {}, batch loss {}, val loss {}".format(i, params.epoch, iteration, batch_iteration, t_loss, v_loss))
+
+
+

BIN   ver_a_0.5.1/.DS_Store

BIN   ver_a_0.5.1/._.DS_Store

BIN   ver_a_0.5.1/__pycache__/get_train_data.cpython-35.pyc

BIN   ver_a_0.5.1/__pycache__/get_val_data.cpython-35.pyc

BIN   ver_a_0.5.1/__pycache__/model.cpython-35.pyc

BIN   ver_a_0.5.1/__pycache__/params.cpython-35.pyc


+ 2  - 0   ver_a_0.5.1/can.sh

@@ -0,0 +1,2 @@
+sudo ip link set can0 type can bitrate 500000
+sudo ip link set up can0

+ 33  - 0   ver_a_0.5.1/find_bad_data.py

@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+import os
+import numpy as np
+
+file_list = os.listdir('/raw_data')
+for file_name in file_list:
+    if file_name.endswith('.npy'):
+        file_location = "/raw_data/" + file_name
+        loaded_data = np.load(file_location)
+
+        for data in loaded_data:
+            # convert the CAN data (hex) to a numerical angle
+            tmp = data[1]
+            hex_data = tmp[-23:-21] + tmp[-20:-18]
+            hex_decimal = tmp[-3:-1]
+            int_data = int(hex_data, 16)
+            int_decimal = int(hex_decimal, 16) / 256
+
+            # if the steering wheel angle is right of center
+            if(int_data > 550):
+                int_data = int_data - 4096
+                int_decimal = 1 - int_decimal
+
+            # put the integer and fractional parts together
+            final_data = int_data + int_decimal
+
+            if final_data > 20 or final_data < -20:
+                print("Bad data : ", file_name)
+                break
+
+
+

+ 46  - 0   ver_a_0.5.1/get_train_data.py

@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+import params
+import cv2
+import numpy as np
+
+def train_data(file_name):
+    file_location = "/raw_data/" + file_name
+    loaded_data = np.load(file_location)
+
+    # convert the saved data into a form that both humans and the NN can understand
+    for data in loaded_data:
+        # flip the frame (the webcam is mounted upside down on the front windshield) and cut out the sky portion; the image becomes 256*66
+        tmp = cv2.flip(data[0],0)
+        tmp = cv2.flip(tmp,1)
+        if params.img_channels != 3:
+            tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+        data[0] = (tmp[70:-5,::]).reshape(params.img_height,params.img_width,params.img_channels)
+
+        # convert the CAN data (hex) to a numerical angle
+        tmp = data[1]
+        hex_data = tmp[-23:-21] + tmp[-20:-18]
+        hex_decimal = tmp[-3:-1]
+        int_data = int(hex_data, 16)
+        int_decimal = int(hex_decimal, 16) / 256
+
+        # if the steering wheel angle is right of center
+        if(int_data > 550):
+            int_data = int_data - 4095
+            int_decimal = 1 - int_decimal
+            final_data = int_data - int_decimal
+        else:
+            # put the integer and fractional parts together
+            final_data = int_data + int_decimal
+        data[1] = final_data
+        #print(final_data)
+    # cut out the first few frames because the webcam needs time to adjust the exposure,
+    # but some washed-out frames seem to remain
+
+    loaded_data = loaded_data[30:]
+    train_X = np.array([i[0] for i in loaded_data]).reshape([-1, params.img_height,params.img_width,params.img_channels])
+    train_Y = np.array([i[1] for i in loaded_data]).reshape([-1,1])
+
+    return train_X, train_Y
+
+    # finished making the training data

+ 55  - 0   ver_a_0.5.1/get_val_data.py

@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+import os
+import params
+import cv2
+import numpy as np
+
+# make the validation data
+
+def val_data():
+    val_x = []
+    val_y = []
+    file_list = os.listdir('/raw_data/eval_data')
+    for file in file_list:
+        if file.endswith('.npy'):
+            file_location = "/raw_data/eval_data/" + file
+            loaded_data = np.load(file_location)
+
+            for data in loaded_data:
+                tmp = cv2.flip(data[0],0)
+                tmp = cv2.flip(tmp,1)
+                if params.img_channels == 1:
+                    tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
+                data[0] = (tmp[70:-5,::]).reshape(params.img_height, params.img_width, params.img_channels)
+
+                # convert the CAN data (hex) to a numerical angle
+                tmp = data[1]
+                hex_data = tmp[-23:-21] + tmp[-20:-18]
+                hex_decimal = tmp[-3:-1]
+                int_data = int(hex_data, 16)
+                int_decimal = int(hex_decimal, 16) / 256
+
+                # if the steering wheel angle is right of center
+                if(int_data > 550):
+                    int_data = int_data - 4095
+                    int_decimal = 1 - int_decimal
+                    final_data = int_data - int_decimal
+                else:
+                    # put the integer and fractional parts together
+                    final_data = int_data + int_decimal
+                data[1] = final_data
+
+            loaded_data = loaded_data[50:]
+            tmp_x = np.array([i[0] for i in loaded_data]).reshape([-1, params.img_height, params.img_width, params.img_channels])
+            tmp_y = np.array([i[1] for i in loaded_data]).reshape([-1,1])
+
+            val_x.extend(tmp_x)
+            val_y.extend(tmp_y)
+
+    return val_x, val_y
+
+
+    # finished making the validation data
+
+

+ 103  - 0   ver_a_0.5.1/model.py

@@ -0,0 +1,103 @@
+import tensorflow as tf
+import params
+
+print_layer = True
+
+x = tf.placeholder(tf.float32, shape=[None, params.network_height, params.img_width, params.img_channels])
+y_ = tf.placeholder(tf.float32, shape=[None, 1])
+
+keep_prob = tf.placeholder(tf.float32)
+
+x_image = tf.reshape(x, [-1, params.network_height, params.img_width, params.img_channels])
+
+network = tf.layers.batch_normalization(x_image, name="norm_0")
+if print_layer:
+    print(network)
+
+# Conv Layer #1
+network = tf.layers.conv2d(network, filters=96, kernel_size=(11,11), strides=(4,4),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_1")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_1")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_1")
+if print_layer:
+    print(network)
+
+# Conv Layer #2
+
+network = tf.layers.conv2d(network, filters=256, kernel_size=(5,5), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_2")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_2")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_2")
+if print_layer:
+    print(network)
+
+# Conv Layer #3
+
+network = tf.layers.conv2d(network, filters=384, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_1")
+if print_layer:
+    print(network)
+network = tf.layers.conv2d(network, filters=256, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_2")
+if print_layer:
+    print(network)
+network = tf.layers.conv2d(network, filters=32, kernel_size=(3,3), strides=(1,1),
+    padding='same', activation=tf.nn.relu, use_bias=True, name="conv_3_3")
+if print_layer:
+    print(network)
+network = tf.layers.max_pooling2d(network, pool_size=(3,3), strides=2, padding='same',
+    name="m_pooling_3")
+if print_layer:
+    print(network)
+network = tf.layers.batch_normalization(network, name="norm_3")
+if print_layer:
+    print(network)
+
+# Flatten
+network = tf.reshape(network, [-1, 768], name="flatten")
+if print_layer:
+    print(network)
+
+# Dense Layer 1
+
+network = tf.layers.dense(network, 4096, activation=tf.nn.relu, name="dense_1")
+if print_layer:
+    print(network)
+network = tf.layers.dropout(network, rate=keep_prob, name="dropout_1")
+if print_layer:
+    print(network)
+
+# Dense Layer 2
+
+network = tf.layers.dense(network, 4096, activation=tf.nn.relu, name="dense_2")
+if print_layer:
+    print(network)
+network = tf.layers.dropout(network, rate=keep_prob, name="dropout_2")
+if print_layer:
+    print(network)
+
+# # Dense Layer 3
+
+# network = tf.layers.dense(network, 1000, activation=tf.nn.relu, name="dense_3")
+# if print_layer:
+#     print(network)
+# network = tf.layers.dropout(network, rate=keep_prob, name="dropout_3")
+# if print_layer:
+#     print(network)
+
+# output layer
+
+# activation=None => linear
+y = tf.layers.dense(network, 1, activation=None, name="output")
+if print_layer:
+    print(y)

+ 94  - 0   ver_a_0.5.1/model.py_bak

@@ -0,0 +1,94 @@
+import tensorflow as tf
+import params
+
+def weight_variable(shape):
+    initial = tf.truncated_normal(shape, stddev=0.1)
+    return tf.Variable(initial)
+
+def bias_variable(shape):
+    initial = tf.constant(0.1, shape=shape)
+    return tf.Variable(initial)
+
+def conv2d(x, W, stride):
+    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='VALID')
+
+x = tf.placeholder(tf.float32, shape=[None, params.img_height, params.img_width, params.img_channels])
+y_ = tf.placeholder(tf.float32, shape=[None, 1])
+keep_prob = tf.placeholder(tf.float32)
+
+x_image = tf.reshape(x, [-1, params.img_height, params.img_width, params.img_channels])
+# print(x_image)
+
+# first convolutional layer
+W_conv1 = weight_variable([5, 5, params.img_channels, 24])
+b_conv1 = bias_variable([24])
+
+h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 2) + b_conv1)
+print(h_conv1)
+
+# second convolutional layer
+W_conv2 = weight_variable([5, 5, 24, 36])
+b_conv2 = bias_variable([36])
+
+h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)
+print(h_conv2)
+
+# third convolutional layer
+W_conv3 = weight_variable([5, 5, 36, 48])
+b_conv3 = bias_variable([48])
+
+h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 2) + b_conv3)
+print(h_conv3)
+
+# fourth convolutional layer
+W_conv4 = weight_variable([3, 3, 48, 64])
+b_conv4 = bias_variable([64])
+
+h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4, 1) + b_conv4)
+print(h_conv4)
+
+# fifth convolutional layer
+W_conv5 = weight_variable([3, 3, 64, 64])
+b_conv5 = bias_variable([64])
+
+h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5, 1) + b_conv5)
+print(h_conv5)
+
+# fully connected layer 1
+W_fc1 = weight_variable([1600, 1164])
+b_fc1 = bias_variable([1164])
+
+h_conv5_flat = tf.reshape(h_conv5, [-1, 1600])
+h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
+
+h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
+
+# fully connected layer 2
+W_fc2 = weight_variable([1164, 100])
+b_fc2 = bias_variable([100])
+
+h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
+
+h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
+
+# fully connected layer 3
+W_fc3 = weight_variable([100, 50])
+b_fc3 = bias_variable([50])
+
+h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
+
+h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)
+
+# fully connected layer 4
+W_fc4 = weight_variable([50, 10])
+b_fc4 = bias_variable([10])
+
+h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)
+
+h_fc4_drop = tf.nn.dropout(h_fc4, keep_prob)
+
+# output
+W_fc5 = weight_variable([10, 1])
+b_fc5 = bias_variable([1])
+
+y = tf.matmul(h_fc4_drop, W_fc5) + b_fc5

+ 1  - 0   ver_a_0.5.1/mount.sh

@@ -0,0 +1 @@
+sudo mount /dev/mmcblk1p1 /raw_data

+ 10  - 0   ver_a_0.5.1/params.py

@@ -0,0 +1,10 @@
+import os
+
+img_height = 66
+img_width = 256
+img_channels = 1
+network_height = 66
+batch = 32
+epoch = 1000
+write_summary = True
+save_dir = os.path.abspath('models')

+ 5  - 0   ver_a_0.5.1/result.txt

@@ -0,0 +1,5 @@
+Compared to ver_0.5, the output layer has been changed
+
+66*256*1
+
+loss : 34.4

+ 76  - 0   ver_a_0.5.1/train.py

@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+import os
+import tensorflow as tf
+import model
+import params
+import time
+import cv2
+import numpy as np
+
+from get_val_data import val_data
+from get_train_data import train_data
+
+# write_summary = params.write_summary
+
+sess = tf.InteractiveSession()
+
+loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y)))
+train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
+sess.run(tf.global_variables_initializer())
+
+# create a summary to monitor the cost tensor
+# if write_summary:
+#     tf.summary.scalar("loss", loss)
+
+# merge all summaries into a single op
+# if write_summary:
+#     merged_summary_op = tf.summary.merge_all()
+
+# saver = tf.train.Saver()
+time_start = time.time()
+
+# op to write logs to TensorBoard
+# if write_summary:
+#     summary_writer = tf.summary.FileWriter(params.save_dir, graph=tf.get_default_graph())
+
+
+# get the validation data
+val_X, val_Y = val_data()
+# finished getting the validation data
+
+file_list = os.listdir('/raw_data')
+for i in range(params.epoch):
+    # prepare data for training
+    for file in file_list:
+        if file.endswith('.npy'):
+            print("Start process on file : ", file)
+            train_X, train_Y = train_data(file)
+            # finished getting the training data
+
+            # start training on the data
+            batch_iteration = int(train_X.shape[0] / params.batch) + 1
+            for iteration in range(batch_iteration):
+                batch_X = train_X[iteration*params.batch:(iteration+1)*params.batch]
+                batch_Y = train_Y[iteration*params.batch:(iteration+1)*params.batch]
+
+                train_step.run(feed_dict={model.x: batch_X, model.y_: batch_Y, model.keep_prob: 0.8})
+
+            # write logs at every iteration
+            # if write_summary:
+            #     summary = merged_summary_op.eval(feed_dict={model.x: train_X, model.y_: train_Y, model.keep_prob: 1.0})
+            #     summary_writer.add_summary(summary, i)
+
+                t_loss = loss.eval(feed_dict={model.x: batch_X, model.y_: batch_Y, model.keep_prob: 1.0})
+                v_loss = loss.eval(feed_dict={model.x: val_X, model.y_: val_Y, model.keep_prob: 1.0})
+                print("epoch {} of {}, batch {} of {}, train loss {}, val loss {}".format(i, params.epoch, iteration, batch_iteration, t_loss, v_loss))
+
+            # if (i+1) % 10 == 0:
+            #     if not os.path.exists(params.save_dir):
+            #         os.makedirs(params.save_dir)
+            #     checkpoint_path = os.path.join(params.save_dir, "model.ckpt")
+            #     filename = saver.save(sess, checkpoint_path)
+
+
+
+
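Unlike ver_0.7.2/train.py, the batch count here is int(train_X.shape[0] / params.batch) + 1 unconditionally, which produces one empty trailing batch whenever the sample count is an exact multiple of the batch size. Ceiling division avoids that edge case; a one-line sketch:

    # rounds up only when a remainder exists: 64 samples, batch 32 -> 2
    batch_iteration = -(-train_X.shape[0] // params.batch)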
