Merge pull request udacity#15 from udacity/yf_evaluation
Evaluation folder
Showing 8 changed files with 569 additions and 0 deletions.
@@ -0,0 +1,31 @@
<img src="../../images/cover.png" alt="Self-Driving Car" width="800px">

## Evaluation

Download a model from S3 and use the corresponding script to evaluate it against a ROS bag (see the usage sketch after the model list).

### Models available

#### autumn

* [autumn-cnn-model-tf.meta](https://s3.amazonaws.com/udacity-sdc/steering-models/autumn/autumn-cnn-model-tf.meta)
* [autumn-lstm-model-keras.json](https://s3.amazonaws.com/udacity-sdc/steering-models/autumn/autumn-lstm-model-keras.json)
* [autumn-cnn-weights.ckpt](https://s3.amazonaws.com/udacity-sdc/steering-models/autumn/autumn-cnn-weights.ckpt)
* [autumn-lstm-weights.hdf5](https://s3.amazonaws.com/udacity-sdc/steering-models/autumn/autumn-lstm-weights.hdf5)

#### komanda

* [komanda.test-subgraph.meta](https://s3.amazonaws.com/udacity-sdc/steering-models/komanda/komanda.test-subgraph.meta)
* [FINE_TUNE_2-checkpoint-sdc-ch2-epoch5](https://s3.amazonaws.com/udacity-sdc/steering-models/komanda/udacity-challenge2-model/FINE_TUNE_2-checkpoint-sdc-ch2-epoch5)

#### chauffeur

* [cnn.json](https://s3.amazonaws.com/udacity-sdc/steering-models/chauffeur/cnn.json)
* [cnn.weights](https://s3.amazonaws.com/udacity-sdc/steering-models/chauffeur/cnn.weights)
* [lstm.json](https://s3.amazonaws.com/udacity-sdc/steering-models/chauffeur/lstm.json)
* [lstm.weights](https://s3.amazonaws.com/udacity-sdc/steering-models/chauffeur/lstm.weights)

#### rambo

* [X_train_mean.npy](https://s3.amazonaws.com/udacity-sdc/steering-models/rambo/X_train_mean.npy)
* [final_model.hdf5](https://s3.amazonaws.com/udacity-sdc/steering-models/rambo/final_model.hdf5)

#### rwightman

* [resnet50-50n-small-2bn.model](https://s3.amazonaws.com/udacity-sdc/steering-models/rwightman/resnet50-50n-small-2bn.model)
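
Each runner script below follows the same evaluation flow: build a predictor from the downloaded model files, then stream camera frames from the bag and report steering RMSE. A minimal sketch of that shared flow, assuming the `rmse.py` and `generator.py` helpers that ship alongside these scripts and a `make_predictor` factory like the ones each runner defines:

```python
# Sketch of the evaluation loop shared by the runner scripts (not a drop-in tool).
from rmse import calc_rmse      # helper included in this folder
from generator import gen       # yields (image, image copy, speed, angle, timestamp) per frame

def evaluate(make_predictor, bagfile):
    predict = make_predictor()                  # callable: image -> predicted steering angle
    return calc_rmse(lambda img: predict(img),  # compare predictions to recorded steering
                     gen(bagfile))
```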
@@ -0,0 +1,115 @@
#import rospy
#from steering_node import SteeringNode

from collections import deque
import argparse
import csv
import scipy.misc
import cv2
import numpy as np

import tensorflow as tf
from keras import backend as K
from keras.models import *
from keras.layers import *
from keras.layers.recurrent import LSTM

from math import pi
from rmse import calc_rmse
from generator import gen


class AutumnModel(object):
    def __init__(self, cnn_graph, lstm_json, cnn_weights, lstm_weights):
        sess = tf.InteractiveSession()
        saver = tf.train.import_meta_graph(cnn_graph)
        saver.restore(sess, cnn_weights)
        self.cnn = tf.get_default_graph()

        self.fc3 = self.cnn.get_tensor_by_name("fc3/mul:0")
        self.y = self.cnn.get_tensor_by_name("y:0")
        self.x = self.cnn.get_tensor_by_name("x:0")
        self.keep_prob = self.cnn.get_tensor_by_name("keep_prob:0")

        with open(lstm_json, 'r') as f:
            json_string = f.read()
        self.model = model_from_json(json_string)
        self.model.load_weights(lstm_weights)

        self.prev_image = None
        self.last = []
        self.steps = []

    def process(self, img):
        # Compute dense optical flow between the previous and current frames.
        prev_image = self.prev_image if self.prev_image is not None else img
        self.prev_image = img
        prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
        next_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)

        # Keep a rolling window of the last four flow fields.
        self.last.append(flow)
        if len(self.last) > 4:
            self.last.pop(0)

        # Weighted average of the stored flow fields, favouring recent frames.
        weights = [1, 1, 2, 2]
        last = list(self.last)
        for x in range(len(last)):
            last[x] = last[x] * weights[x]

        avg_flow = sum(last) / sum(weights)
        mag, ang = cv2.cartToPolar(avg_flow[..., 0], avg_flow[..., 1])

        # Encode flow direction as hue and magnitude as value, then convert to RGB.
        hsv = np.zeros_like(prev_image)
        hsv[..., 1] = 255
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        return rgb

    def predict(self, img):
        img = self.process(img)
        image = scipy.misc.imresize(img[-400:], [66, 200]) / 255.0
        cnn_output = self.fc3.eval(feed_dict={self.x: [image], self.keep_prob: 1.0})
        self.steps.append(cnn_output)
        if len(self.steps) > 100:
            self.steps.pop(0)
        output = self.y.eval(feed_dict={self.x: [image], self.keep_prob: 1.0})
        return output[0][0]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Model Runner for team autumn')
    parser.add_argument('bagfile', type=str, help='Path to ROS bag')
    parser.add_argument('--input', '-i', action='store', dest='input_file',
                        default='example-final.csv', help='Input model csv file name')
    parser.add_argument('--output', '-o', action='store', dest='output_file',
                        default='output-lstm.csv', help='Output csv file name')
    parser.add_argument('--data-dir', '--data', action='store', dest='data_dir')
    parser.add_argument('--cnn-graph', '--cnn-meta', action='store', dest='cnn_graph',
                        default='autumn/autumn-cnn-model-tf.meta')
    parser.add_argument('--lstm-json', '--lstm-meta', action='store', dest='lstm_json',
                        default='autumn/autumn-lstm-model-keras.json')
    parser.add_argument('--cnn-weights', action='store', dest='cnn_weights',
                        default='autumn/autumn-cnn-weights.ckpt')
    parser.add_argument('--lstm-weights', action='store', dest='lstm_weights',
                        default='autumn/autumn-lstm-weights.hdf5')
    args = parser.parse_args()

    def make_predictor():
        model = AutumnModel(args.cnn_graph, args.lstm_json, args.cnn_weights, args.lstm_weights)
        return lambda img: model.predict(img)

    def process(predictor, img):
        return predictor(img)

    model = make_predictor()

    print calc_rmse(lambda image_pred: model(image_pred),
                    gen(args.bagfile))
@@ -0,0 +1,124 @@
import cv2
import numpy as np
#import rospy
from keras import backend as K
from keras import metrics
from keras.models import load_model
from keras.models import model_from_json
import argparse
from collections import deque

from scipy import misc
import csv
import os
from math import pi
from rmse import calc_rmse
from generator import gen


class ChauffeurModel(object):
    def __init__(self,
                 cnn_json_path,
                 cnn_weights_path,
                 lstm_json_path,
                 lstm_weights_path):

        self.cnn = self.load_from_json(cnn_json_path, cnn_weights_path)
        self.encoder = self.load_encoder(cnn_json_path, cnn_weights_path)
        self.lstm = self.load_from_json(lstm_json_path, lstm_weights_path)

        # hardcoded from final submission model
        self.scale = 16.
        self.timesteps = 100

    def load_encoder(self, cnn_json_path, cnn_weights_path):
        # Load the CNN and drop its final layer so it acts as a feature extractor.
        model = self.load_from_json(cnn_json_path, cnn_weights_path)

        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []

        return model

    def load_from_json(self, json_path, weights_path):
        model = model_from_json(open(json_path, 'r').read())
        model.load_weights(weights_path)
        return model

    def make_cnn_only_predictor(self):
        def predict_fn(img):
            # preprocess image to be YUV 320x120 and equalize Y histogram
            img = cv2.resize(img, (320, 240))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            img = img[120:240, :, :]
            img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
            img = ((img - (255.0 / 2)) / 255.0)

            return self.cnn.predict_on_batch(img.reshape((1, 120, 320, 3)))[0, 0] / self.scale

        return predict_fn

    def make_stateful_predictor(self):
        steps = deque()

        def predict_fn(img):
            # preprocess image to be YUV 320x120 and equalize Y histogram
            img = cv2.resize(img, (320, 240))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            img = img[120:240, :, :]
            img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
            img = ((img - (255.0 / 2)) / 255.0)

            # apply feature extractor
            img = self.encoder.predict_on_batch(img.reshape((1, 120, 320, 3)))

            # initial fill of timesteps
            if not len(steps):
                for _ in xrange(self.timesteps):
                    steps.append(img)

            # put most recent features at end
            steps.popleft()
            steps.append(img)

            timestepped_x = np.empty((1, self.timesteps, img.shape[1]))
            for i, img in enumerate(steps):
                timestepped_x[0, i] = img

            return self.lstm.predict_on_batch(timestepped_x)[0, 0] / self.scale

        return predict_fn


def rmse(y_true, y_pred):
    '''Calculates RMSE
    '''
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

# register the custom metric so models serialized with it can be loaded
metrics.rmse = rmse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Model Runner for team chauffeur')
    parser.add_argument('bagfile', type=str, help='Path to ROS bag')
    parser.add_argument('cnn_json_path', type=str, help='Path to cnn encoding json')
    parser.add_argument('cnn_weights_path', type=str, help='Path to cnn encoding weights')
    parser.add_argument('lstm_json_path', type=str, help='Path to lstm encoding json')
    parser.add_argument('lstm_weights_path', type=str, help='Path to lstm encoding weights')
    args = parser.parse_args()

    def make_predictor():
        K.set_learning_phase(0)
        model = ChauffeurModel(
            args.cnn_json_path,
            args.cnn_weights_path,
            args.lstm_json_path,
            args.lstm_weights_path)
        return model.make_stateful_predictor()

    def process(predictor, img):
        return predictor(img)

    model = make_predictor()

    print calc_rmse(lambda image_pred: model(image_pred),
                    gen(args.bagfile))
@@ -0,0 +1,40 @@
import rosbag
from StringIO import StringIO
from scipy import misc
import numpy as np

# Topics of interest and the short keys they are stored under.
KEY_NAME = {
    '/vehicle/steering_report': 'steering',
    '/center_camera/image_color/c': 'image',
}

def update(msg, d):
    key = KEY_NAME.get(msg.topic)
    if key is None: return
    d[key] = msg


def gen(bag):
    print 'Getting bag'
    bag = rosbag.Bag(bag)
    print 'Got bag'

    image = {}
    total = bag.get_message_count()
    count = 0
    for e in bag.read_messages():
        count += 1
        if count % 10000 == 0:
            print count, '/', total
        if e.topic in ['/center_camera/image_color/compressed']:
            # Skip camera frames until a steering message has been seen,
            # and ignore frames recorded at low speed.
            if len({'steering'} - set(image.keys())):
                continue
            if image['steering'].message.speed < 5.: continue
            s = StringIO(e.message.data)
            img = misc.imread(s)
            yield (img, np.copy(img), image['steering'].message.speed,
                   image['steering'].message.steering_wheel_angle, e.timestamp.to_nsec())
            last_ts = e.timestamp.to_nsec()
        else:
            update(e, image)
@@ -0,0 +1,76 @@
#!/usr/bin/env python

"""
Udacity self-driving car challenge 2
Team komanda steering model
Author: Ilya Edrenkin, [email protected]
"""

import argparse
import tempfile
from collections import deque

import cv2
import numpy as np
import tensorflow as tf

from math import pi
from rmse import calc_rmse
from generator import gen
import time


class KomandaModel(object):
    def __init__(self, checkpoint_dir, metagraph_file):
        self.graph = tf.Graph()
        self.LEFT_CONTEXT = 5  # TODO remove hardcode; store it in the graph
        with self.graph.as_default():
            saver = tf.train.import_meta_graph(metagraph_file)
            ckpt = tf.train.latest_checkpoint(checkpoint_dir)
            self.session = tf.Session(graph=self.graph)
            saver.restore(self.session, ckpt)
        self.input_images = deque()  # will be of size self.LEFT_CONTEXT + 1
        self.internal_state = []  # will hold controller_{final -> initial}_state_{0,1,2}

        # TODO controller state names should be stored in the graph
        self.input_tensors = map(self.graph.get_tensor_by_name, ["input_images:0", "controller_initial_state_0:0", "controller_initial_state_1:0", "controller_initial_state_2:0"])
        self.output_tensors = map(self.graph.get_tensor_by_name, ["output_steering:0", "controller_final_state_0:0", "controller_final_state_1:0", "controller_final_state_2:0"])

    def predict(self, img):
        # Maintain a sliding window of the last LEFT_CONTEXT + 1 frames.
        if len(self.input_images) == 0:
            self.input_images += [img] * (self.LEFT_CONTEXT + 1)
        else:
            self.input_images.popleft()
            self.input_images.append(img)
        input_images_tensor = np.stack(self.input_images)
        # Feed the recurrent controller its previous state, if any.
        if not self.internal_state:
            feed_dict = {self.input_tensors[0]: input_images_tensor}
        else:
            feed_dict = dict(zip(self.input_tensors, [input_images_tensor] + self.internal_state))
        steering, c0, c1, c2 = self.session.run(self.output_tensors, feed_dict=feed_dict)
        self.internal_state = [c0, c1, c2]
        return steering[0][0]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Model Runner for team komanda')
    parser.add_argument('bagfile', type=str, help='Path to ROS bag')
    parser.add_argument('metagraph_file', type=str, help='Path to the metagraph file')
    parser.add_argument('checkpoint_dir', type=str, help='Path to the checkpoint dir')
    parser.add_argument('--debug_print', dest='debug_print', action='store_true', help='Debug print of predicted steering commands')
    args = parser.parse_args()

    def make_predictor():
        model = KomandaModel(
            checkpoint_dir=args.checkpoint_dir,
            metagraph_file=args.metagraph_file)
        return lambda img: model.predict(img)

    def process(predictor, img):
        steering = predictor(img)
        if args.debug_print: print steering
        return steering

    model = make_predictor()

    print calc_rmse(lambda image_pred: model(image_pred),
                    gen(args.bagfile))
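
The `rmse.py` helper imported by every runner is not part of this commit. Judging from how the scripts call it, `calc_rmse(predict_fn, generator)` consumes the tuples yielded by `generator.gen` and returns a scalar error; the following is only a hypothetical sketch of that interface, not the repository's implementation:

```python
# Hypothetical sketch of the calc_rmse interface used above (rmse.py is not shown in this commit).
import numpy as np

def calc_rmse(predict_fn, data_generator):
    predictions, targets = [], []
    for img, _, speed, steering_angle, timestamp in data_generator:
        predictions.append(predict_fn(img))   # model's predicted steering for this frame
        targets.append(steering_angle)        # recorded steering wheel angle from the bag
    return np.sqrt(np.mean((np.array(predictions) - np.array(targets)) ** 2))
```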