Showing 16 changed files with 3,360 additions and 3,402 deletions.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,317 @@ | ||
{ | ||
"cells": [], | ||
"metadata": {}, | ||
"cells": [ | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 1, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/challenge_Moment.jpg\n", | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/solidWhiteCurve.jpg\n", | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/solidWhiteRight.jpg\n", | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/solidYellowCurve.jpg\n", | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/solidYellowCurve2.jpg\n", | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/solidYellowLeft.jpg\n", | ||
"(540, 960, 3)\n", | ||
"Currently processing image: ./test_images/whiteCarLaneSwitch.jpg\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"import matplotlib.pyplot as plt\n", | ||
"import cv2\n", | ||
"import os\n", | ||
"import array\n", | ||
"from os.path import join, basename\n", | ||
"import matplotlib.image as mpimg\n", | ||
"from lane_finding import lane_finding_pipeline\n", | ||
"import numpy as np\n", | ||
"\n", | ||
"\n", | ||
"# Import everything needed to edit/save/watch video clips\n", | ||
"from moviepy.editor import VideoFileClip\n", | ||
"from IPython.display import HTML\n", | ||
"\n", | ||
"if __name__ == '__main__':\n", | ||
"\n", | ||
" verbose = True\n", | ||
" #if verbose:\n", | ||
" #plt.ion()\n", | ||
" # figManager = plt.get_current_fig_manager()\n", | ||
" #figManager.window.showMaximized()\n", | ||
"\n", | ||
" input_image_list = os.listdir(\"test_images/\")\n", | ||
" input_image_dir = \"./test_images/\"\n", | ||
" input_image_list = [join(input_image_dir, name) for name in input_image_list]\n", | ||
" image_out_dir = 'test_images_output'\n", | ||
"\n", | ||
"\n", | ||
" for image in input_image_list:\n", | ||
" # Read in the image\n", | ||
" image_raw = mpimg.imread(image)\n", | ||
" image_raw_resized = cv2.resize(image_raw, dsize=(960,540), interpolation=cv2.INTER_CUBIC)\n", | ||
" \n", | ||
" imshape = image_raw_resized.shape\n", | ||
" print (imshape)\n", | ||
" imshape = image_raw.shape\n", | ||
" vertices = np.array([[(100,imshape[0]),(450, 320), (520,320), (925,imshape[0])]], dtype=np.int32)\n", | ||
" img_out = lane_finding_pipeline(image_raw, vertices)\n", | ||
" out_path = join(image_out_dir, basename(image))\n", | ||
" mpimg.imsave(out_path, img_out)\n", | ||
"\n", | ||
" print('Currently processing image: {}'.format(image))\n", | ||
" \n", | ||
" \n", | ||
"\n", | ||
" \n", | ||
" \n", | ||
" def process_image(image):\n", | ||
" # NOTE: The output you return should be a color image (3 channel) for processing video below\n", | ||
" # TODO: put your pipeline here,\n", | ||
" # you should return the final output (image where lines are drawn on lanes)\n", | ||
" \n", | ||
" image = cv2.resize(image, dsize=(960,540), interpolation=cv2.INTER_CUBIC)\n", | ||
" \n", | ||
" #vertices = np.array([[(180,imshape[0]-50),(480, 320), (530,320), (825,imshape[0]-50)]], dtype=np.int32)\n", | ||
" vertices = np.array([[(100,imshape[0]),(450, 320), (520,320), (925,imshape[0])]], dtype=np.int32)\n", | ||
" result = lane_finding_pipeline(image, vertices)\n", | ||
"\n", | ||
" return result\n", | ||
" \n", | ||
" \n", | ||
" \n", | ||
" " | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 2, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"[MoviePy] >>>> Building video test_videos_output/solidWhiteRight.mp4\n", | ||
"[MoviePy] Writing video test_videos_output/solidWhiteRight.mp4\n" | ||
] | ||
}, | ||
{ | ||
"name": "stderr", | ||
"output_type": "stream", | ||
"text": [ | ||
"100%|█████████▉| 221/222 [00:11<00:00, 19.92it/s]\n" | ||
] | ||
}, | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"[MoviePy] Done.\n", | ||
"[MoviePy] >>>> Video ready: test_videos_output/solidWhiteRight.mp4 \n", | ||
"\n", | ||
"CPU times: user 4.29 s, sys: 670 ms, total: 4.96 s\n", | ||
"Wall time: 12.9 s\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"white_output = 'test_videos_output/solidWhiteRight.mp4'\n", | ||
"## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n", | ||
"## To do so add .subclip(start_second,end_second) to the end of the line below\n", | ||
"## Where start_second and end_second are integer values representing the start and end of the subclip\n", | ||
"## You may also uncomment the following line for a subclip of the first 5 seconds\n", | ||
"#clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,2)\n", | ||
"clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\n", | ||
"white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n", | ||
"%time white_clip.write_videofile(white_output, audio=False)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 3, | ||
"metadata": { | ||
"scrolled": true | ||
}, | ||
"outputs": [ | ||
{ | ||
"data": { | ||
"text/html": [ | ||
"\n", | ||
" <video width=\"960\" height=\"540\" controls>\n", | ||
" <source src=\"test_videos_output/solidWhiteRight.mp4\">\n", | ||
" </video>\n", | ||
" " | ||
], | ||
"text/plain": [ | ||
"<IPython.core.display.HTML object>" | ||
] | ||
}, | ||
"execution_count": 3, | ||
"metadata": {}, | ||
"output_type": "execute_result" | ||
} | ||
], | ||
"source": [ | ||
"HTML(\"\"\"\n", | ||
" <video width=\"960\" height=\"540\" controls>\n", | ||
" <source src=\"{0}\">\n", | ||
" </video>\n", | ||
" \"\"\".format(white_output))" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 4, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"def process_image(image):\n", | ||
" # NOTE: The output you return should be a color image (3 channel) for processing video below\n", | ||
" # TODO: put your pipeline here,\n", | ||
" # you should return the final output (image where lines are drawn on lanes)\n", | ||
" \n", | ||
" image = cv2.resize(image, dsize=(960,540), interpolation=cv2.INTER_CUBIC)\n", | ||
" \n", | ||
" vertices = np.array([[(180,imshape[0]-50),(370, 360), (600,360), (825,imshape[0]-50)]], dtype=np.int32)\n", | ||
" result = lane_finding_pipeline(image, vertices)\n", | ||
"\n", | ||
" return result" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 5, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"[MoviePy] >>>> Building video test_videos_output/challenge.mp4\n", | ||
"[MoviePy] Writing video test_videos_output/challenge.mp4\n" | ||
] | ||
}, | ||
{ | ||
"name": "stderr", | ||
"output_type": "stream", | ||
"text": [ | ||
"100%|██████████| 251/251 [00:15<00:00, 18.40it/s]\n" | ||
] | ||
}, | ||
{ | ||
"name": "stdout", | ||
"output_type": "stream", | ||
"text": [ | ||
"[MoviePy] Done.\n", | ||
"[MoviePy] >>>> Video ready: test_videos_output/challenge.mp4 \n", | ||
"\n", | ||
"CPU times: user 7.35 s, sys: 700 ms, total: 8.05 s\n", | ||
"Wall time: 16.7 s\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"challenge_output = 'test_videos_output/challenge.mp4'\n", | ||
"## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n", | ||
"## To do so add .subclip(start_second,end_second) to the end of the line below\n", | ||
"## Where start_second and end_second are integer values representing the start and end of the subclip\n", | ||
"## You may also uncomment the following line for a subclip of the first 5 seconds\n", | ||
"#clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(4,6)\n", | ||
"clip3 = VideoFileClip('test_videos/challenge.mp4')\n", | ||
"challenge_clip = clip3.fl_image(process_image)\n", | ||
"%time challenge_clip.write_videofile(challenge_output, audio=False)" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": 6, | ||
"metadata": {}, | ||
"outputs": [ | ||
{ | ||
"data": { | ||
"text/html": [ | ||
"\n", | ||
"<video width=\"960\" height=\"540\" controls>\n", | ||
" <source src=\"test_videos_output/challenge.mp4\">\n", | ||
"</video>\n" | ||
], | ||
"text/plain": [ | ||
"<IPython.core.display.HTML object>" | ||
] | ||
}, | ||
"execution_count": 6, | ||
"metadata": {}, | ||
"output_type": "execute_result" | ||
} | ||
], | ||
"source": [ | ||
"HTML(\"\"\"\n", | ||
"<video width=\"960\" height=\"540\" controls>\n", | ||
" <source src=\"{0}\">\n", | ||
"</video>\n", | ||
"\"\"\".format(challenge_output))" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"image = mpimg.imread('./test_images/challenge_Moment.jpg')\n", | ||
"image_raw_resized = cv2.resize(image, dsize=(960,540), interpolation=cv2.INTER_CUBIC)\n", | ||
"\n", | ||
"imshape = image_raw_resized.shape\n", | ||
"print (imshape)\n", | ||
"vertices = np.array([[(180,imshape[0]-50),(370, 360), (600,360), (825,imshape[0]-50)]], dtype=np.int32)\n", | ||
"img_out = lane_finding_pipeline(image_raw_resized, vertices)\n", | ||
"out_path = './test_images_output/challenge_Moment.jpg'\n", | ||
"mpimg.imsave(out_path, img_out)\n", | ||
"\n", | ||
"print('Currently processing image: {}'.format('challenge_Moment.jpg'))" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [] | ||
} | ||
], | ||
"metadata": { | ||
"kernelspec": { | ||
"display_name": "Python 3", | ||
"language": "python", | ||
"name": "python3" | ||
}, | ||
"language_info": { | ||
"codemirror_mode": { | ||
"name": "ipython", | ||
"version": 3 | ||
}, | ||
"file_extension": ".py", | ||
"mimetype": "text/x-python", | ||
"name": "python", | ||
"nbconvert_exporter": "python", | ||
"pygments_lexer": "ipython3", | ||
"version": "3.5.2" | ||
}, | ||
"widgets": { | ||
"state": {}, | ||
"version": "1.1.2" | ||
} | ||
}, | ||
"nbformat": 4, | ||
"nbformat_minor": 2 | ||
} |
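The notebook imports lane_finding_pipeline(image, vertices) from lane_finding.py, but that module is not part of this commit. As a rough reference, the sketch below is a minimal, assumption-based version of such a pipeline using the standard grayscale / Canny / region-mask / Hough-transform steps; every parameter value (blur kernel, Canny thresholds, Hough settings) is a placeholder, not necessarily what the actual module uses.

```python
# Minimal sketch of a lane_finding_pipeline(image, vertices) with the same
# signature the notebook imports from lane_finding.py. The real module is not
# shown in this diff; steps and parameter values here are assumptions based on
# the standard Canny + region-mask + Hough approach.
import cv2
import numpy as np

def lane_finding_pipeline(image, vertices):
    # 1. Grayscale, blur, and edge detection.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, 50, 150)  # thresholds are placeholders

    # 2. Keep only the region of interest defined by the vertices polygon.
    mask = np.zeros_like(edges)
    cv2.fillPoly(mask, vertices, 255)
    masked = cv2.bitwise_and(edges, mask)

    # 3. Detect line segments with a probabilistic Hough transform.
    lines = cv2.HoughLinesP(masked, rho=2, theta=np.pi / 180, threshold=15,
                            minLineLength=40, maxLineGap=20)

    # 4. Draw the segments and blend them onto the original frame.
    line_img = np.zeros_like(image)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(line_img, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return cv2.addWeighted(image, 0.8, line_img, 1.0, 0.0)
```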
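Both process_image definitions and the still-image loop hard-code region-of-interest polygons in pixel coordinates for 960x540 frames ((100, h), (450, 320), (520, 320), (925, h) for the test images, and a wider, lower polygon for the challenge clip), and they read the imshape variable left over from the earlier loop. A hypothetical helper like the one below (roi_vertices is not part of the commit) would derive the polygon from the frame shape instead, so the mask follows whatever resize target is chosen; the fractions only roughly reproduce the first hard-coded polygon.

```python
# Hypothetical helper (not in the commit): derive the region-of-interest
# polygon from the frame shape instead of hard-coding 960x540 pixel values.
# The fractions below roughly reproduce the notebook's
# (100, h), (450, 320), (520, 320), (925, h) polygon on a 960x540 frame.
import numpy as np

def roi_vertices(imshape):
    h, w = imshape[0], imshape[1]
    return np.array([[(int(0.10 * w), h),
                      (int(0.47 * w), int(0.59 * h)),
                      (int(0.54 * w), int(0.59 * h)),
                      (int(0.96 * w), h)]], dtype=np.int32)
```

process_image could then call roi_vertices(image.shape) on the already-resized frame rather than relying on imshape from the enclosing scope.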
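The NOTE comment in process_image stresses that the returned frame must be a 3-channel color image for the video step. If that ever needs checking, a small wrapper such as the one below (checked_process_image is an assumption, not part of the commit) can assert the shape and dtype before moviepy's fl_image consumes each frame.

```python
# Optional sanity check (an assumption, not part of the commit): verify that
# process_image returns a 3-channel uint8 frame before handing it to fl_image,
# matching the notebook's note that the video step expects color frames.
import numpy as np

def checked_process_image(image):
    result = process_image(image)
    assert result.ndim == 3 and result.shape[2] == 3, "expected a 3-channel frame"
    assert result.dtype == np.uint8, "expected uint8 pixel values"
    return result

# Example: white_clip = clip1.fl_image(checked_process_image)
```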