# studentMainP1P2.py
# ----------
# Background
#
# A robotics company named Trax has created a line of small self-driving robots
# designed to autonomously traverse desert environments in search of undiscovered
# water deposits.
#
# A Traxbot looks like a small tank. Each one is about half a meter long and drives
# on two continuous metal tracks. In order to maneuver itself, a Traxbot can do one
# of two things: it can drive in a straight line or it can turn. So to make a
# right turn, a Traxbot will drive forward, stop, turn 90 degrees, then continue
# driving straight.
#
# This series of questions involves the recovery of a rogue Traxbot. This bot has
# gotten lost somewhere in the desert and is now stuck driving in an almost-circle: it has
# been repeatedly driving forward by some step size, stopping, turning a certain
# amount, and repeating this process... Luckily, the Traxbot is still sending all
# of its sensor data back to headquarters.
#
# In this project, we will start with a simple version of this problem and
# gradually add complexity. By the end, you will have a fully articulated
# plan for recovering the lost Traxbot.
#
# ----------
# Part One
#
# Let's start by thinking about circular motion (well, really it's polygon motion
# that is close to circular motion). Assume that Traxbot lives on
# an (x, y) coordinate plane and (for now) is sending you PERFECTLY ACCURATE sensor
# measurements.
#
# With a few measurements you should be able to figure out the step size and the
# turning angle that Traxbot is moving with. With these two pieces of
# information, you can write a function that predicts Traxbot's next location
# (the geometry is spelled out below).
#
# You can use the robot class that is already written to make your life easier.
# You should re-familiarize yourself with this class, since some of the details
# have changed.
#
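# To make the geometry concrete: if P0, P1, P2 are three consecutive
# noise-free positions (placeholder names used only for this sketch), then
#
#     step size      d     = distance from P1 to P2
#     new heading    h12   = atan2(P2.y - P1.y, P2.x - P1.x)
#     old heading    h01   = atan2(P1.y - P0.y, P1.x - P0.x)
#     turning angle  theta = h12 - h01
#
# and the next position can be predicted as
#
#     P3 = P2 + d * (cos(h12 + theta), sin(h12 + theta))
#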
# ----------
# YOUR JOB
#
# Complete the estimate_next_pos function. You will probably want to use
# the OTHER variable to keep track of information about the runaway robot.
#
# ----------
# GRADING
#
# We will make repeated calls to your estimate_next_pos function. After
# each call, we will compare your estimated position to the robot's true
# position. As soon as you are within 0.01 stepsizes of the true position,
# you will be marked correct and we will tell you how many steps it took
# before your function successfully located the target bot.
# These import steps give you access to libraries which you may (or may
# not) want to use.
from robot import *
from math import *
from matrix import *
# This is the function you have to write. The argument 'measurement' is a
# single (x, y) point. This function will have to be called multiple
# times before you have enough information to accurately predict the
# next position. The OTHER variable that your function returns will be
# passed back to your function the next time it is called. You can use
# this to keep track of important information over time.
def estimate_next_pos(measurement, OTHER=None):
    """Estimate the next (x, y) position of the wandering Traxbot
    based on noisy (x, y) measurements."""
    if OTHER is None:
        # First call: no history yet, so treat the current measurement
        # as the previous position as well.
        OTHER = []
        x_old = measurement[0]
        y_old = measurement[1]
    else:
        x_old = OTHER[-1][0]
        y_old = OTHER[-1][1]
    x = measurement[0]
    y = measurement[1]
    # The second-to-last measurement is available once two points are stored.
    if len(OTHER) >= 2:
        x_old2 = OTHER[-2][0]
        y_old2 = OTHER[-2][1]
    else:
        x_old2 = x_old
        y_old2 = y_old
    bearing = atan2(y - y_old, x - x_old)
    bearing_old = atan2(y_old - y_old2, x_old - x_old2)
    theta = bearing - bearing_old                   # estimated turning angle per step
    d = sqrt((y - y_old) ** 2 + (x - x_old) ** 2)   # estimated step size
    x_new = x + d * cos(theta + bearing)
    y_new = y + d * sin(theta + bearing)
    xy_estimate = (x_new, y_new)
    # You must return xy_estimate (x, y), and OTHER (even if it is None)
    # in this order for grading purposes.
    OTHER.append(measurement)
    return xy_estimate, OTHER
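# For the noisy measurements used further below (a nonzero measurement noise
# is set when the target bot is created), a single pair of consecutive points
# gives a shaky estimate of the step size and turning angle. One possible
# refinement -- shown here only as an illustrative sketch, not as the required
# solution -- is to average both quantities over the whole measurement history
# kept in OTHER. The function name below is just for illustration.
def estimate_next_pos_averaged(measurement, OTHER=None):
    """Like estimate_next_pos, but averages the step size and turning
    angle over all measurements seen so far to smooth out noise."""
    if OTHER is None:
        OTHER = []
    OTHER.append(measurement)
    if len(OTHER) < 3:
        # Not enough history to estimate a turning angle yet; just
        # guess that the robot stays where it was measured.
        return measurement, OTHER
    steps = []
    turns = []
    prev_heading = None
    for i in range(1, len(OTHER)):
        dx = OTHER[i][0] - OTHER[i - 1][0]
        dy = OTHER[i][1] - OTHER[i - 1][1]
        steps.append(sqrt(dx ** 2 + dy ** 2))
        heading = atan2(dy, dx)
        if prev_heading is not None:
            # Wrap the heading change into [-pi, pi) before averaging.
            turn = (heading - prev_heading + pi) % (2 * pi) - pi
            turns.append(turn)
        prev_heading = heading
    avg_step = sum(steps) / len(steps)
    avg_turn = sum(turns) / len(turns)
    heading_est = prev_heading + avg_turn
    x_new = measurement[0] + avg_step * cos(heading_est)
    y_new = measurement[1] + avg_step * sin(heading_est)
    return (x_new, y_new), OTHER
# To try it, pass estimate_next_pos_averaged to demo_grading() at the bottom
# of this file instead of estimate_next_pos.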
# A helper function you may find useful.
def distance_between(point1, point2):
    """Computes distance between point1 and point2. Points are (x, y) pairs."""
    x1, y1 = point1
    x2, y2 = point2
    return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
# This is here to give you a sense for how we will be running and grading
# your code. Note that the OTHER variable allows you to store any
# information that you want.
def demo_grading(estimate_next_pos_fcn, target_bot, OTHER=None):
    localized = False
    distance_tolerance = 0.01 * target_bot.distance
    ctr = 0
    # if you haven't localized the target bot, make a guess about the next
    # position, then we move the bot and compare your guess to the true
    # next position. When you are close enough, we stop checking.
    # For Visualization
    import turtle  # You need to run this locally to use the turtle module
    window = turtle.Screen()
    window.bgcolor('white')
    size_multiplier = 25.0  # change size of animation
    broken_robot = turtle.Turtle()
    broken_robot.shape('turtle')
    broken_robot.color('green')
    broken_robot.resizemode('user')
    broken_robot.shapesize(0.1, 0.1, 0.1)
    measured_broken_robot = turtle.Turtle()
    measured_broken_robot.shape('circle')
    measured_broken_robot.color('red')
    measured_broken_robot.resizemode('user')
    measured_broken_robot.shapesize(0.1, 0.1, 0.1)
    prediction = turtle.Turtle()
    prediction.shape('arrow')
    prediction.color('blue')
    prediction.resizemode('user')
    prediction.shapesize(0.1, 0.1, 0.1)
    prediction.penup()
    broken_robot.penup()
    measured_broken_robot.penup()
    # End of Visualization
    while not localized and ctr <= 1000:
        ctr += 1
        measurement = target_bot.sense()
        position_guess, OTHER = estimate_next_pos_fcn(measurement, OTHER)
        target_bot.move_in_circle()
        true_position = (target_bot.x, target_bot.y)
        error = distance_between(position_guess, true_position)
        if error <= distance_tolerance:
            print "You got it right! It took you ", ctr, " steps to localize."
            localized = True
        if ctr == 1000:
            print "Sorry, it took you too many steps to localize the target."
        # More Visualization
        measured_broken_robot.setheading(target_bot.heading * 180 / pi)
        measured_broken_robot.goto(measurement[0] * size_multiplier, measurement[1] * size_multiplier - 200)
        measured_broken_robot.stamp()
        broken_robot.setheading(target_bot.heading * 180 / pi)
        broken_robot.goto(target_bot.x * size_multiplier, target_bot.y * size_multiplier - 200)
        broken_robot.stamp()
        prediction.setheading(target_bot.heading * 180 / pi)
        prediction.goto(position_guess[0] * size_multiplier, position_guess[1] * size_multiplier - 200)
        prediction.stamp()
        # End of Visualization
    return localized
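# If you want to check your estimator without opening a turtle window (for
# example on a machine with no display), the sketch below runs the same
# measure / guess / move / compare loop with the visualization stripped out.
# It is only a convenience for local testing, not part of the grading code.
def demo_grading_headless(estimate_next_pos_fcn, target_bot, OTHER=None):
    localized = False
    distance_tolerance = 0.01 * target_bot.distance
    ctr = 0
    while not localized and ctr <= 1000:
        ctr += 1
        measurement = target_bot.sense()
        position_guess, OTHER = estimate_next_pos_fcn(measurement, OTHER)
        target_bot.move_in_circle()
        true_position = (target_bot.x, target_bot.y)
        error = distance_between(position_guess, true_position)
        if error <= distance_tolerance:
            print "You got it right! It took you ", ctr, " steps to localize."
            localized = True
        if ctr == 1000:
            print "Sorry, it took you too many steps to localize the target."
    return localized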
# This is a demo for what a strategy could look like. This one isn't very good.
def naive_next_pos(measurement, OTHER=None):
    """This strategy records the first reported position of the target and
    assumes that the target bot will eventually return to that position, so
    it always guesses that the first position will be the next."""
    if not OTHER:  # this is the first measurement
        OTHER = measurement
    xy_estimate = OTHER
    return xy_estimate, OTHER
# This is how we create a target bot. Check the robot.py file to understand
# how the robot class behaves.
test_target = robot(2.1, 4.3, 0.5, 2 * pi / 34.0, 1.5)
measurement_noise = 0.05 * test_target.distance
test_target.set_noise(0.0, 0.0, measurement_noise)
demo_grading(estimate_next_pos, test_target)
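# For comparison, you could run the same check with the naive strategy on a
# fresh target bot (this comparison is just a suggestion, not part of the
# assignment); uncomment to try it:
#
#     naive_target = robot(2.1, 4.3, 0.5, 2 * pi / 34.0, 1.5)
#     naive_target.set_noise(0.0, 0.0, measurement_noise)
#     demo_grading(naive_next_pos, naive_target)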