-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathforwardSimulate.m
68 lines (55 loc) · 1.25 KB
/
forwardSimulate.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
function [observation,reward,newState] = forwardSimulate(state, action)
%FORWARDSIMULATE Simulate one discrete action from STATE.
%   [OBSERVATION,REWARD,NEWSTATE] = FORWARDSIMULATE(STATE, ACTION) applies
%   ACTION to STATE.robotPose = [x y theta], generates an observation,
%   runs the particle filter on the moved state, and computes a scalar
%   reward. Actions: 1 = forward 1 cell, 2 = turn (theta-1),
%   3 = turn (theta+1), 4 = forward 2 cells.
%   Reward = visionReward + impactReward + actionReward, or -1000 when
%   the new pose leaves the 100x100 grid.
%   NOTE(review): NEWSTATE is already mutated by the particle filter and
%   the seenCells update before the out-of-bounds check — preserved as-is.
newState = state;
theta = state.robotPose(3);

% Decode the action into a travel distance and an action cost.
% Default: no motion, cost -1 (turns and unknown actions).
dist = 0;
actionReward = -1;
switch action
    case 1
        dist = 1;
        actionReward = 1;
    case 2
        theta = theta - 1;
    case 3
        theta = theta + 1;
    case 4
        dist = 2;
        actionReward = 0;
end

% Wrap heading back into {1,2,3,4}. A single turn changes theta by at
% most 1, so a simple clamp-style wrap is sufficient here.
if theta > 4
    theta = 1;
elseif theta < 1
    theta = 4;
end

% Convert heading + distance into a pose delta [dx dy dtheta].
% Heading 1 = +y, 2 = +x, 3 = -y, 4 = -x.
switch theta
    case 1
        u_t_tmp = [0 dist 0];
    case 2
        u_t_tmp = [dist 0 0];
    case 3
        u_t_tmp = [0 -dist 0];
    case 4
        u_t_tmp = [-dist 0 0];
end

newState.robotPose = state.robotPose + u_t_tmp;
newState.robotPose(3) = theta;

observation = generateObservation(newState);
newState = particle_filter(newState, u_t_tmp, observation);

% Reward mapping: sum the seenCells values at every index the observation
% covers (observation >= 0), then zero those cells so they pay out once.
% Logical mask computed once instead of two redundant find() calls.
seenMask = observation >= 0;
visionReward = sum(newState.seenCells(seenMask));
newState.seenCells(seenMask) = 0;

x = newState.robotPose(1);
y = newState.robotPose(2);
if x < 1 || y < 1 || x > 100 || y > 100
    % Robot left the 100x100 map: heavy penalty, skip remaining terms.
    reward = -1000;
    return;
end

% Penalize landing on an occupied cell (occupancy probability > 0.1).
% Checked against the ORIGINAL state's map, matching prior behavior.
if state.physicalMap(y,x) > 0.1
    impactReward = -100;
else
    impactReward = 0;
end
reward = visionReward + impactReward + actionReward;
end