diff --git a/notebooks/3_informed_search/Picture1.png b/notebooks/3_informed_search/Picture1.png new file mode 100644 index 00000000..38be2686 Binary files /dev/null and b/notebooks/3_informed_search/Picture1.png differ diff --git a/notebooks/3_informed_search/Picture10.png b/notebooks/3_informed_search/Picture10.png new file mode 100644 index 00000000..cf576495 Binary files /dev/null and b/notebooks/3_informed_search/Picture10.png differ diff --git a/notebooks/3_informed_search/Picture11.png b/notebooks/3_informed_search/Picture11.png new file mode 100644 index 00000000..632e0d44 Binary files /dev/null and b/notebooks/3_informed_search/Picture11.png differ diff --git a/notebooks/3_informed_search/Picture12.png b/notebooks/3_informed_search/Picture12.png new file mode 100644 index 00000000..a11dd9f8 Binary files /dev/null and b/notebooks/3_informed_search/Picture12.png differ diff --git a/notebooks/3_informed_search/Picture13.png b/notebooks/3_informed_search/Picture13.png new file mode 100644 index 00000000..de661f79 Binary files /dev/null and b/notebooks/3_informed_search/Picture13.png differ diff --git a/notebooks/3_informed_search/Picture14.png b/notebooks/3_informed_search/Picture14.png new file mode 100644 index 00000000..a82b6f9e Binary files /dev/null and b/notebooks/3_informed_search/Picture14.png differ diff --git a/notebooks/3_informed_search/Picture15.png b/notebooks/3_informed_search/Picture15.png new file mode 100644 index 00000000..0276b5fd Binary files /dev/null and b/notebooks/3_informed_search/Picture15.png differ diff --git a/notebooks/3_informed_search/Picture16.png b/notebooks/3_informed_search/Picture16.png new file mode 100644 index 00000000..b03292dd Binary files /dev/null and b/notebooks/3_informed_search/Picture16.png differ diff --git a/notebooks/3_informed_search/Picture17.png b/notebooks/3_informed_search/Picture17.png new file mode 100644 index 00000000..aa134df8 Binary files /dev/null and b/notebooks/3_informed_search/Picture17.png differ diff --git a/notebooks/3_informed_search/Picture18.png b/notebooks/3_informed_search/Picture18.png new file mode 100644 index 00000000..0953bb88 Binary files /dev/null and b/notebooks/3_informed_search/Picture18.png differ diff --git a/notebooks/3_informed_search/Picture19.png b/notebooks/3_informed_search/Picture19.png new file mode 100644 index 00000000..f6cdcad3 Binary files /dev/null and b/notebooks/3_informed_search/Picture19.png differ diff --git a/notebooks/3_informed_search/Picture2.png b/notebooks/3_informed_search/Picture2.png new file mode 100644 index 00000000..6cc985ac Binary files /dev/null and b/notebooks/3_informed_search/Picture2.png differ diff --git a/notebooks/3_informed_search/Picture20.png b/notebooks/3_informed_search/Picture20.png new file mode 100644 index 00000000..77792187 Binary files /dev/null and b/notebooks/3_informed_search/Picture20.png differ diff --git a/notebooks/3_informed_search/Picture21.png b/notebooks/3_informed_search/Picture21.png new file mode 100644 index 00000000..90e0c8c2 Binary files /dev/null and b/notebooks/3_informed_search/Picture21.png differ diff --git a/notebooks/3_informed_search/Picture3.png b/notebooks/3_informed_search/Picture3.png new file mode 100644 index 00000000..51ebe74b Binary files /dev/null and b/notebooks/3_informed_search/Picture3.png differ diff --git a/notebooks/3_informed_search/Picture4.gif b/notebooks/3_informed_search/Picture4.gif new file mode 100644 index 00000000..849afb70 Binary files /dev/null and b/notebooks/3_informed_search/Picture4.gif 
differ diff --git a/notebooks/3_informed_search/Picture5.png b/notebooks/3_informed_search/Picture5.png new file mode 100644 index 00000000..02233ac9 Binary files /dev/null and b/notebooks/3_informed_search/Picture5.png differ diff --git a/notebooks/3_informed_search/Picture6.png b/notebooks/3_informed_search/Picture6.png new file mode 100644 index 00000000..1be23812 Binary files /dev/null and b/notebooks/3_informed_search/Picture6.png differ diff --git a/notebooks/3_informed_search/Picture7.png b/notebooks/3_informed_search/Picture7.png new file mode 100644 index 00000000..b7911451 Binary files /dev/null and b/notebooks/3_informed_search/Picture7.png differ diff --git a/notebooks/3_informed_search/Picture8.png b/notebooks/3_informed_search/Picture8.png new file mode 100644 index 00000000..cdfce030 Binary files /dev/null and b/notebooks/3_informed_search/Picture8.png differ diff --git a/notebooks/3_informed_search/Picture9.png b/notebooks/3_informed_search/Picture9.png new file mode 100644 index 00000000..c5360912 Binary files /dev/null and b/notebooks/3_informed_search/Picture9.png differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 130441.png b/notebooks/3_informed_search/Screenshot 2021-03-14 130441.png deleted file mode 100644 index 4087c974..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 130441.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 134643.png b/notebooks/3_informed_search/Screenshot 2021-03-14 134643.png deleted file mode 100644 index be9a83f9..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 134643.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 140555.png b/notebooks/3_informed_search/Screenshot 2021-03-14 140555.png deleted file mode 100644 index 2c44fa45..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 140555.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 142505.png b/notebooks/3_informed_search/Screenshot 2021-03-14 142505.png deleted file mode 100644 index 5c36bcce..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 142505.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 201420.png b/notebooks/3_informed_search/Screenshot 2021-03-14 201420.png deleted file mode 100644 index 3549fd07..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 201420.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 205043.png b/notebooks/3_informed_search/Screenshot 2021-03-14 205043.png deleted file mode 100644 index fb38b3f7..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 205043.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 211426.png b/notebooks/3_informed_search/Screenshot 2021-03-14 211426.png deleted file mode 100644 index 787387e8..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 211426.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 212456.png b/notebooks/3_informed_search/Screenshot 2021-03-14 212456.png deleted file mode 100644 index 02e75adb..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 212456.png and /dev/null differ diff --git a/notebooks/3_informed_search/Screenshot 2021-03-14 215803.png b/notebooks/3_informed_search/Screenshot 2021-03-14 215803.png deleted file mode 100644 index 
a497492d..00000000 Binary files a/notebooks/3_informed_search/Screenshot 2021-03-14 215803.png and /dev/null differ diff --git a/notebooks/3_informed_search/a.png b/notebooks/3_informed_search/a.png deleted file mode 100644 index f27cebde..00000000 Binary files a/notebooks/3_informed_search/a.png and /dev/null differ diff --git a/notebooks/3_informed_search/b.png b/notebooks/3_informed_search/b.png deleted file mode 100644 index 5b37f5d2..00000000 Binary files a/notebooks/3_informed_search/b.png and /dev/null differ diff --git a/notebooks/3_informed_search/c.png b/notebooks/3_informed_search/c.png deleted file mode 100644 index dbffc382..00000000 Binary files a/notebooks/3_informed_search/c.png and /dev/null differ diff --git a/notebooks/3_informed_search/example1.png b/notebooks/3_informed_search/example1.png deleted file mode 100644 index cbedd24b..00000000 Binary files a/notebooks/3_informed_search/example1.png and /dev/null differ diff --git a/notebooks/3_informed_search/example2.png b/notebooks/3_informed_search/example2.png deleted file mode 100644 index 627b09ab..00000000 Binary files a/notebooks/3_informed_search/example2.png and /dev/null differ diff --git a/notebooks/3_informed_search/index.ipynb b/notebooks/3_informed_search/index.ipynb deleted file mode 100644 index ae73e0b2..00000000 --- a/notebooks/3_informed_search/index.ipynb +++ /dev/null @@ -1,539 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Informed Search\n", - "\n", - "Informed search is an approach to solving problems with a start state to a goal state, that is based on some mathematical concepts that estimate our distance to a goal state and inform us about some of following states. \n", - "\n", - "This approach needs a function called “Heuristic”, that estimates the distance to the goal state from the current state, so that it can help us find an optimal way (way with the least cost) to reach the goal. This function can be considered based on different points of view and logical explanation of the approximal distance to goal state in the problem. Such as the following example that we can consider the heuristic function based on two points of view (Euclidean distance and Manhattan distance).\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![1](p11.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Greedy Search\n", - "Greedy search is an approach for finding the best way to goal state considering that in each step we choose the state that is the nearest state to goal state. Therefore in such problem it is so important that the heuristic function be exact to an acceptable extent so that the estimation of distance from goal do not end in any wrong decisions. For instance, the following figure shows a situation that our heuristic (Manhattan and Euclidean distance) with faces trouble in finding the best route to goal. The best route is through B to goal, but the greedy search with the heuristic chooses the route to goal that passes through A.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![2](p2.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In common cases greedy search can lead to an optimal (or a suboptimal) goal as shown in fig 3. But in worst case (like badly-guided DFS) can explore every state except the goal state or can end in loops if cycle check is not considered , therefore greedy search is not complete generally. 
But in cases with finite states or cycle checks it is complete." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![3](p3.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Greedy search does not necessarily end in best way to goal (as mentioned above) , so it is not an optimal search.\n", - "\n", - "Suppose that in a greedy search , with the maximum branching factor is b and the depth of search tree is m , in each step maximumly b states get expanded and the best state with the least heuristic is selected to get expanded into maximumly b following states. This process repeats m times (depth of search tree) so the time complexity and Space complexity of the search would be O(b^m) at the most- all nodes are kept in memory. But good heuristic can dramatically improve the time and memory complexity.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# A* Search\n", - "\n", - "A* search was introduced by Hart ,Nilsson , and Rafael in 1968. This search is based on both sum of costs from start to a state n g(n) ,and the heuristic function of a state n to goal state h(n) ( therefor h(goal)=0). The idea is that in each step we select the state with the least value of f(n)=g(n)+h(n).\n", - "\n", - "Suppose the problem is to find the best route from Arad to Bucharest. In fig 5 the map and the distance between cities are shown. In fig 6 the search tree is shown, bellow each node f(n)=g(n)+h(n) is written. You can see that how the A* search lead to the best route.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![4](p4.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![5](p5.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Fig 6: A*search tree of best route from Arad to Bucharest\n", - "\n", - "For following topics , first we need to define two characteristics for a heuristic function h : Admissibility and Monotonicity\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 1)\tAdmissibility:\n", - "\n", - "a heuristic function h is admissible if for evey node n in search tree, h(n) is less than the optimal value remaining. In fig 7 we see the difference between an admissible and a non-admissible heuristic function." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![6](p6.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 2) Monotonicity: \n", - " \n", - "a heuristic function h is monotonic if for evey node n in search tree, f(n)=g(n)+h(n) be additive while we find the best way from start node to goal node. In fig 8 we see the difference between a monotonic and a non-monotonic heuristic function." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![8](p8.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# There are two types of A* search:\n", - " \n", - "\tTree-search: Repetitious states (nodes) are allowed to be inserted in the search tree\n", - " \n", - "\tGraph-search: Repetitious states (nodes) are not allowed to be inserted in the search tree\n", - " \n", - "In both cases, for reaching an optimal goal, some conditions need to be true.\n", - "Theorem 1: If h(n) is admissible then A* is optimal in tree search.\n", - " \n", - "Proof:\n", - " \n", - "Suppose some suboptimal goal G_2 has been generated and is in the frontier. 
Let n be an unexpanded node in the frontier such that n is on a shortest path to an optimal goal G. \n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![9](p9.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "• f(G_2 )=g(G_2) since h(G_2)=0 \n", - "\n", - "• g(G_2)>g(G) since G_2 is suboptimal \n", - "\n", - "• f(G)=g(G) since h(G)=0 \n", - "\n", - "• f(G_2)>f(G) from above\n", - "\n", - "• h(n)≤h^* (n) (since h is admissible) hence g(n)+h(n)≤g(n)+h^* (n) \n", - "\n", - "• f(n)≤g(n)+h^* (n)f(n), and A* will never select G_2 for expansion.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Theorem 2: If h(n) is admissible and monotonic then A* is optimal in graph search.\n", - "\n", - "Proof:\n", - "\n", - "\tA*(admissible/monotonic) will expand only nodes whose f-values are less (or equal) to the optimal cost path C* (f(n) is less-or-equal C*). \n", - "\tThe evaluation function of a goal node along an optimal path equals C*.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Alternative definition of monotonicity(consistency):\n", - "\n", - "previously it was concluded that if in a given search path we went from state a to b and said search path was in fact monotonic then F(a)<=F(b) now based on this knowledge we are going to form another definition for monotonicity please look at the slide below:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20130441.png](Screenshot%202021-03-14%20130441.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "as shown in the slide above if in a given search path we have to travel from state a to a neighboring state b and the difference between H(a) and H(b) is at most the actual distance between the two states then said search path is monotone so instead of defining monotonicity based on F(X) we can define it based on H(X).\n", - "\n", - "\n", - "now lets test the new deffinition in an example:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20134643.png](Screenshot%202021-03-14%20134643.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Solution:\n", - "\n", - "now even at first glance we can undrestand that the manhattan distance admissible because we will not be taking the obstacle walls in the middle of the maze into account so the heuristic is always shorter than the actual distance\n", - "\n", - "now regarding the monotonicity of the manhatan distance, imagine two horizontally adjacent states a and b now it's obvious because a and b are adjacent that H(a)-H(b) is either 1 or -1 and the actual distance between a and b is always 1 so H(a)-H(b) is at most the actual distance between a and b hence the manhatan distance heuristic is monotonic.\n", - "\n", - "\n", - "now lets see another example in the form two heuristics for the 8-puzzle:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20140555.png](Screenshot%202021-03-14%20140555.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Question:\n", - "witch of the two demonstrated heuristics is dominant and better to use in the start state?\n", - "\n", - "\n", - "Answer:\n", - "considering both of the given heuristics are admissible, h1(s) is the dominant heuristic because the ideal heuristic to use in a given state while being admissible has the 
biggest amount.\n", - "\n", - "note: given a number of admissible heuristics to use in our search the ideal one has the biggest amount\n", - "\n", - "furthermore please look at the figures shown in the slide below:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20142505.png](Screenshot%202021-03-14%20142505.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Relaxed problems:\n", - "one of the many ways we can aquire heuristic functions is by deriving it from the exact solution cost of a more relaxed version of the problem.\n", - "keep in mind when we say more relaxed version of the problem what we realy mean is a version of the problem with looser and more relaxed constraints.\n", - "\n", - "Example:\n", - "previousley the we talked about heuristics h1(n) and h2(n) in the 8-puzzle problem. as is evident we can aquire heuristic h1(n) by relaxing the rules of the 8-puzzle ina way that a tile can move anywhere in the map or similarly we can aquire h2(n) by bending and relaxing the rules of the 8-puzzle so that a tile can move to any adjacent square on the map.\n", - "\n", - "the key point to all this is the optimal solution to a relaxed problem is no greaterthan the optimal solution in the real problem.\n", - "\n", - "another great example of this can be seen in the slide below sorounding the traveling sales person or TSP problem:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20201420.png](Screenshot%202021-03-14%20201420.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "a tour is actually a linear tree with each branch having only one child and starting from a root and eventually reaching a goal now as is shown the np hard TSP problem is initialy an optimization problem in witch the h* is the minimum amount of the sum of the costs of the edges in linear trees we can relax the TSP problem in such a manner that we acquire the minimum sum cost of edges in all trees not just the linear ones and with this act we derive a new heuristic h with ultimately equals the minimum sum of costs of the edges in all trees" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Optimality of A*(tree search):\n", - "proving the optimality of A* search is fairly easy now considering the subjects covered so far and is handled as such in the slide below:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20205043.png](Screenshot%202021-03-14%20205043.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "base on the slide above suboptimal goals such as G2 will never be chosen for expantion by the A* in tree search hence rendering A* as a completely optimal algorithem.\n", - "\n", - "now we want to again delve in the subject of monotonicity again and explain why an A* algorithem in a graph search is only optimal if it is in fact monotone. 
lets explain this with an example in the form ofthe slide below:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20211426.png](Screenshot%202021-03-14%20211426.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "at first glance we can sumize that the optimal path is as follows acdG but if we follow the A* algorithem based on the given f figures we see that the algorithem ultimatley chooses the suboptimal path of abdG this happens becuas node c is never expanded in the algorithem due to f(c) > f(d) and c cannot be expanded untill d is expanded and G is reached in other words f decreases from c to d and is not monotonic hence the suboptimal path is chosen\n", - "\n", - "based on this example we can conclude that in order for the A* to be optimal in a graph search monotonicity is required.\n", - "\n", - "now lets delve in the subject of optimality of A* some more with the demonstrated lemmas in the slide below:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20212456.png](Screenshot%202021-03-14%20212456.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "now even at first glance all of the lemmas shown above are quite evident all except for lemma number 2 witch is infact the key lemma for proving the optimality of A* in graph search so lets prove lemma 2:\n", - "\n", - "imagine we started down a start state and find ourselves expanding node n but the path we've chosen turns out to be suboptimal and there is an optimal path for us to choose in order to reach n and the last node stored in the fringe in the optimal path is node m and has not yet been chosen for expantion now because m is in the optimal path from s to n,\n", - "then g(n) > g(m) + (optimal actual distance from m to n or mn) now lets assume there are k states between m and n considering the heuristic used in this A* is monotonic we can come to the conclusion that:\n", - "h(m) <= h(m1) + mm1,\n", - "h(m1) <= h(m2) + mm2,\n", - ".\n", - ".\n", - ".\n", - "h(mk) <= h(n) + mkn\n", - "\n", - "now we can see that the sum of the above statements is:\n", - "\n", - "h(m) <= h(n) + mn\n", - "and we previousley mentioned that : g(n) > g(m) + mn\n", - "using both statements we see that: g(n) + h(n) > g(m) + h(m) witch translates to f(n) > f(m) witch contradicts our initial hypothesis" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "now let us see the properties of A* in the form of the slide below:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![Screenshot%202021-03-14%20215803.png](Screenshot%202021-03-14%20215803.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# A* example:\n", - "\n", - "In this example A* algorithm does not work very well until it reaches the wall, and in this special example, it is not very different from the bfs, but when it \n", - "\n", - "crosses the obstacle, it is very fast and clear how clear this algorithm is.\n", - "In the image below, you can see the A algorithm and BFS algorithm\n", - "\n", - "This example illustrates the importance of accurate heuristic\n", - "The heuristic is not accurate until it reaches the wall, and it will not be accurate until we cross\n", - "\n", - "the obstacle. 
The whole advantage is for when we overcome the obstacle\n", - "So this show us that we need to design good heuristic.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![A*](example1.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![bfs](example2.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# A* summary\n", - "\n", - "Advantages:\n", - "\n", - "It is complete and optimal.\n", - "\n", - "It is the best one from other techniques. It is used to solve very complex problems. \n", - "\n", - "It is optimally efficient, i.e. there is no other optimal algorithm guaranteed to expand fewer nodes than A*.\n", - "\n", - "Disadvantages:\n", - "\n", - "This algorithm is complete if the branching factor is finite and every action has fixed cost.\n", - "\n", - "The speed execution of A* search is highly dependant on the accuracy of the heuristic algorithm that is used to compute h (n).\n", - "\n", - "Exponential growth will occur unless |ℎ(𝑛)−ℎ∗ (𝑛)|≤𝑂(log⁡〖ℎ∗ (𝑛)〗 )\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Optimal Efficiency: \n", - "\n", - "Among all optimal algorithms that start from the same start node and use the same heuristic h, A∗ expands the minimal number of paths.\n", - "\n", - "problem: A∗ could be unlucky about how it breaks ties.\n", - "\n", - "So let’s define optimal efficiency as expanding the minimal number of paths p for which f(p) != f∗, where f∗ is the cost of the shortest path." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Lets assume an algorithm B does not expand a node n which is A* expanded by A*. By definition for this path g(n)+h(n) <= f where f is the cost of the shortest\n", - "\n", - "path. Consider a second problem for which all heuristics values are the same as in the original problem. However, there is a new path to a new goal with total\n", - "\n", - "cost smaller f. The assumed algorithm B would expand n hence never reach this new goal. Hence, B wouldn't find this optimal path. Therefore, our original\n", - "\n", - "assumption that B is optimal is violated." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# IDA*\n", - "\n", - "perform deapth-first search limited to some f-bound\n", - "\n", - "if goal found: it's ok\n", - "\n", - "else: increase the f-bound and restart\n", - "\n", - "how to stablish the f-bound?\n", - "\n", - "start with f(start)\n", - "\n", - "Next f-limit = min-cost of any node pruned \n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "IDA* example" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![a](a.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![b](b.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![c](c.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "IDA* analyze\n", - "\n", - "IDA* is complete, optimal and optimally efficient\n", - "\n", - "IDA* is complete and optimal space usage is linear in the depth of. 
each iteration is dfs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/notebooks/3_informed_search/index.md b/notebooks/3_informed_search/index.md new file mode 100644 index 00000000..985424b5 --- /dev/null +++ b/notebooks/3_informed_search/index.md @@ -0,0 +1,249 @@ +# Informed Search + +## Table of Contents +* [Introduction](#Introduction) +* [Heuristic](#Heuristic) +* [Greedy Search](#Greedy_Search) +* [How it may go wrong](#wrong) +* [Properties of Greedy Search](#prop1) +* [A* Search](#A*) +* [Optimality of A*](#opt1) +* [Examples](#examples) +* [Obtaining admissible heuristics using relaxed problems](#relaxed) +* [Iterative-Deepening A*](#IDA) +* [Properties of IDA*](#prop2) +* [Conclusion](#Conclusion) +* [Other Useful Links](#useful) +* [References](#References) + + +## Introduction + +In the last chapter, blind search was thoroughly discussed and examined. In blind search algorithms - such as BFS and DFS - we search without any prior knowledge of what lies ahead. In other words, the algorithm has no information about the future of the path it is expanding. + +The main idea of informed search is to first come up with an estimate of the remaining cost for each node, and then use this extra information to search the solution space more cleverly. + +## Heuristic +Heuristic, as a word, roughly means discovering. In this course, however, it is used to describe "**an estimate of how close a state is to a goal**". +This estimate varies from problem to problem, and the burden of designing a good heuristic function is - unfortunately - on the programmer! +To clarify, let us examine the famous game Pac-Man. For simplicity, let's ignore the enemies and all the "food" Pac-Man has to eat. + +![1](Picture1.png) + +Since the main character can only move along vertical or horizontal lines, we can safely estimate the distance it has left before reaching its goal state by its Manhattan distance to the goal. +Note that, in a plane with p1 at (x1, y1) and p2 at (x2, y2), the Manhattan distance is |x1 - x2| + |y1 - y2|. +Clearly, the Manhattan distance underestimates the true distance between the source and the goal, since the agent's path is blocked by walls and it has to circumvent them, sometimes by moving further away from the goal. +Nevertheless, the Manhattan distance is not the only function that can be used to estimate the remaining distance; another example would be the Euclidean distance. At this point you might ask which heuristic function you should use - a great question, which will be answered shortly!
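To make the idea concrete, here is a small sketch of the two estimates mentioned above. The coordinates and function names are illustrative choices made for this sketch, not something fixed by the notes:

```python
def manhattan_distance(p1, p2):
    """Grid estimate of the remaining cost: |x1 - x2| + |y1 - y2|."""
    (x1, y1), (x2, y2) = p1, p2
    return abs(x1 - x2) + abs(y1 - y2)


def euclidean_distance(p1, p2):
    """Alternative estimate: straight-line distance between the two points."""
    (x1, y1), (x2, y2) = p1, p2
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5


# Hypothetical Pac-Man position and goal; walls are ignored by both estimates,
# which is exactly why they underestimate the true remaining distance.
print(manhattan_distance((1, 1), (6, 3)))   # 7
print(euclidean_distance((1, 1), (6, 3)))   # ~5.39
```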
## Greedy Search +Now let us discuss how we should put this newly found knowledge to good use. A basic idea would be to run one of our previous algorithms, such as Dijkstra, with f(n) = the heuristic estimate of state n. This results in greedy search. For more clarification, pay attention to the figure below. + +![2](Picture2.png) + +As you might remember, the example of travelling from Arad to Bucharest was discussed in the previous chapter. Here we used the straight-line distance to the goal, i.e., the Euclidean distance, as the heuristic function. We start at Arad. Out of the possible nodes to expand at the next step, Sibiu has the least f(n) (= 253), therefore it is selected for expansion. With the same reasoning, Fagaras and finally Bucharest are expanded, and a solution has been found. The nice property of this approach is that its search cost is minimal. On the other hand, the presented path is by no means optimal. + +## How it may go wrong +The following example should clarify the main shortcoming of the greedy approach. + +![3](Picture3.png) + +When deciding between A and B, suppose h(A) < h(B). A greedy algorithm would choose A to expand. For every node x on the path from A to the goal we have h(x) < h(B), so the search keeps following that route and never returns to B, even though the route through B is the cheaper way to reach the goal. + +## Properties of Greedy Search + +* In most cases, greedy best-first search takes you straight to the goal with a suboptimal path, like the two examples shown above. +* Incomplete even in finite spaces, similar to DFS. In other words, it might get stuck in loops. Consider the example of going from Iasi to Fagaras, where the greedy best-first algorithm vacillates between Neamt and Iasi (assuming tree search is implemented). With cycle checking and a finite number of states, the algorithm is complete. +* Time complexity is in O(b^m), where m is the maximum depth of the search tree. Keep in mind that this can be significantly reduced by choosing a good heuristic. +* Space complexity is in O(b^m). + +## A* Search + +As you might have guessed, some modification to greedy search is required before using a heuristic. A commonly used solution is to consider the cost already paid to reach the node in addition to the estimated cost required to get from the node to the goal. That is to say: +f(n) = g(n) + h(n), +where g(n) is the sum of step costs from the start to n, h(n) is the estimated cost of the cheapest path from n to the goal, and h(goal) = 0. Intuitively, f(n) is the estimated cost of the optimal path going through node n. A* can be viewed as combining the idea of uniform cost search (g(n)) and greedy search (h(n)). +For further clarification of how A* expands nodes, look at the figure below and at the sketch that follows it: + +![4](Picture4.gif)
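Below is a minimal sketch of how such a search can be implemented with a priority queue ordered by f = g + h. The function name, the graph encoding (a dict mapping each city to its neighbours and step costs) and the use of Python's `heapq` are assumptions made for this illustration; the road distances and straight-line estimates are the familiar figures of the Romania road-map example. Ordering the frontier by h alone (ignoring g) would turn the same loop into greedy best-first search.

```python
import heapq

def a_star(graph, h, start, goal):
    """A* search: always expand the frontier node with the smallest f = g + h."""
    frontier = [(h(start), 0, start, [start])]      # entries: (f, g, node, path)
    best_g = {start: 0}
    while frontier:
        f, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return path, g                          # with a consistent h this is optimal
        if g > best_g.get(node, float("inf")):
            continue                                # stale queue entry, skip it
        for neighbor, cost in graph[node].items():
            new_g = g + cost
            if new_g < best_g.get(neighbor, float("inf")):
                best_g[neighbor] = new_g
                heapq.heappush(frontier,
                               (new_g + h(neighbor), new_g, neighbor, path + [neighbor]))
    return None, float("inf")

# A fragment of the Romania road map (edge costs) and the straight-line
# distances to Bucharest used as the heuristic.
romania = {
    "Arad": {"Zerind": 75, "Sibiu": 140, "Timisoara": 118},
    "Zerind": {"Arad": 75, "Oradea": 71},
    "Oradea": {"Zerind": 71, "Sibiu": 151},
    "Timisoara": {"Arad": 118},
    "Sibiu": {"Arad": 140, "Oradea": 151, "Fagaras": 99, "Rimnicu": 80},
    "Fagaras": {"Sibiu": 99, "Bucharest": 211},
    "Rimnicu": {"Sibiu": 80, "Pitesti": 97, "Craiova": 146},
    "Pitesti": {"Rimnicu": 97, "Craiova": 138, "Bucharest": 101},
    "Craiova": {"Rimnicu": 146, "Pitesti": 138},
    "Bucharest": {"Fagaras": 211, "Pitesti": 101},
}
sld_to_bucharest = {"Arad": 366, "Zerind": 374, "Oradea": 380, "Timisoara": 329,
                    "Sibiu": 253, "Fagaras": 176, "Rimnicu": 193, "Pitesti": 100,
                    "Craiova": 160, "Bucharest": 0}

print(a_star(romania, sld_to_bucharest.get, "Arad", "Bucharest"))
# (['Arad', 'Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest'], 418)
```

Note how the greedy route through Fagaras (cost 450) is avoided once g is taken into account.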
## Optimality of A* +By optimality, we mean having an algorithm that finds the optimal solution (a real goal). +### Tree search + Theorem: If h(n) is **admissible**, then A* is optimal in tree search. +### Graph search + Theorem: If h(n) is **admissible** and **monotonic**, then A* is optimal in graph search. + +The first condition required for A*'s optimality is an admissible heuristic function h(n). We call a heuristic function admissible if it never overestimates the cost to reach the goal; that is, the value returned by h(n) at any point must not exceed the actual cost from that point to the goal node. +For instance, in the case of a maze game like the one below, if we consider the marked cross point as the current position, the actual cost to the goal equals d1 + d2 + d3 + d4. The value returned by the heuristic function must not exceed this value; for example, d5 is acceptable. This inequality must not be violated at any node; otherwise, h is not regarded as admissible. + +![5](Picture5.png) + +The diagrams below demonstrate the aforementioned point. + +![6](Picture6.png) + +In the left diagram, the heuristic's estimated cost always (in all states) remains below the true cost, so h is admissible; in the right diagram, however, h sometimes overestimates the real cost and violates the admissibility condition. +The second important condition on the heuristic function is monotonicity, also called consistency. A heuristic function h(n) is monotonic (consistent) if, for every node n and every successor n' of n generated by any action a, the estimated cost of reaching the goal from n is no greater than the step cost of getting to n' plus the estimated cost of reaching the goal from n': +h(n) ≤ c(n, a, n') + h(n'). +If we heed the above formulation, we are reminded of the general triangle inequality. The picture below describes this point: + +![7](Picture7.png) + +Because h(n) estimates the cost from node n to the goal, the above inequality is exactly the triangle inequality: the sum of two sides of a triangle is never less than the third side. Given admissibility, consistency is a well-understood property, meaning that if there were a route from n to G via n' that was cheaper than h(n), it would violate the property that h(n) is a lower bound on the cost to reach G. +An important consequence of monotonicity can be shown with a simple argument: + +![8](Picture8.png) + +As the slide above shows, if a is expanded earlier than b on the same path, then f(a) ≤ f(b); in other words, f is non-decreasing along any path, since f(n') = g(n) + c(n, a, n') + h(n') ≥ g(n) + h(n) = f(n). + +## Examples + +For the maze game, the Manhattan distance - the sum of the absolute differences between the coordinates of two points - is a monotonic and admissible heuristic function. + +![9](Picture9.png) + +Here is another example. Let's consider the 8-puzzle game (more generally, the n-puzzle); in the 8-puzzle, there are 8 tiles arbitrarily positioned in the puzzle grid, such as the state below: + +![10](Picture10.png) + +Here each state is a configuration of tiles in the puzzle, an action is moving a tile adjacent to the empty square into it, and the goal is to have all tiles in their correct places. + +![11](Picture11.png) + +The 8-puzzle was one of the earliest heuristic search problems. The average solution cost for a randomly generated 8-puzzle instance is about 22 steps. The branching factor is about 3. (When the empty tile is in the middle, four moves are possible; when it is in a corner, two; and when it is along an edge, three.) +This means that an exhaustive tree search to depth 22 would look at about 3^22 ≈ 3.1 × 10^10 states. It is a burdensome task! If we want to use A*, we should use an admissible and monotonic heuristic function. Two common candidates are mentioned here: +1. h1 = the number of misplaced tiles. This is obviously admissible, because it just reports how many tiles are not in their correct positions, and each of those tiles has to be moved at least once; thus it never overestimates the actual number of moves. In the example above, h1 = 8: all eight tiles are misplaced. h1 is also monotonic, since a single move changes h1 by at most 1, which is exactly the step cost. + +2. h2 = the sum of the (Manhattan) distances of every tile to its goal position. h2 is also evidently admissible, because the number of moves required to bring a tile to its correct position is at least its Manhattan distance to that position. For example, in the configuration above, h2 equals 3+1+2+2+2+3+3+2 = 18. + +In terms of consistency, any move can increase or decrease h2 by at most 1, and the step cost is again 1. Thus the inequality is again satisfied: h2(n) ≤ 1 + h2(n') = c(n, a, n') + h2(n'). + +The actual solution cost for the problem above is 26, which is more than both estimates. +3. h3 = the number of permutation inversions. It can be shown that this heuristic function is not admissible.
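The two heuristics named above can be written down in a few lines. The board encoding is an assumption made for this sketch: a tuple of nine entries read row by row, with 0 for the blank and the blank in the top-left corner of the goal, as in the textbook figure.

```python
GOAL = (0, 1, 2, 3, 4, 5, 6, 7, 8)      # blank in the top-left corner

def h1(state, goal=GOAL):
    """Number of misplaced tiles (the blank square is not counted)."""
    return sum(1 for tile, target in zip(state, goal) if tile != 0 and tile != target)

def h2(state, goal=GOAL):
    """Sum of the Manhattan distances of every tile to its goal square."""
    total = 0
    for index, tile in enumerate(state):
        if tile == 0:
            continue
        goal_index = goal.index(tile)
        total += abs(index // 3 - goal_index // 3) + abs(index % 3 - goal_index % 3)
    return total

start = (7, 2, 4, 5, 0, 6, 8, 3, 1)     # the scrambled board discussed above
print(h1(start), h2(start))             # 8 18
```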
So far, we have seen the definitions of admissibility and monotonicity. It can be shown that monotonicity implies admissibility (given that h(goal) = 0), so it is usually enough to check whether a heuristic is monotonic. + But not all monotonic and admissible heuristics are equally good. Now we want to introduce a very important notion, called heuristic dominance. + +If h2(n) ≥ h1(n) for all n (both admissible), then h2 dominates h1 and is better for search, because h2 provides us with a more realistic estimate of the actual cost. The proof is simple and straightforward. We know that every node with f(n) < C* will surely be expanded, where C* is the cost of the optimal path. This is the same as saying that every node with h(n) < C* - g(n) will surely be expanded. But because h2 is at least as large as h1 at every node, every node that is surely expanded by A* search with h2 will also surely be expanded with h1, and h1 might cause other nodes to be expanded as well. Hence, it is generally better to use a heuristic function with higher values. +The choice of heuristic function has a direct effect on the number of nodes expanded by A*, so we should select a heuristic that, while remaining admissible, dominates the others. For example, the average number of nodes expanded for different solution depths in the n-puzzle, for h1 and h2, is reported below: + +![12](Picture12.png) + +How can we generate dominant admissible heuristics? It is pretty easy! Given any admissible heuristics h1 and h2, we may assign h(n) = max(h1(n), h2(n)). h(n) is also admissible, because at any node n, h(n) equals either h1(n) or h2(n), both of which are at most h*(n). Moreover, by the definition of the max function, h(n) ≥ h1(n) and h(n) ≥ h2(n) at all nodes n; thus h(n) dominates both h1 and h2, as the short sketch below illustrates.
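A tiny sketch of that combination, reusing the `h1`, `h2`, and `start` defined in the 8-puzzle sketch above:

```python
def combine(*heuristics):
    """Pointwise maximum of several admissible heuristics.

    The result is still admissible (its value at any node is one of the
    h_i values, each of which underestimates the true cost) and it
    dominates every individual h_i.
    """
    return lambda state: max(h(state) for h in heuristics)

h = combine(h1, h2)
print(h(start))      # 18, since h2 already dominates h1 on this board
```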
## Obtaining admissible heuristics using relaxed problems +One way to obtain an admissible heuristic is from the exact solution cost of a relaxed version of the problem, i.e., a version with looser constraints. A constrained minimization problem can be stated as X1 = min f(x) subject to x ∈ C1. By relaxing the problem, we mean allowing x to go beyond the set C1 and lie in a set C2 that is a superset of C1. With this modification, the relaxed optimum X2 = min f(x) subject to x ∈ C2 satisfies X2 ≤ X1, because every candidate solution of the original problem is still a candidate of the relaxed one. + +If we consider X1 as the optimal cost of the original problem (that is, h*) and take X2 as a new heuristic h, then h never overestimates h*, and it is an admissible heuristic. Thus, we can use the exact solution of a relaxed version of the problem to generate an admissible heuristic. +For example, in the 8-puzzle problem we may relax the problem in one of these two ways: +1. A tile can move anywhere: in this new version of the problem, when minimizing the number of moves, one can easily see that each misplaced tile can go to its correct position in a single move. Consequently, h1 = the number of misplaced tiles is the exact optimal solution of this relaxed problem, and it can serve as an admissible heuristic (and, of course, we already know it is one). + +2. A tile can move to any adjacent square: in this new problem, the exact optimal solution is h2 = the sum of the Manhattan distances of the tiles from their current positions to their correct positions. So it can also be used as an admissible heuristic function. + +Another example: a well-known problem in graph theory is the Travelling Salesperson Problem (TSP), which asks for the Hamiltonian cycle of minimum cost and is NP-hard. However, we can relax this problem and derive a heuristic function from it. As we know, a Hamiltonian path is a special kind of tree: a tree that grows linearly, with each node having exactly one child: + +![13](Picture13.png) + +So, if we say that instead of a linear tree we accept any tree, then the problem is relaxed and the constraints are loosened. The relaxed version of the TSP is then to find a tree with the minimum sum of edge costs; this is the famous MST (minimum spanning tree) problem, which can be solved in polynomial time (using algorithms such as Kruskal's or Prim's). + +![14](Picture14.png) + +Proof of optimality of A* for tree search + +![15](Picture15.png) + +Here we want to prove that A* finds the optimal goal, using proof by contradiction. Suppose a suboptimal goal G2 has been generated and is in the fringe, and let n be an unexpanded node in the fringe that lies on the path to the optimal goal G1. Then f(G2) = g(G2) > g(G1) ≥ g(n) + h(n) = f(n), since h(G2) = 0 and h is admissible. + +So G2 can never be selected for expansion before n, since A* always expands the node with the lowest f value in the fringe and f(n) < f(G2). In other words, n is always a better candidate than G2 for continuing the search, so A* never chooses G2, and the proof is finished. +In graph search, however, monotonicity is also needed. Here we consider an example in which monotonicity is violated and A* fails in graph search. + +![16](Picture16.png) + +The picture above is the state graph. Starting at node a, A* chooses node b because its f is less than that of node c. After expanding node b, nodes d and c are in the fringe: + +![17](Picture17.png) + +At this moment, A* selects node d because it has the lower f value, and d is expanded. Consequently, A* sees nodes c and G in the fringe and must choose between them. + +![18](Picture18.png) + +It selects c because of its lower f value; however, after expanding node c, it cannot continue the path! Remember that in graph search, once a node is expanded, it is added to the list of visited nodes and is not considered again. Node d has already been added to the visited nodes and cannot be chosen again after the expansion of c. Subsequently, the only remaining node is G, which is taken as the goal, and the algorithm reports a - b - d - G as the optimal path. This path is not optimal, because a - c - d - G has a cost of 8.1, which is less than 9. Thus, A* has failed. +The reason A* failed is that when the algorithm reached node d, it had not yet found the optimal path from a to d: a - c - d was the better option, but it was not chosen, because A* works by comparing the values of f(n) at different nodes, and f was not non-decreasing from c to d. A* was therefore forced to mistakenly commit to d through b, and eventually it failed. Thus, we can safely conclude that the value of f must be non-decreasing from any node to its children, which is exactly what monotonicity (consistency) guarantees.
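Both conditions can be checked mechanically on a small explicit graph. The encodings below (a dict of edge costs and a dict of h values) and the numbers in the toy example are assumptions made for this sketch; they are not the exact figures from the slides, but they reproduce the same phenomenon of an admissible yet inconsistent heuristic:

```python
import heapq

def true_costs(graph, goal):
    """Exact cheapest cost h*(n) from every node to `goal` (Dijkstra on reversed edges)."""
    reverse = {n: {} for n in graph}
    for n, edges in graph.items():
        for m, c in edges.items():
            reverse[m][n] = c
    dist, frontier = {goal: 0}, [(0, goal)]
    while frontier:
        d, n = heapq.heappop(frontier)
        if d > dist.get(n, float("inf")):
            continue
        for m, c in reverse[n].items():
            if d + c < dist.get(m, float("inf")):
                dist[m] = d + c
                heapq.heappush(frontier, (d + c, m))
    return dist

def is_admissible(graph, h, goal):
    """h(n) must never exceed the true remaining cost h*(n)."""
    h_star = true_costs(graph, goal)
    return all(h[n] <= h_star.get(n, float("inf")) for n in graph)

def is_consistent(graph, h):
    """h(n) <= c(n, n') + h(n') must hold across every edge."""
    return all(h[n] <= c + h[m] for n in graph for m, c in graph[n].items())

# Admissible but inconsistent: h drops faster than the step cost along b->d and c->d.
graph = {"a": {"b": 1, "c": 1}, "b": {"d": 2}, "c": {"d": 1}, "d": {"G": 4}, "G": {}}
h = {"a": 5, "b": 4, "c": 4, "d": 1, "G": 0}
print(is_admissible(graph, h, "G"), is_consistent(graph, h))   # True False
```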
As discussed earlier, the A* search algorithm has the following features: +* The tree search version of this algorithm is optimal if h(n) is admissible. +* The graph search version of this algorithm is optimal if h(n) is consistent. +In order to show the second claim, we exploit the following lemmas: +* If h(n) is monotonic, then the values of f along any path are non-decreasing. +* Whenever A* selects node n for expansion, the optimal path to that node has been found. +* The optimal goal, G, has the lowest f(G) among all the goals when it is selected for expansion. +* A* expands all nodes in order of non-decreasing f value. +The proof of the first lemma follows directly from the definition of consistency. If we suppose n' is a successor of n, then g(n') = g(n) + c(n, a, n'), and by consistency h(n') ≥ h(n) - c(n, a, n'); therefore f(n') = g(n') + h(n') = g(n) + c(n, a, n') + h(n') ≥ g(n) + h(n) = f(n). + +The proof of the second lemma: if we assume the opposite of the lemma, then there would have to be another frontier node n' on the optimal path from the start node to n, by the graph separation property of the figure below. Since f is non-decreasing along any path (first lemma), n' would have an f value no greater than n's and would have been selected for expansion before n, a contradiction: + +![19](Picture19.png) + +From the two preceding observations, it follows that the sequence of nodes expanded by A* using graph search is in non-decreasing order of f(n). Hence, the first goal node selected for expansion must be an optimal solution. +To sum up, the properties of the A* search algorithm are: +**Complete**: Yes, if there is a positive lower bound on step costs. +**Time**: exponential in the worst case; roughly exponential in (relative error in h) × d, the solution depth. +**Space**: the algorithm keeps all nodes in memory. +**Optimal**: Yes. +It is worth mentioning that A* expands no nodes with f(n) > C*; for example, Timisoara is not expanded in the figure below even though it is a child of the root. + +![20](Picture20.png) + +One final observation is that among optimal algorithms of this type, A* is optimally efficient for any given consistent heuristic. That is, no other optimal algorithm is guaranteed to expand fewer nodes than A* (except possibly through tie-breaking among nodes with f(n) = C*). This is because any algorithm that does not expand all nodes with f(n) < C* runs the risk of missing the optimal solution. +The proof of this is as follows: +* Let f* be the cost of the shortest path to a goal. Consider any algorithm A' that has the same start node as A*, exploits the same heuristic, and fails to expand some path p' expanded by A* for which cost(p') + h(p') < f*. Assume that A' is optimal. + +* Consider a different search problem that is the same as the original, and on which h returns the same estimate for each path, except that p' has a child path p'' which is a goal node, and the true cost of the path to p'' is cost(p') + h(p'), which is less than f*. + +* A' would behave identically on this new problem. +* The cost of the path to p'' is lower than the cost of the path found by A'. +* This violates our assumption that A' is optimal. +Therefore, among the optimal algorithms of this type, A* is **optimally efficient** for any given consistent heuristic. + +The A* search algorithm maintains a priority queue, which can grow exponentially large. It also requires us to harness our own, non-artificial, intelligence to define an appropriate heuristic. To address the memory cost of A*, Iterative-Deepening A* has been introduced. + +## Iterative-Deepening A* +Iterative-Deepening A* (IDA*) is a graph traversal and path search algorithm that can find the shortest path between a designated start node and any member of a set of goal nodes in a weighted graph. Like A*, IDA* is guaranteed to find the shortest path leading from the given start node to any goal node in the problem graph. +This algorithm is essentially iterative deepening with a slight difference: in iterative deepening search we set a limit on the depth, whereas in Iterative-Deepening A* we set the limit on the cost function f(n). The algorithm works as follows (a sketch is given below): +* At first, it starts with f-limit = h(start). +* Then it performs a depth-first search (using a stack, or recursion, instead of a priority queue). +* It prunes any node with f(node) > f-limit. +* When an iteration finishes without finding a goal, the f-limit is updated to the minimum f value of any node that was pruned, and the search restarts.
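Here is a compact sketch of IDA* under the same assumptions as the A* sketch earlier (it reuses the `romania` map and the `sld_to_bucharest` table defined there). The recursive DFS returns either a solution path or the smallest f value that exceeded the current bound:

```python
def ida_star(graph, h, start, goal):
    """Iterative-Deepening A*: repeated depth-first searches bounded by f = g + h."""

    def dfs(node, g, bound, path):
        f = g + h(node)
        if f > bound:
            return f, None                      # pruned: report the excess f value
        if node == goal:
            return f, path                      # f equals the path cost, since h(goal) = 0
        minimum = float("inf")
        for neighbor, cost in graph[node].items():
            if neighbor in path:                # avoid cycles along the current path
                continue
            t, found = dfs(neighbor, g + cost, bound, path + [neighbor])
            if found is not None:
                return t, found
            minimum = min(minimum, t)
        return minimum, None

    bound = h(start)                            # first f-limit
    while True:
        bound, found = dfs(start, 0, bound, [start])
        if found is not None:
            return found, bound
        if bound == float("inf"):
            return None, bound                  # no goal is reachable

print(ida_star(romania, sld_to_bucharest.get, "Arad", "Bucharest"))
# (['Arad', 'Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest'], 418)
```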
## Properties of IDA* +IDA* is complete and optimal, and its space usage is linear in the depth of the solution. In each iteration a plain DFS is performed, with no priority queue. The number of nodes IDA* expands depends on the number of distinct values that f takes. In the worst case - for example, in the TSP (Travelling Salesperson) problem, where nearly every f value is unique - each iteration adds only one new node, so the total number of expansions is roughly 1 + 2 + ... + n = O(n^2), where n is the number of nodes A* would expand. It is obvious that if n is too big for main memory, n^2 is too long to wait. To address this issue, we need to either change our heuristic function or modify the steps of the A* algorithm. But there is another way! Another algorithm, called SMA* or Simplified Memory-Bounded A*, was developed to circumvent the memory problem. In this algorithm, we specify how much memory we have, and therefore how many nodes can be kept. SMA* keeps the expanded and open nodes in memory, and if the memory is full it deletes the leaf with the highest f value and backs its value up into its parent. You can find an example of this algorithm in the picture below: + + +![21](Picture21.png) + +## Conclusion + +In this section we discussed the use of heuristic functions in order to search the space of solutions more intelligently and save time and computation power. Furthermore, the properties of the A* algorithm and the conditions that guarantee its optimality were explained and proved, along with examples and the IDA* variant. + +## Other Useful Links + +[Difference between Informed and Uninformed Search in AI](https://www.geeksforgeeks.org/difference-between-informed-and-uninformed-search-in-ai/) +[Informed Search](https://www.cs.iusb.edu/~danav/teach/c463/5_informed_search.html) +[Video explanation](https://www.youtube.com/watch?v=PzEWHH2v3TE&ab_channel=Education4u) +[Video explanation 2](https://www.youtube.com/watch?v=5F9YzkpnaRw&ab_channel=GateSmashers) + +## References + +[1] Russell, S. and Norvig, P., *Artificial Intelligence: A Modern Approach*, Third Edition. + diff --git a/notebooks/3_informed_search/metadata.yml b/notebooks/3_informed_search/metadata.yml index 73a62cd0..464d026a 100644 --- a/notebooks/3_informed_search/metadata.yml +++ b/notebooks/3_informed_search/metadata.yml @@ -1,4 +1,4 @@ -title: LN | Informed Search +title: Informed Search header: title: Informed Search @@ -9,32 +9,26 @@ authors: text: Authors kind: people content: - - name: Alireza Hosseinpour + - name: Pouria Molahoseini role: Author contact: + - icon: fab fa-github + link: https://github.com/pxouria - icon: fas fa-envelope - link: mailto:alirezzzhp1378@gmail.com + link: mailto:pouria77up@gmail.com - - name: Mohammad Ali Pashanj + - name: Armin Azizi role: Author contact: - icon: fab fa-github - link: https://github.com/mohammadalipashanj + link: https://github.com/seyedarmin-azizi - icon: fas fa-envelope - link: mailto:pashanj.mohammadali@gmail.com + link: mailto:armin.az77@yahoo.com - - name: Majid Taherkhani + - name: Mohammad Reza Taremi role: Author - contact: - - icon: fas fa-envelope - link: mailto:majidtaherkhani555@gmail.com - - - name: Zeinab Sadat Saghi - role: Supervisor contact: - icon: fab fa-github - link: https://github.com/atenasadat - - icon: fab fa-linkedin - link: https://www.linkedin.com/in/atena-saghi/ + link: https://github.com/themimte - icon: fas fa-envelope - link: mailto:atenasaghi@ce.sharif.edu + link: mailto:mohammadrezataremi77@gmail.com \ No newline at end of file diff --git a/notebooks/3_informed_search/p1.png b/notebooks/3_informed_search/p1.png deleted file mode 100644 index 337951b7..00000000 Binary files a/notebooks/3_informed_search/p1.png and /dev/null differ diff --git a/notebooks/3_informed_search/p11.PNG b/notebooks/3_informed_search/p11.PNG deleted file mode 100644 index 40c81c7b..00000000 Binary files
a/notebooks/3_informed_search/p11.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p2.PNG b/notebooks/3_informed_search/p2.PNG deleted file mode 100644 index e2d23cd5..00000000 Binary files a/notebooks/3_informed_search/p2.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p3.PNG b/notebooks/3_informed_search/p3.PNG deleted file mode 100644 index 63a39431..00000000 Binary files a/notebooks/3_informed_search/p3.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p4.PNG b/notebooks/3_informed_search/p4.PNG deleted file mode 100644 index abf6ad9a..00000000 Binary files a/notebooks/3_informed_search/p4.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p5.PNG b/notebooks/3_informed_search/p5.PNG deleted file mode 100644 index 39946933..00000000 Binary files a/notebooks/3_informed_search/p5.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p6.PNG b/notebooks/3_informed_search/p6.PNG deleted file mode 100644 index 35636e2c..00000000 Binary files a/notebooks/3_informed_search/p6.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p7.PNG b/notebooks/3_informed_search/p7.PNG deleted file mode 100644 index 0cc9f68e..00000000 Binary files a/notebooks/3_informed_search/p7.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p8.PNG b/notebooks/3_informed_search/p8.PNG deleted file mode 100644 index 0cc9f68e..00000000 Binary files a/notebooks/3_informed_search/p8.PNG and /dev/null differ diff --git a/notebooks/3_informed_search/p9.PNG b/notebooks/3_informed_search/p9.PNG deleted file mode 100644 index a7132c36..00000000 Binary files a/notebooks/3_informed_search/p9.PNG and /dev/null differ diff --git a/notebooks/index.yml b/notebooks/index.yml index 1b2133c0..c19c935b 100644 --- a/notebooks/index.yml +++ b/notebooks/index.yml @@ -7,7 +7,8 @@ notebooks: kind: S2021, LN, PDF - notebook: notebooks/2_uninformed_search/ kind: S2021, LN, Notebook - #- notebook: notebooks/3_informed_search/ + - notebook: notebooks/3_informed_search/ + kind: S2021, LN, Notebook - notebook: notebooks/4_advanced_heuistics/ kind: S2021, LN, Notebook - notebook: notebooks/5_local_search/ @@ -54,4 +55,4 @@ notebooks: kind: S2021, LN, Notebook #- notebook: notebooks/17_markov_decision_processes/ - notebook: notebooks/18_reinforcement_learning/ - kind: S2021, LN, Notebook \ No newline at end of file + kind: S2021, LN, Notebook