"""
Created on Tue Feb 10 11:22:01 2015
@author: jasminebrown
"""
"""
Recall from your reading that any irreducible and aperiodic Markov chain has a
stationary distribution. To convince ourselves that things will converge for
such a chain with arbitrary transition probabilities, let's give it a try.
Work in pairs for this. It's more fun to be social.
"""
# Paste your Markov chain simulation function below, where the starting state
# is drawn with uniform probability from all possible states. Remember to also
# copy any import statements or other functions on which your simulator is
# dependent.
def DiscreteDistr(events, probabilities):
    """Draw a single event from a discrete probability distribution.
    events and probabilities must be lists of the same length, with
    probabilities[i] giving the probability of events[i]."""
    # random is needed to generate pseudorandom numbers
    import random
    # Draw a uniform random number and walk the cumulative distribution until
    # it is exceeded; this samples events in proportion to their probabilities
    # (random.choice alone would ignore the probabilities and sample uniformly).
    draw = random.random()
    cumulative = 0.0
    for index in range(len(events)):
        cumulative += probabilities[index]
        if draw <= cumulative:
            return events[index], probabilities[index]
    return events[-1], probabilities[-1]  # guard against floating-point rounding
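# A quick usage sketch (the values here are just illustrative): draw one
# event from a biased two-state distribution and print it with its probability.
print(DiscreteDistr(["A", "B"], [0.3, 0.7]))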
def Markov(states, MatrixData, steps):
    """Simulate a Markov chain. states is a list (or tuple) giving the state
    space, MatrixData is a list of the rows of the transition matrix (one row
    of outgoing probabilities per state), and steps is how many transitions
    to run."""
    import random
    if steps < 0:  # sanity check for an invalid value of steps
        return 'Invalid number of steps. Must be a non-negative integer.'
    initialState = random.choice(states)  # draws the first state with equal probability
    markov_chain = [initialState]  # list that stores the Markov chain as it transitions
    for n in range(steps):  # run the chain for the requested number of steps
        # Look up the transition-matrix row for the current state and draw
        # the next state from that row's probabilities.
        row = states.index(markov_chain[n])
        nextState = DiscreteDistr(states, MatrixData[row])[0]
        markov_chain.append(nextState)
    return markov_chain
# Define a 2x2 transition matrix. For fun, don't make all the probabilities
# equal. Also, don't use any 0s or 1s (to make sure the chain is irreducible
# and aperiodic).
Chain = ("A", "B")
Matrix = [[0.6, 0.4], [0.3, 0.7]]
# Simulate a single chain for three time steps and print the states
print(Markov(Chain, Matrix, 3))
# example output: ['A', 'B', 'B']
# Analytically calculate the progression of states for this chain.
# Calculate the probability of observing the state in step 3, given the initial
# state in step 1 (i.e., as if you didn't know the state in step 2).
"""
P(x1=state1, x2=state2, x3=state3)
= P(state1) * P(state2, state3 | state1)
= P(state1) * P(state3 | state1, state2) * P(state2 | state1)
= P(state1) * P(state2 | state1) * P(state3 | state2)
state1 drops out of the last conditional because state3 depends on state2,
not state1, by the memoryless property of a Markov chain.
# WHAT IF WE DIDN'T KNOW THE MIDDLE B EXISTED?
# Starting in A and ending in B, the path can only be A,A,B or A,B,B.
"""
"""
# Find the probabilities of both of these paths (the n-step transition
# probabilities in the reading). With Matrix above: P(A|A)=0.6, P(B|A)=0.4,
# P(A|B)=0.3, P(B|B)=0.7.
P(A,B,B) = 0.5*0.4*0.7 = 0.14
# The first factor is 0.5 because the starting state is drawn from a
# uniform distribution.
P(A,?,B) = P(A,A,B) + P(A,B,B) = (0.5*0.6*0.4) + (0.5*0.4*0.7) = 0.26
# This is the combined probability of both paths.
# Now think of the chain progressing in the opposite direction. What is the
# probability of the progression through all 3 states in this direction? How
# does this compare to the original direction?
# This means the probability of starting in B and ending in A:
P(B,?,A) = P(B,A,A) + P(B,B,A) = (0.5*0.3*0.6) + (0.5*0.7*0.3) = 0.195
The forward and reverse probabilities are different.
"""
# Try the same "forward" and "reverse" calculations as above, but with this
# transition matrix:
revMat = [[0.77,0.23], [0.39,0.61]]
# and these starting frequencies for "a" and "b"
freqa = 0.63
freqb = 0.37
import numpy as np
import random
# Draw the starting state using the given starting frequencies
# (freqa + freqb = 1, so any draw above freqa corresponds to state "b").
ran = random.random()
if ran <= freqa:
    i = 0  # start in state "a"
else:
    i = 1  # start in state "b"
print(i)
def transitionmatrix(matrix, i, k, n):
    """Return the n-step transition probability of moving from state i to
    state k, computed by raising the transition matrix to the nth power."""
    mat = np.array(matrix)  # use the matrix passed in, not a global
    transition = np.linalg.matrix_power(mat, n)  # n-step transition matrix
    probability = transition[i, k]
    print("my matrix is " + str(transition))
    return "The probability is " + str(probability)

print(transitionmatrix(revMat, i, 1, 1000))
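# A short sketch of the same forward/reverse comparison with revMat,
# assuming the given starting frequencies and summing over the unknown
# middle state (indices 0 and 1 stand for "a" and "b").
fwd = freqa * sum(revMat[0][m] * revMat[m][1] for m in (0, 1))  # P(a,?,b)
rev = freqb * sum(revMat[1][m] * revMat[m][0] for m in (0, 1))  # P(b,?,a)
print(fwd, rev)  # roughly 0.200 and 0.199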
# What is (roughly) true about these probabilities?
# Simulate 1,000 replicates (or 10K if your computer is fast enough) of 25
# steps. What are the frequencies of the 2 states across replicates through time?
# NOTE: Here is a function that reports the frequencies of a state through time
# for replicate simulations. You'll need to do this several times during this exercise.
def mcStateFreqSum(sims, state="a"):
    """
    Pass this function a list of lists. Each individual list should be the
    states of a discrete-state Markov chain through time (and all the same
    length). It will return a list containing the frequency of one state
    ("a" by default) across all simulations through time.
    """
    freqs = []
    for i in range(len(sims[0])):  # Iterate across time steps
        stateCount = 0
        for j in range(len(sims)):  # Iterate across simulations
            if sims[j][i] == state:
                stateCount += 1
        freqs.append(float(stateCount) / float(len(sims)))
    return freqs
# Run replicate simulations
# Summarize the frequency of one state through time
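# A sketch of the replicate simulations, assuming 1,000 replicates of 25
# steps each with the revMat transition matrix and states "a" and "b".
simStates = ("a", "b")
sims = [Markov(simStates, revMat, 25) for _ in range(1000)]
aFreqs = mcStateFreqSum(sims)  # frequency of "a" at each time step
print(aFreqs)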
# What do you notice about the state frequencies through time? Try another round
# of simulations with a different transition matrix. How do the state freq.
# values change?
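# The same round again with a different (hypothetical) transition matrix,
# to see where the state frequencies settle this time.
otherMat = [[0.9, 0.1], [0.2, 0.8]]
sims2 = [Markov(simStates, otherMat, 25) for _ in range(1000)]
print(mcStateFreqSum(sims2))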
# Now, calculate a vector of probabilities for the focal state (e.g., 'a')
# based on the transition matrix directly (not by simulation). How do these
# values compare to the simulated frequencies?
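# A sketch of the analytic calculation, assuming the chain starts from the
# uniform distribution (matching the random.choice starting draw in Markov):
# propagate the state distribution one step at a time and record P("a").
P = np.array(revMat)
probs = np.array([0.5, 0.5])  # starting distribution over ("a", "b")
aProbs = [probs[0]]
for _ in range(25):
    probs = probs @ P  # advance the distribution one step
    aProbs.append(probs[0])
print(aProbs)  # compare to the simulated frequencies above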