import collections
import sys
from typing import Callable, Literal, TypeVar

import requests

A = TypeVar("A")
B = TypeVar("B")

def profile(f: Callable[..., B], *args: A):
    """Run f(*args) under a line-level trace and return [result, trajectory]."""
    # a log of f's execution: one call stack per traced line
    trajectory = []
    sys.settrace(makeTrace(trajectory))
    try:
        result = f(*args)
    finally:
        # stop recording even if f raises
        sys.settrace(emptyTrace)
    return [result, trajectory]

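# A minimal usage sketch (hypothetical values, not captured output):
#   result, trajectory = profile(fib, 5)
# result is fib's return value and trajectory holds one call stack per
# executed line, listed outermost frame first.
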
def emptyTrace(_frame, _event, _arg):
    # a no-op trace function: returning None disables further per-line tracing
    return None

def makeTrace(trajectory):
    def go(frame, event: Literal["call", "line", "return", "exception", "opcode"], arg):
        if event != "line":
            return go
        # walk from the executing frame up to the outermost caller
        stack = [frame.f_code.co_name]
        while frame.f_back is not None:
            frame = frame.f_back
            stack.append(frame.f_code.co_name)
        stack.reverse()  # outermost frame first, executing frame last
        trajectory.append(stack)
        return go

    return go

def summariseTrajectory(trajectory):
    # count how often each distinct call stack was observed
    summary = collections.defaultdict(int)
    for stack in trajectory:
        summary[";".join(stack)] += 1
    # one "stack count" line per distinct stack (collapsed-stack format)
    return "\n".join(k + " " + str(v) for k, v in summary.items())

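# For example (a hypothetical input, not captured output), summarising
#   [["<module>", "fib"], ["<module>", "fib"], ["<module>", "fib", "fib"]]
# yields the collapsed-stack text that flame graph tools typically consume:
#   <module>;fib 2
#   <module>;fib;fib 1
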
def uploadSummary(summary):
    headers = {'Content-Type': 'application/octet-stream'}
    with requests.post("https://flamegraph.com", data=summary, headers=headers) as resp:
        return resp.text

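# A minimal local alternative (a sketch; saveSummary and the file name are
# assumptions, not part of the original workflow): write the collapsed stacks
# to disk so tools such as flamegraph.pl or speedscope can render them.
def saveSummary(summary: str, path: str = "profile.collapsed") -> str:
    with open(path, "w") as f:
        f.write(summary)
    return path
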
# functions to be profiled
def fib(x: int) -> int:
    if x < 2:
        return 1
    return fib(x - 1) + fib(x - 2)

def wikiCommonWords():
    import json
    import operator
    from urllib.request import urlopen

    WIKIPEDIA_ARTICLE_API_URL = "https://en.wikipedia.org/w/api.php?action=query&titles=Spoon&prop=revisions&rvprop=content&format=json"

    def download():
        return urlopen(WIKIPEDIA_ARTICLE_API_URL).read()

    def parse(json_data):
        return json.loads(json_data)

    def most_common_words(page):
        # count words of length >= 2 across every returned revision
        word_occurrences = collections.defaultdict(int)
        for revision in page["revisions"]:
            article = revision["*"]
            for word in article.split():
                if len(word) < 2:
                    continue
                word_occurrences[word] += 1
        word_list = sorted(word_occurrences.items(), key=operator.itemgetter(1), reverse=True)
        return word_list[0:5]

    data = parse(download())
    page = list(data["query"]["pages"].values())[0]
    sys.stderr.write("The most common words were %s\n" % most_common_words(page))

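# wikiCommonWords is a second, I/O-bound workload; it can be traced the same
# way as fib (hypothetical usage, requires network access):
#   _, trajectory = profile(wikiCommonWords)
#   print(summariseTrajectory(trajectory))
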
if __name__ == "__main__":
    # profile fib, collapse the recorded stacks, and upload the summary
    result = profile(lambda _: fib(7), None)
    print(uploadSummary(summariseTrajectory(result[1])))