diff --git a/mathics/builtin/lowlevelprofile.py b/mathics/builtin/lowlevelprofile.py
deleted file mode 100644
index 033a2d4f4..000000000
--- a/mathics/builtin/lowlevelprofile.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-Low-level Profiling
-
-Low-level (Python) profile from inside the Mathics3 interpreter
-
-"""
-
-import cProfile
-import pstats
-import sys
-from io import StringIO
-
-from mathics.builtin.base import Builtin
-from mathics.core.attributes import A_HOLD_ALL_COMPLETE, A_PROTECTED
-from mathics.core.convert.python import from_python
-from mathics.core.evaluation import Evaluation
-from mathics.core.expression import Expression
-from mathics.core.list import ListExpression
-from mathics.core.symbols import SymbolNull
-
-
-class PythonCProfileEvaluation(Builtin):
- """
- :Python:https://docs.python.org/3/library/profile.html
-
-
- - 'PythonProfileEvaluation[$expr$]'
-
- profile $expr$ with the Python's cProfiler.
-
-
- >> PythonCProfileEvaluation[a + b + 1]
- = ...
- """
-
- attributes = A_HOLD_ALL_COMPLETE | A_PROTECTED
- summary_text = "profile the internal evaluation of an expression"
-
- def eval(self, expr: Expression, evaluation: Evaluation):
- "PythonCProfileEvaluation[expr_]"
- profile_result = SymbolNull
- textstream = StringIO()
- if sys.version_info >= (3, 8):
- with cProfile.Profile() as pr:
- result = expr.evaluate(evaluation)
- stats = pstats.Stats(pr, stream=textstream)
- stats.strip_dirs().sort_stats(-1).print_stats()
- # TODO: convert the string (or the statistics)
- # into something like a WL Table, by splitting the
- # rows and the columns. By now, just a string
- # is returned.
- profile_result = from_python(textstream.getvalue())
- else:
- result = expr.evaluate(evaluation)
- return ListExpression(result, profile_result)
diff --git a/mathics/builtin/trace.py b/mathics/builtin/trace.py
index 3cca7c8c4..759c4df31 100644
--- a/mathics/builtin/trace.py
+++ b/mathics/builtin/trace.py
@@ -1,25 +1,34 @@
# -*- coding: utf-8 -*-
"""
-Tracing Built-in Functions
+Tracing and Profiling
-Built-in Function Tracing provides one high-level way understand what is \
+The 'Trace' builtins provide a Mathics3-oriented trace of what is \
getting evaluated and where the time is spent in evaluation.
With this, it may be possible for both users and implementers to follow \
how Mathics3 arrives at its results, or guide how to speed up expression \
evaluation.
+
+Python :CProfile:https://docs.python.org/3/library/profile.html \
+profiling is available via 'PythonCProfileEvaluation'.
"""
+import cProfile
+import pstats
+import sys
from collections import defaultdict
+from io import StringIO
from time import time
from typing import Callable
from mathics.builtin.base import Builtin
-from mathics.core.attributes import A_HOLD_ALL, A_PROTECTED
-from mathics.core.convert.python import from_bool
+from mathics.core.attributes import A_HOLD_ALL, A_HOLD_ALL_COMPLETE, A_PROTECTED
+from mathics.core.convert.python import from_bool, from_python
from mathics.core.definitions import Definitions
from mathics.core.evaluation import Evaluation
+from mathics.core.expression import Expression
+from mathics.core.list import ListExpression
from mathics.core.rules import BuiltinRule
from mathics.core.symbols import SymbolFalse, SymbolNull, SymbolTrue, strip_context
@@ -110,7 +119,8 @@ class PrintTrace(_TraceBase):
Note that in a browser the information only appears in a console.
- If '$TraceBuiltins' was never set to 'True', this will print an empty list.
+ Note: before '$TraceBuiltins' is set to 'True', 'PrintTrace[]' will print an empty
+ list.
>> PrintTrace[]
>> $TraceBuiltins = True
@@ -158,7 +168,12 @@ class TraceBuiltins(_TraceBase):
>> TraceBuiltins[Graphics3D[Tetrahedron[]]]
= -Graphics3D-
- By default, the output is sorted by the number of calls of the builtin from highest to lowest:
+ By default, the output is sorted by name:
+ >> TraceBuiltins[Times[x, x]]
+ = x ^ 2
+
+ With 'SortBy->"count"', the output is sorted by the number of calls of the builtin \
+ from highest to lowest:
>> TraceBuiltins[Times[x, x], SortBy->"count"]
= x ^ 2
@@ -189,17 +204,26 @@ def dump_tracing_stats(sort_by: str, evaluation) -> None:
sort_by = "count"
print()
+ def sort_by_count(tup: tuple):
+ return tup[1]["count"]
+
+ def sort_by_time(tup: tuple):
+ return tup[1]["elapsed_milliseconds"]
+
+ def sort_by_name(tup: tuple):
+ return tup[0]
+
print("count ms Builtin name")
if sort_by == "count":
inverse = True
- sort_fn = lambda tup: tup[1]["count"]
+ sort_fn = sort_by_count
elif sort_by == "time":
inverse = True
- sort_fn = lambda tup: tup[1]["elapsed_milliseconds"]
+ sort_fn = sort_by_time
else:
inverse = False
- sort_fn = lambda tup: tup[0]
+ sort_fn = sort_by_name
for name, statistic in sorted(
TraceBuiltins.function_stats.items(),
@@ -260,9 +284,12 @@ class TraceBuiltinsVariable(Builtin):
A Boolean Built-in variable when True collects function evaluation statistics.
- Setting this variable True will enable statistics collection for Built-in functions that are evaluated.
- In contrast to 'TraceBuiltins[]' statistics are accumulated and over several inputs, and are not shown after each input is evaluated.
- By default this setting is False.
+ Setting this variable to True will enable statistics collection for Built-in \
+ functions that are evaluated.
+ In contrast to 'TraceBuiltins[]', statistics are accumulated over several \
+ inputs, and are not shown after each input is evaluated.
+
+ By default, this setting is False.
>> $TraceBuiltins = True
= True
@@ -271,7 +298,8 @@ class TraceBuiltinsVariable(Builtin):
#> $TraceBuiltins = False
= False
- Tracing is enabled, so the expressions entered and evaluated will have statistics collected for the evaluations.
+ Tracing is enabled, so the expressions entered and evaluated will have statistics \
+ collected for the evaluations.
>> x
= x
@@ -407,3 +435,38 @@ def eval_set(self, value, evaluation: Evaluation):
evaluation.message("$TraceEvaluation", "bool", value)
return value
+
+
+class PythonCProfileEvaluation(Builtin):
+ """
+ :Python:https://docs.python.org/3/library/profile.html
+
+
+ - 'PythonCProfileEvaluation[$expr$]'
+
+ profile $expr$ with Python's cProfile.
+
+
+ >> PythonCProfileEvaluation[a + b + 1]
+ = ...
+ """
+
+ attributes = A_HOLD_ALL_COMPLETE | A_PROTECTED
+ summary_text = "profile the internal evaluation of an expression"
+
+ def eval(self, expr: Expression, evaluation: Evaluation):
+ "PythonCProfileEvaluation[expr_]"
+ profile_result = SymbolNull
+ textstream = StringIO()
+ if sys.version_info >= (3, 8):
+ with cProfile.Profile() as pr:
+ result = expr.evaluate(evaluation)
+ stats = pstats.Stats(pr, stream=textstream)
+ stats.strip_dirs().sort_stats(-1).print_stats()
+ # TODO: convert the string (or the statistics)
+ # into something like a WL Table, by splitting the
+ # rows and the columns. By now, just a string
+ # is returned.
+ profile_result = from_python(textstream.getvalue())
+ else:
+ result = expr.evaluate(evaluation)
+ return ListExpression(result, profile_result)