-
Notifications
You must be signed in to change notification settings - Fork 0
/
p.py
142 lines (104 loc) · 3.94 KB
/
p.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import easyparse as ep
import easyparse.tokenizers as epts
class WordTokenizer(ep.Tokenizer):
    """First-stage tokenizer: groups a run of alphanumeric characters
    (plus any configured extra characters) into a single WORD token."""
    def __init__(self, extra_chars=()):
        # Characters beyond alphanumerics that are allowed inside a word.
        self.extra_chars = extra_chars
    def tokenize(self, view):
        """Consume word characters from the view; return a WORD token,
        or None (implicitly) when no word character is present."""
        chars = view.consume_while(
            lambda ch: ch.isalnum() or ch in self.extra_chars)
        if not chars:
            return None
        return ep.Token("WORD", "".join(chars))
class PropertyTokenizer(ep.Tokenizer):
    """Second-stage tokenizer: collapses consecutive WORD tokens into a
    single Property token holding the list of their contents."""
    def tokenize(self, view):
        words = view.consume_while(lambda tok: tok.TYPE == "WORD")
        if not words:
            return None
        contents = [tok.content for tok in words]
        return ep.Token("Property", contents)
class ReaperNode(object):
    """Structured view over a parsed RPP tree node.

    The node's first property is split into the node name (first word) and
    its arguments (remaining words); all later properties are exposed as a
    ``name -> values`` dict, and children are wrapped recursively.
    """
    def __init__(self, node):
        node = ep.DeepTreeNode(node)
        if node.properties:
            header = node.properties[0].content
            self.name = header[0]
            self.args = header[1:]
        else:
            self.name = ""
            # BUG FIX: this was `self.params = []`, which left `self.args`
            # unset on this path — __repr__ (and any caller reading .args)
            # raised AttributeError for nodes without properties.
            self.args = []
        # Properties after the header, keyed by their first word.
        self.properties = { p.content[0] : p.content[1:] for p in node.properties[1:] }
        self.children = [ ReaperNode(child) for child in node.children ]
    def __repr__(self):
        return f"ReaperNode({self.name}{self.args}, properties={self.properties}, children = {self.children})\n"
def filter_tree(tree_node, filter_f):
    """Pre-order depth-first collection of every node (the root included)
    for which ``filter_f(node)`` is truthy."""
    matches = [tree_node] if filter_f(tree_node) else []
    for child in tree_node.children:
        matches += filter_tree(child, filter_f)
    return matches
class RPP(object):
    """Loads a Reaper project (.rpp) file and parses it into a raw token
    tree (`self.tree`) plus a structured ReaperNode wrapper (`self.rpp`)."""
    def __init__(self, rpp_filepath):
        self.filepath = rpp_filepath
        # FIX: context manager closes the file handle promptly — the
        # original leaked it via open(...).read().
        with open(self.filepath) as f:
            self.rpp_raw = f.read()
        self.tree, self.rpp = self._parse(self.rpp_raw)
    def _parse(self, raw_content):
        """Run the two lexer stages and build the tree.

        Returns a ``(tree, rpp)`` pair: the raw DeepTreeNode tree and its
        ReaperNode wrapper.
        """
        # Stage 1: characters -> WORD / NEWLINE / '<' / '>' tokens
        # (whitespace is discarded).
        rpp_lexer_stage1 = ep.Lexer([
            WordTokenizer(extra_chars=("_","-",".", "/","\"", "{", "}")),
            epts.WhitespaceTokenizer(auto_discard=True),
            epts.SingleTokenizer("NEWLINE", lambda x: "\n" == x, lambda x: None),
        ] + [ epts.CharTokenizer(char) for char in ["<", ">"] ])
        # Stage 2: WORD runs -> Property tokens; newlines dropped.
        rpp_lexer_stage2 = ep.Lexer([
            PropertyTokenizer(),
            epts.DiscardTokens(lambda token: token.TYPE == "NEWLINE"),
            epts.IdentityTokenizer()])
        tokens = rpp_lexer_stage1.parse(raw_content) # acts as tokenizer
        tokens = rpp_lexer_stage2.parse(tokens) # parses the tokens
        # '<' opens a node, '>' closes it; the RPP document has a single
        # top-level node, hence children[0].
        tree_maker = ep.TreeMaker(ep.DeepTreeNode, lambda x: x.TYPE=="<", lambda x: x.TYPE==">")
        tree = tree_maker.parse(tokens).children[0]
        rpp = ReaperNode(tree)
        # Debug output kept in place to preserve the script's behavior.
        tree.pretty_print()
        print("\n"*5)
        return (tree, rpp)
def parse_reaper_file(filepath):
    """Parse the RPP file at `filepath` and print every TRACK node with,
    per item, the item's node name, its NAME property and its source FILE."""
    rpp = RPP(filepath)
    tracks = filter_tree(rpp.rpp, lambda node: node.name == "TRACK")
    print("RPP:", rpp.filepath, "\n")
    for track in tracks:
        rows = [
            (item.name,
             item.properties["NAME"][0],
             item.children[0].properties["FILE"][0])
            for item in track.children
        ]
        print(f""" TRACK {track.properties["NAME"][0]}""")
        for row in rows:
            print(" ", " ".join(row))
import lark
from lark import Lark
# NOTE(review): debug leftover — dumps lark's module namespace at import
# time; consider removing.
print(dir(lark))
def lark_parse_rpp(filepath):
    """Experimental Lark-based RPP parse.

    Reads and prints the file at `filepath`, then parses a hard-coded
    sample snippet (NOT the file contents — `s` is reassigned below) and
    prints the resulting parse tree.
    """
    # FIX: context manager closes the file handle — the original leaked it
    # via open(...).read().
    with open(filepath) as f:
        s = f.read()
    print(s)
    grammar = r"""
    start: node+
         | EMPTY_LINE
    EMPTY_LINE: _WS? _NEWLINE _WS?
    node: _WS? "<" HEADER (_NEWLINE|_WS)+ (node|property)+ ">" _NEWLINE+
    property: WORD (_WS|NUM|WORD)? _NEWLINE+
    HEADER: LETTER+
    WORD: (LETTER|"0".."9"|"_"|"."|"-"|"\\"|"/"|"\""|":")+
    NUM.9: INT
    _NEWLINE: "\n"
    _WS.10: (" " | "\t" )+
    %import common.ESCAPED_STRING
    %import common.LETTER
    %import common.DIGIT
    %import common.INT
    %ignore _WS
    %ignore _NEWLINE
    """
    # NOTE(review): `s` is replaced with a small sample here, so the grammar
    # is exercised on this snippet rather than on the real file — presumably
    # WIP; confirm before relying on this function for real projects.
    s = """
<reaper
value 2
>
"""
    p = Lark(grammar).parse(s)
    print(p)
    print( p.pretty() )
if __name__ == "__main__":
    # Hard-coded test project; run both parsers over it.
    project_path = r"C:\Users\AlbertoEAF\Desktop\teste\teste.rpp"
    parse_reaper_file(project_path)
    lark_parse_rpp(project_path)