Skip to content

Commit

Permalink
working on test for lexer...
Browse files Browse the repository at this point in the history
  • Loading branch information
Llelepipede committed Feb 28, 2024
1 parent b28da9e commit 0b9d461
Show file tree
Hide file tree
Showing 9 changed files with 59 additions and 56 deletions.
1 change: 1 addition & 0 deletions lexer/IToken.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ var (
&BNOT,
&BMULT,
&TDQUOTE,
&TSQUOTE,
&TCOMMENT,
&TCOMMENTGROUP,
}
Expand Down
49 changes: 22 additions & 27 deletions lexer/Lexer.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
package lexer

import "strconv"

type ReadingType int

const (
Expand Down Expand Up @@ -50,11 +48,12 @@ func (l *TLexer) Ret() []Token {
func (l *TLexer) Step() {

if l.index < len(l.sentence) {

l.position++
l.index++
l.tempVal = l.sentence[l.prevIndex:l.index]
l.stepFind = l.IsSyntax()
l.DEBUGLEXER("each step " + strconv.Itoa(l.index))
l.DEBUGLEXER("STEP")
switch l.stepFind {
case NOTFOUND:

Expand Down Expand Up @@ -154,7 +153,6 @@ func (l *TLexer) FindSyntax() {
}

func (l *TLexer) AddToken(TokenType string) {
l.DEBUGLEXER("before add " + TokenType)
var tmp = Token{
TokenType: TokenType,
Value: l.tempVal,
Expand All @@ -166,7 +164,6 @@ func (l *TLexer) AddToken(TokenType string) {
}

func (l *TLexer) ComposeToken(NewName string) {
l.DEBUGLEXER("before compose " + NewName)
l.ret[len(l.ret)-1].TokenType = NewName
l.ret[len(l.ret)-1].Value += l.tempVal

Expand All @@ -176,11 +173,13 @@ func (l *TLexer) Inquote() bool {
return l.TriggerBy != ""
}

var (
Lex = &TLexer{
func Lexer(sentence string) []Token {
var Lex = &TLexer{
ret: []Token{},
lastStepToken: &TokenTypeBaseBehavior{Name: ""},
index: 0,
indent: []ITokenType{},
stepFind: -1,
sentence: "",
tempVal: "",

Expand All @@ -199,9 +198,6 @@ var (
maxLen: -1,
sizeOfTokenReversed: -1,
}
)

func LexerR(sentence string) []Token {
println("\n---------------------\n-----PRINT DEBUG-----\n---------------------\n")
Lex.SetSentence(sentence)
Lex.Step()
Expand All @@ -218,22 +214,21 @@ func LexerR(sentence string) []Token {
}

// DEBUGLEXER dumps the lexer's current internal state to stdout via the
// builtin println, labelled with the caller-supplied tag s.
// NOTE(review): this text is a diff paste — the commented-out lines below
// are the hunk's *removed* side and the active println lines the *added*
// side; in the committed file only the uncommented half exists.
func (l *TLexer) DEBUGLEXER(s string) {
//println("\n-------------"+s+"-------------\nl.tempVal\t\t:", "\""+l.tempVal+"\"")
//println("l.TriggerBy\t\t:", l.TriggerBy)
//if (len(l.indent) - 1) >= 0 {
// println("l.indent name\t\t:", l.indent[0].Get()[len(l.indent[0].Get())-1])
//} else {
// println("l.indent name\t\t: None")
//}
//
//println("l.lastSTepToken\t\t:", l.lastStepToken.Get()[len(l.lastStepToken.Get())-1])
//println("l.isSpaces\t\t:", l.isSpaces)
//println("l.index\t\t\t:", l.index)
//println("l.prevIndex\t\t:", l.prevIndex)
//println("l.position\t\t:", l.position)
//println("l.line\t\t\t:", l.line)
//println("l.sizeOfTokenReversed\t:", l.sizeOfTokenReversed)
//println("l.sentence\t\t:", l.sentence)
//println("l.sentence readed\t:", l.sentence[:l.prevIndex]+"|")
// Banner line plus the token text currently being accumulated, quoted so
// empty/whitespace values are visible.
println("\n-------------"+s+"-------------\nl.tempVal\t\t:", "\""+l.tempVal+"\"")
println("l.TriggerBy\t\t:", l.TriggerBy)
// Only the first indent entry's last name is printed; guard against an
// empty indent stack.
if (len(l.indent) - 1) >= 0 {
println("l.indent name\t\t:", l.indent[0].Get()[len(l.indent[0].Get())-1])
} else {
println("l.indent name\t\t: None")
}

println("l.lastSTepToken\t\t:", l.lastStepToken.Get()[len(l.lastStepToken.Get())-1])
println("l.isSpaces\t\t:", l.isSpaces)
println("l.index\t\t\t:", l.index)
println("l.prevIndex\t\t:", l.prevIndex)
println("l.position\t\t:", l.position)
println("l.line\t\t\t:", l.line)
println("l.sizeOfTokenReversed\t:", l.sizeOfTokenReversed)
println("l.sentence\t\t\t:", l.sentence)
// Shows how far the reading head has consumed the input; "|" marks the
// boundary at prevIndex.
println("l.sentence readed\t:", l.sentence[:l.prevIndex]+"|")
}
21 changes: 11 additions & 10 deletions lexer/TokenTypeBase.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,10 @@ type TokenTypeBaseBehavior struct {
//
// imply no special behavior
func (t *TokenTypeBaseBehavior) Resolve(l *TLexer) {
l.DEBUGLEXER("in resolve base")
index := -1

if (*l).TriggerBy != "" {
// in Trigger By Behavior
l.DEBUGLEXER("in resolve TriggerBy")

// verify the previous token created (even in a merging situation) to detect for exemple the end of a
// COMMENTGROUP "trigger by" behavior.
Expand All @@ -32,22 +30,25 @@ func (t *TokenTypeBaseBehavior) Resolve(l *TLexer) {

// related = what is the possible merged or composed result token if the actual token and the previous
// one merge or compose together.

related := t.Result[index]
l.DEBUGLEXER(t.Name)
triggerByToken := findNameInEveryTokenType((*l).TriggerBy, Every)

finded := -1
// update the lexer to acknoledge the new token to work with.
for _, v := range triggerByToken.InvolvedWith() {
for i, v := range triggerByToken.InvolvedWith() {
if v.Get()[len(v.Get())-1] == related.Name {
finded = i
l.indent[0] = &related
}
}
// compose the token BUT end the triggerBy
(*l).ComposeToken(t.Result[index].Name)
l.TriggerBy = ""
if finded != -1 {
// compose the token BUT end the triggerBy
(*l).ComposeToken(t.Result[index].Name)
l.TriggerBy = ""
// reset the reading head of our lexer.
l.prevIndex = l.index
}

// reset the reading head of our lexer.
l.prevIndex = l.index
}
} else {
// classic Behavior
Expand Down
1 change: 0 additions & 1 deletion lexer/TokenTypeComposite.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ type TokenTypeCompositeBehavior struct {
}

func (t *TokenTypeCompositeBehavior) Resolve(l *TLexer) {
l.DEBUGLEXER("in resolve composite")
if l.ret[len(l.ret)-1].TokenType == t.Name {
l.ComposeToken(t.Name)
l.prevIndex = l.index
Expand Down
10 changes: 8 additions & 2 deletions lexer/TokenTypeMerger.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ type TokenTypeMergerBehavior struct {
}

func (t *TokenTypeMergerBehavior) Resolve(l *TLexer) {
l.DEBUGLEXER("in resolve merger")
index := -1
if l.TriggerBy == "" {
if !(*l).isSpaces {
Expand All @@ -24,24 +23,26 @@ func (t *TokenTypeMergerBehavior) Resolve(l *TLexer) {
l.TriggerBy = t.Name
l.prevIndex = l.index
} else {
l.DEBUGLEXER("COMMENTGROUP TEST")
(*l).ComposeToken(t.Composite[index].Name)
l.TriggerBy = t.Composite[index].Name
l.prevIndex = l.index
}
} else {
if !(*l).isSpaces {

_, index = t.IsInvolvedWith(l)
} else {
(*l).isSpaces = false
}
println("ok", index)
if index == -1 {
if l.index > len(l.sentence) {
l.AddToken(t.Result[0].Name)
l.prevIndex = l.index
} else if l.sizeOfTokenReversed != -1 {
identified := l.tempVal[len(l.tempVal)-l.sizeOfTokenReversed:]
indexOfClose := t.IsClosedBySyntaxe(identified)
println("prout")
if indexOfClose != -1 {
//close , donc doit mettre RESULT+CLOSE en token
l.FindSyntax()
Expand All @@ -53,13 +54,18 @@ func (t *TokenTypeMergerBehavior) Resolve(l *TLexer) {
l.ComposeToken(l.TriggerBy)
l.tempVal = temp
l.TriggerBy = ""
if l.indent[0].Get()[len(l.indent[0].Get())-1] == "\n" {
l.line -= 1
}
l.indent[0].Resolve(l)
}
l.tempVal = ""
l.TriggerBy = ""
l.prevIndex = l.index
}
println("prout")
}
println("caca")
} else {
t.Resolve(l)
}
Expand Down
13 changes: 7 additions & 6 deletions lexer/TokenTypeSpaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,24 +6,25 @@ type TokenTypeSpacesBehavior struct {
}

// Resolve handles a whitespace-class token: inside an active trigger the
// space is delegated to the triggering token type's Resolve; otherwise it
// just flags isSpaces and advances the reading head. Newlines additionally
// bump the line counter and reset the column position.
// NOTE(review): this text is a diff paste with +/- markers stripped, so both
// the removed and the added placement of the `t.Name == "\n"` bookkeeping
// appear below; the real file contains it only once.
func (t *TokenTypeSpacesBehavior) Resolve(l *TLexer) {
l.DEBUGLEXER("in resolve spaces")
// Empty branch — appears to be a leftover debug hook; TODO confirm it can
// be deleted in the committed file.
if t.Name == "\n" {
}
if (*l).TriggerBy != "" {
l.DEBUGLEXER("in resolve TriggerBy")
// A trigger (e.g. a quote or comment opener) is active: let the
// triggering token type decide what the whitespace means.
findNameInEveryTokenType(l.TriggerBy, Every).Resolve(l)

} else {
if (*l).Inquote() {
// presumably unreachable here since Inquote() checks TriggerBy != ""
// — TODO confirm against lexer/Lexer.go.
findNameInEveryTokenType((*l).TriggerBy, Every).Resolve(l)

} else {
// Plain whitespace outside any trigger: remember it so the next
// token resolution knows a separator was seen.
(*l).isSpaces = true
}
if t.Name == "\n" {
(*l).line++
(*l).position = 1
}

// Advance the reading head past the consumed whitespace.
l.prevIndex = l.index
}
if t.Name == "\n" {
(*l).line++
(*l).position = 1
}

}

Expand Down
4 changes: 2 additions & 2 deletions lexer/TokenTypeTrigger.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ type TokenTypeTriggerBehavior struct {
}

func (t *TokenTypeTriggerBehavior) Resolve(l *TLexer) {
l.DEBUGLEXER("in resolve trigger")

if l.TriggerBy == "" {
l.TriggerBy = t.Name
Expand All @@ -24,6 +23,7 @@ func (t *TokenTypeTriggerBehavior) Resolve(l *TLexer) {
} else if l.sizeOfTokenReversed != -1 {
identified := l.tempVal[len(l.tempVal)-l.sizeOfTokenReversed:]
indexOfClose := t.IsClosedBySyntaxe(identified)
println("prout2")
if indexOfClose != -1 {
//close , donc doit mettre RESULT | CLOSE en token
l.FindSyntax()
Expand All @@ -32,13 +32,13 @@ func (t *TokenTypeTriggerBehavior) Resolve(l *TLexer) {
l.position -= 1
l.AddToken(t.Result[indexOfClose].Name)
l.position += 1
l.DEBUGLEXER("in resolve trigger AFTER ADD")
l.tempVal = temp
l.TriggerBy = ""
l.indent[0].Resolve(l)
l.TriggerBy = ""
l.prevIndex = l.index
}
println("prout2")
}

}
Expand Down
2 changes: 1 addition & 1 deletion lexer/lexer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ func tLexer(t *testing.T, tested testList, name string) {
result += "\n--------------------------------------------------\n--------------------------------------------------\n\t\t---" + name + "-INPUT---\n--------------------------------------------------\n" + code + "\n--------------------------------------------------"
result += "\n\t\t---DIFF LIST---\n--------------------------------------------------\n"
diff := 0
l := LexerR(code)
l := Lexer(code)
if l == nil {
result += "Expected a lexer, got nil\n--------------------------------------------------\n"
} else if len(l) != expectedLenth {
Expand Down
14 changes: 7 additions & 7 deletions lexer/lexer_test_list.go
Original file line number Diff line number Diff line change
Expand Up @@ -459,7 +459,7 @@ var (
},
}
testHashtag = testList{
input: "prout# in comment\n#/ in commentgroup\n and next ligne\n and test for / and #/ /#\nOutside of the comment group",
input: "prout# in comment\n/# in commentgroup\n and next ligne\n and test for / and /# #/\nOutside of the comment group",
output: []Token{
{
TokenType: TEXT,
Expand All @@ -469,13 +469,13 @@ var (
},
{
TokenType: COMMENT,
Value: " in comment",
Value: "# in comment\n",
Position: 6,
Line: 1,
},
{
TokenType: COMMENTGROUP,
Value: " in commentgroup\n and next ligne\n and test for / and #/ ",
Value: "/# in commentgroup\n and next ligne\n and test for / and /# #/",
Position: 1,
Line: 2,
},
Expand Down Expand Up @@ -518,7 +518,7 @@ var (
},
}
testHashtag2 = testList{
input: "prout# in comment\n#/ in commentgroup\n and next ligne\n and test for / and #/",
input: "prout# in comment\n/# in commentgroup\n and next ligne\n and test for / and #/",
output: []Token{
{
TokenType: TEXT,
Expand All @@ -528,20 +528,20 @@ var (
},
{
TokenType: COMMENT,
Value: ` in comment`,
Value: "# in comment\n",
Position: 6,
Line: 1,
},
{
TokenType: COMMENTGROUP,
Value: " in commentgroup\n and next ligne\n and test for / and #/",
Value: "/# in commentgroup\n and next ligne\n and test for / and #/",
Position: 1,
Line: 2,
},
{
TokenType: EOF,
Value: ``,
Position: 22,
Position: 23,
Line: 4,
},
},
Expand Down

0 comments on commit 0b9d461

Please sign in to comment.