diff --git a/src/build/errors/Error.go b/src/build/errors/Error.go
index 0821e25..f0c54ef 100644
--- a/src/build/errors/Error.go
+++ b/src/build/errors/Error.go
@@ -14,13 +14,13 @@ type Error struct {
 	Err      error
 	File     *fs.File
 	Stack    string
-	Position int
+	Position token.Position
 }
 
 // New generates an error message at the current token position.
 // The error message is clickable in popular editors and leads you
 // directly to the faulty file at the given line and position.
-func New(err error, file *fs.File, position int) *Error {
+func New(err error, file *fs.File, position token.Position) *Error {
 	return &Error{
 		Err:      err,
 		File:     file,
@@ -48,12 +48,12 @@ func (e *Error) Error() string {
 
 	for _, t := range e.File.Tokens {
 		if t.Position >= e.Position {
-			column = e.Position - lineStart
+			column = int(e.Position) - lineStart
 			break
 		}
 
 		if t.Kind == token.NewLine {
-			lineStart = t.Position
+			lineStart = int(t.Position)
 			line++
 		}
 	}
diff --git a/src/build/token/Position.go b/src/build/token/Position.go
new file mode 100644
index 0000000..2227037
--- /dev/null
+++ b/src/build/token/Position.go
@@ -0,0 +1,4 @@
+package token
+
+// Position is the data type for storing file offsets.
+type Position = uint32
diff --git a/src/build/token/Token.go b/src/build/token/Token.go
index bf1208b..71dd1e5 100644
--- a/src/build/token/Token.go
+++ b/src/build/token/Token.go
@@ -10,13 +10,13 @@ import (
 // This makes parsing easier and allows us to do better syntax checks.
 type Token struct {
 	Bytes    []byte
-	Position int
+	Position Position
 	Kind     Kind
 }
 
 // End returns the position after the token.
-func (t *Token) End() int {
-	return t.Position + len(t.Bytes)
+func (t *Token) End() Position {
+	return t.Position + Position(len(t.Bytes))
 }
 
 // String creates a human readable representation for debugging purposes.
diff --git a/src/build/token/Tokenize.go b/src/build/token/Tokenize.go
index 343411c..649f376 100644
--- a/src/build/token/Tokenize.go
+++ b/src/build/token/Tokenize.go
@@ -18,11 +18,11 @@ var (
 // Tokenize turns the file contents into a list of tokens.
 func Tokenize(buffer []byte) List {
 	var (
-		i      int
+		i      Position
 		tokens = make(List, 0, 8+len(buffer)/2)
 	)
 
-	for i < len(buffer) {
+	for i < Position(len(buffer)) {
 		switch buffer[i] {
 		case ' ', '\t':
 		case ',':
@@ -42,11 +42,11 @@ func Tokenize(buffer []byte) List {
 		case '\n':
 			tokens = append(tokens, Token{Kind: NewLine, Position: i, Bytes: newLineBytes})
 		case '/':
-			if i+1 >= len(buffer) || buffer[i+1] != '/' {
+			if i+1 >= Position(len(buffer)) || buffer[i+1] != '/' {
 				position := i
 				i++
 
-				for i < len(buffer) && isOperator(buffer[i]) {
+				for i < Position(len(buffer)) && isOperator(buffer[i]) {
 					i++
 				}
 
@@ -54,7 +54,7 @@
 			} else {
 				position := i
 
-				for i < len(buffer) && buffer[i] != '\n' {
+				for i < Position(len(buffer)) && buffer[i] != '\n' {
 					i++
 				}
 
@@ -65,10 +65,10 @@
 
 		case '"':
 			start := i
-			end := len(buffer)
+			end := Position(len(buffer))
 			i++
 
-			for i < len(buffer) {
+			for i < Position(len(buffer)) {
 				if buffer[i] == '"' {
 					end = i + 1
 					i++
@@ -86,7 +86,7 @@
 			position := i
 			i++
 
-			for i < len(buffer) && isIdentifier(buffer[i]) {
+			for i < Position(len(buffer)) && isIdentifier(buffer[i]) {
 				i++
 			}
 
@@ -106,7 +106,7 @@
 			position := i
 			i++
 
-			for i < len(buffer) && isNumber(buffer[i]) {
+			for i < Position(len(buffer)) && isNumber(buffer[i]) {
 				i++
 			}
 
@@ -118,7 +118,7 @@
 			position := i
 			i++
 
-			for i < len(buffer) && isOperator(buffer[i]) {
+			for i < Position(len(buffer)) && isOperator(buffer[i]) {
 				i++
 			}
 
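For reference, here is a minimal runnable sketch of how the new alias behaves; the main package, the sample buffer, and the end helper are illustrative stand-ins, not part of the diff. Because Position is a type alias for uint32 rather than a distinct defined type, it follows uint32's conversion rules, which is why every comparison against len(buffer) (an int) in the diff gains an explicit Position(...) conversion:

package main

import "fmt"

// Position mirrors the alias introduced in src/build/token/Position.go.
type Position = uint32

// end mirrors the arithmetic of Token.End: the offset one past the
// token's last byte. (Hypothetical free-function stand-in for the method.)
func end(position Position, bytes []byte) Position {
	return position + Position(len(bytes))
}

func main() {
	buffer := []byte("abc\ndef")

	// i is a Position, but len returns int, so the loop condition
	// needs the same explicit conversion the diff adds everywhere.
	var i Position

	for i < Position(len(buffer)) {
		i++
	}

	fmt.Println(i)                     // 7
	fmt.Println(end(4, []byte("def"))) // 7
}

Using an alias (type Position = uint32) rather than a defined type keeps plain uint32 values assignable without conversion, while shrinking the field from int (8 bytes on 64-bit platforms) to 4 bytes reduces the size of every Token.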