Simplified file structure

Eduard Urbach 2024-08-07 19:39:10 +02:00
parent cacee7260a
commit a466281307
Signed by: eduard
GPG key ID: 49226B848C78F6C8
219 changed files with 453 additions and 457 deletions

14
src/token/Count.go Normal file

@@ -0,0 +1,14 @@
package token
// Count counts how often a token with the given kind and name appears in the token list.
func Count(tokens []Token, buffer []byte, kind Kind, name string) uint8 {
count := uint8(0)
for _, t := range tokens {
if t.Kind == kind && t.Text(buffer) == name {
count++
}
}
return count
}

17
src/token/Count_test.go Normal file

@@ -0,0 +1,17 @@
package token_test
import (
"testing"
"git.akyoto.dev/cli/q/src/token"
"git.akyoto.dev/go/assert"
)
func TestCount(t *testing.T) {
buffer := []byte(`a b b c c c`)
tokens := token.Tokenize(buffer)
assert.Equal(t, token.Count(tokens, buffer, token.Identifier, "a"), 1)
assert.Equal(t, token.Count(tokens, buffer, token.Identifier, "b"), 2)
assert.Equal(t, token.Count(tokens, buffer, token.Identifier, "c"), 3)
assert.Equal(t, token.Count(tokens, buffer, token.Identifier, "d"), 0)
}

75
src/token/Kind.go Normal file

@@ -0,0 +1,75 @@
package token
// Kind represents the type of a token.
type Kind uint8
const (
Invalid Kind = iota // Invalid is an invalid token.
EOF // EOF is the end of file.
NewLine // NewLine is the newline character.
Identifier // Identifier is a series of characters used to identify a variable or function.
Number // Number is a series of numerical characters.
Rune // Rune is a single Unicode code point.
String // String is an uninterpreted series of characters in the source code.
Comment // Comment is a comment.
GroupStart // (
GroupEnd // )
BlockStart // {
BlockEnd // }
ArrayStart // [
ArrayEnd // ]
ReturnType // ->
_operators // <operators>
Add // +
Sub // -
Mul // *
Div // /
Mod // %
And // &
Or // |
Xor // ^
Shl // <<
Shr // >>
LogicalAnd // &&
LogicalOr // ||
_comparisons // <comparisons>
Equal // ==
NotEqual // !=
Less // <
Greater // >
LessEqual // <=
GreaterEqual // >=
_comparisonsEnd // </comparisons>
Define // :=
Period // .
Call // x()
Array // [x]
Separator // ,
_unary // <unary>
Not // ! (unary)
Negate // - (unary)
_unaryEnd // </unary>
_assignments // <assignments>
Assign // =
AddAssign // +=
SubAssign // -=
MulAssign // *=
DivAssign // /=
ModAssign // %=
AndAssign // &=
OrAssign // |=
XorAssign // ^=
ShlAssign // <<=
ShrAssign // >>=
_assignmentsEnd // </assignments>
_operatorsEnd // </operators>
_keywords // <keywords>
Assert // assert
Else // else
If // if
Import // import
Loop // loop
Return // return
Switch // switch
_keywordsEnd // </keywords>
)
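
The unexported entries such as _operators and _operatorsEnd are range sentinels: because the constants are declared with iota, every kind between a pair of sentinels can be classified with two integer comparisons. A minimal, self-contained sketch of the pattern (illustrative names, not part of this package):

package main

import "fmt"

type Kind uint8

const (
	Invalid Kind = iota
	_operators // <operators>
	Add
	Sub
	_operatorsEnd // </operators>
	Identifier
)

// IsOperator uses the sentinels: any kind strictly between them is an operator.
func (k Kind) IsOperator() bool { return k > _operators && k < _operatorsEnd }

func main() {
	fmt.Println(Add.IsOperator(), Identifier.IsOperator()) // true false
}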

4
src/token/Length.go Normal file

@@ -0,0 +1,4 @@
package token
// Length is the data type for storing token lengths.
type Length = uint16
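
Note that this is a type alias (declared with =), not a defined type: Length and uint16 are interchangeable without conversions. The same applies to Position below. A two-line fragment illustrating the difference:

var n uint16 = 5
var l token.Length = n // compiles without a conversion: an alias is the same type, a defined type would not be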

78
src/token/List.go Normal file

@@ -0,0 +1,78 @@
package token
import (
"strings"
)
// List is a slice of tokens.
type List []Token
// IndexKind returns the position of the first token with the given kind, or -1 if it does not appear.
func (list List) IndexKind(kind Kind) int {
for i, token := range list {
if token.Kind == kind {
return i
}
}
return -1
}
// LastIndexKind returns the position of the last token with the given kind, or -1 if it does not appear.
func (list List) LastIndexKind(kind Kind) int {
for i := len(list) - 1; i >= 0; i-- {
if list[i].Kind == kind {
return i
}
}
return -1
}
// Split calls the callback function on each comma-separated group of tokens, ignoring separators nested inside groups, arrays and blocks.
func (list List) Split(call func(List) error) error {
start := 0
groupLevel := 0
for i, t := range list {
switch t.Kind {
case GroupStart, ArrayStart, BlockStart:
groupLevel++
case GroupEnd, ArrayEnd, BlockEnd:
groupLevel--
case Separator:
if groupLevel > 0 {
continue
}
parameter := list[start:i]
err := call(parameter)
if err != nil {
return err
}
start = i + 1
}
}
if start != len(list) {
parameter := list[start:]
return call(parameter)
}
return nil
}
// Text returns the concatenated token text.
func (list List) Text(source []byte) string {
tmp := strings.Builder{}
for _, t := range list {
tmp.WriteString(t.Text(source))
}
return tmp.String()
}
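
Because Split tracks nesting depth, separators inside (), [] and {} do not terminate a group. A usage sketch, assuming the package's own Tokenize (note that Text drops whitespace, since spaces produce no tokens):

package main

import (
	"fmt"

	"git.akyoto.dev/cli/q/src/token"
)

func main() {
	src := []byte("a, f(b, c), d")
	_ = token.Tokenize(src).Split(func(parameter token.List) error {
		fmt.Println(parameter.Text(src)) // prints "a", then "f(b,c)", then "d"
		return nil
	})
}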

40
src/token/List_test.go Normal file

@@ -0,0 +1,40 @@
package token_test
import (
"fmt"
"testing"
"git.akyoto.dev/cli/q/src/token"
"git.akyoto.dev/go/assert"
)
func TestIndexKind(t *testing.T) {
tokens := token.Tokenize([]byte("a{{}}"))
assert.Equal(t, tokens.IndexKind(token.NewLine), -1)
assert.Equal(t, tokens.LastIndexKind(token.NewLine), -1)
assert.Equal(t, tokens.IndexKind(token.BlockStart), 1)
assert.Equal(t, tokens.LastIndexKind(token.BlockStart), 2)
assert.Equal(t, tokens.IndexKind(token.BlockEnd), 3)
assert.Equal(t, tokens.LastIndexKind(token.BlockEnd), 4)
}
func TestSplit(t *testing.T) {
src := []byte("1+2,3*4,5*6,7+8")
tokens := token.Tokenize(src)
parameters := []string{}
err := tokens.Split(func(parameter token.List) error {
parameters = append(parameters, parameter.Text(src))
return nil
})
assert.Nil(t, err)
assert.DeepEqual(t, parameters, []string{"1+2", "3*4", "5*6", "7+8"})
err = tokens.Split(func(parameter token.List) error {
return fmt.Errorf("error")
})
assert.NotNil(t, err)
assert.Equal(t, err.Error(), "error")
}

4
src/token/Position.go Normal file

@@ -0,0 +1,4 @@
package token
// Position is the data type for storing file offsets.
type Position = uint32

71
src/token/Token.go Normal file

@@ -0,0 +1,71 @@
package token
import (
"unsafe"
)
// Token represents a single element in a source file.
// The characters that make up an identifier are grouped into a single token.
// This makes parsing easier and allows us to do better syntax checks.
type Token struct {
Position Position
Length Length
Kind Kind
}
// Bytes returns the byte slice.
func (t Token) Bytes(buffer []byte) []byte {
return buffer[t.Position : t.Position+Position(t.Length)]
}
// End returns the position after the token.
func (t Token) End() Position {
return t.Position + Position(t.Length)
}
// IsAssignment returns true if the token is an assignment operator.
func (t Token) IsAssignment() bool {
return t.Kind > _assignments && t.Kind < _assignmentsEnd
}
// IsComparison returns true if the token is a comparison operator.
func (t Token) IsComparison() bool {
return t.Kind > _comparisons && t.Kind < _comparisonsEnd
}
// IsExpressionStart returns true if the token starts an expression.
func (t Token) IsExpressionStart() bool {
return t.Kind == GroupStart || t.Kind == ArrayStart || t.Kind == BlockStart
}
// IsKeyword returns true if the token is a keyword.
func (t Token) IsKeyword() bool {
return t.Kind > _keywords && t.Kind < _keywordsEnd
}
// IsNumeric returns true if the token is a number or rune.
func (t Token) IsNumeric() bool {
return t.Kind == Number || t.Kind == Rune
}
// IsOperator returns true if the token is an operator.
func (t Token) IsOperator() bool {
return t.Kind > _operators && t.Kind < _operatorsEnd
}
// IsUnaryOperator returns true if the token is a unary operator.
func (t Token) IsUnaryOperator() bool {
return t.Kind > _unary && t.Kind < _unaryEnd
}
// Reset resets the token to default values.
func (t *Token) Reset() {
t.Position = 0
t.Length = 0
t.Kind = Invalid
}
// Text returns the token text.
func (t Token) Text(buffer []byte) string {
return unsafe.String(unsafe.SliceData(t.Bytes(buffer)), t.Length)
}
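
Text uses unsafe.String to return the token text without copying: the returned string shares memory with the source buffer. A cautionary sketch of the aliasing this implies, so the buffer must not be mutated while such strings are in use:

package main

import (
	"fmt"

	"git.akyoto.dev/cli/q/src/token"
)

func main() {
	buffer := []byte("hello")
	hello := token.Token{Kind: token.Identifier, Position: 0, Length: 5}
	text := hello.Text(buffer)
	buffer[0] = 'H'
	fmt.Println(text) // "Hello": the string aliases the buffer, so the mutation is visible
}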

52
src/token/Token_test.go Normal file

@@ -0,0 +1,52 @@
package token_test
import (
"testing"
"git.akyoto.dev/cli/q/src/token"
"git.akyoto.dev/go/assert"
)
func TestTokenEnd(t *testing.T) {
hello := token.Token{
Kind: token.Identifier,
Position: 0,
Length: 5,
}
assert.Equal(t, hello.End(), 5)
}
func TestTokenReset(t *testing.T) {
hello := token.Token{
Kind: token.Identifier,
Position: 1,
Length: 5,
}
hello.Reset()
assert.Equal(t, hello.Position, 0)
assert.Equal(t, hello.Length, 0)
assert.Equal(t, hello.Kind, token.Invalid)
}
func TestTokenText(t *testing.T) {
buffer := []byte("hello, world")
hello := token.Token{Kind: token.Identifier, Position: 0, Length: 5}
comma := token.Token{Kind: token.Separator, Position: 5, Length: 1}
world := token.Token{Kind: token.Identifier, Position: 7, Length: 5}
assert.Equal(t, hello.Text(buffer), "hello")
assert.Equal(t, comma.Text(buffer), ",")
assert.Equal(t, world.Text(buffer), "world")
}
func TestTokenGroups(t *testing.T) {
assignment := token.Token{Kind: token.Assign}
operator := token.Token{Kind: token.Add}
keyword := token.Token{Kind: token.If}
assert.True(t, assignment.IsAssignment())
assert.True(t, operator.IsOperator())
assert.True(t, keyword.IsKeyword())
}

318
src/token/Tokenize.go Normal file

@@ -0,0 +1,318 @@
package token
// Tokenize turns the file contents into a list of tokens.
func Tokenize(buffer []byte) List {
var (
i Position
tokens = make(List, 0, 8+len(buffer)/2) // preallocate: a heuristic of roughly one token per two bytes of source
)
for i < Position(len(buffer)) {
switch buffer[i] {
case ' ', '\t': // whitespace produces no tokens
case ',':
tokens = append(tokens, Token{Kind: Separator, Position: i, Length: 1})
case '(':
tokens = append(tokens, Token{Kind: GroupStart, Position: i, Length: 1})
case ')':
tokens = append(tokens, Token{Kind: GroupEnd, Position: i, Length: 1})
case '{':
tokens = append(tokens, Token{Kind: BlockStart, Position: i, Length: 1})
case '}':
tokens = append(tokens, Token{Kind: BlockEnd, Position: i, Length: 1})
case '[':
tokens = append(tokens, Token{Kind: ArrayStart, Position: i, Length: 1})
case ']':
tokens = append(tokens, Token{Kind: ArrayEnd, Position: i, Length: 1})
case '\n':
tokens = append(tokens, Token{Kind: NewLine, Position: i, Length: 1})
case '-':
if len(tokens) == 0 || tokens[len(tokens)-1].IsOperator() || tokens[len(tokens)-1].IsExpressionStart() || tokens[len(tokens)-1].IsKeyword() {
tokens = append(tokens, Token{Kind: Negate, Position: i, Length: 1})
} else {
if i+1 < Position(len(buffer)) {
switch buffer[i+1] {
case '=':
tokens = append(tokens, Token{Kind: SubAssign, Position: i, Length: 2})
i++
case '>':
tokens = append(tokens, Token{Kind: ReturnType, Position: i, Length: 2})
i++
default:
tokens = append(tokens, Token{Kind: Sub, Position: i, Length: 1})
}
} else {
tokens = append(tokens, Token{Kind: Sub, Position: i, Length: 1})
}
}
case '/':
if i+1 < Position(len(buffer)) && buffer[i+1] == '/' {
position := i
for i < Position(len(buffer)) && buffer[i] != '\n' {
i++
}
tokens = append(tokens, Token{Kind: Comment, Position: position, Length: Length(i - position)})
} else {
position := i
i++
for i < Position(len(buffer)) && isOperator(buffer[i]) {
i++
}
kind := Invalid
switch string(buffer[position:i]) {
case "/":
kind = Div
case "/=":
kind = DivAssign
}
tokens = append(tokens, Token{Kind: kind, Position: position, Length: Length(i - position)})
}
continue
case '"', '\'':
limiter := buffer[i]
start := i
end := Position(len(buffer))
i++
for i < Position(len(buffer)) {
if buffer[i] == limiter && (buffer[i-1] != '\\' || buffer[i-2] == '\\') {
end = i + 1
i++
break
}
i++
}
kind := String
if limiter == '\'' {
kind = Rune
}
tokens = append(tokens, Token{Kind: kind, Position: start, Length: Length(end - start)})
continue
case '0':
position := i
i++
if i >= Position(len(buffer)) {
tokens = append(tokens, Token{Kind: Number, Position: position, Length: 1})
continue
}
filter := isDigit
switch buffer[i] {
case 'x':
i++
filter = isHexDigit
case 'b':
i++
filter = isBinaryDigit
case 'o':
i++
filter = isOctalDigit
}
for i < Position(len(buffer)) && filter(buffer[i]) {
i++
}
tokens = append(tokens, Token{Kind: Number, Position: position, Length: Length(i - position)})
continue
default:
if isIdentifierStart(buffer[i]) {
position := i
i++
for i < Position(len(buffer)) && isIdentifier(buffer[i]) {
i++
}
identifier := buffer[position:i]
kind := Identifier
switch string(identifier) {
case "assert":
kind = Assert
case "if":
kind = If
case "else":
kind = Else
case "import":
kind = Import
case "loop":
kind = Loop
case "return":
kind = Return
case "switch":
kind = Switch
}
tokens = append(tokens, Token{Kind: kind, Position: position, Length: Length(len(identifier))})
continue
}
if isDigit(buffer[i]) {
position := i
i++
for i < Position(len(buffer)) && isDigit(buffer[i]) {
i++
}
last := len(tokens) - 1
if len(tokens) > 0 && tokens[last].Kind == Negate {
tokens[last].Kind = Number
tokens[last].Length = Length(i-position) + 1
} else {
tokens = append(tokens, Token{Kind: Number, Position: position, Length: Length(i - position)})
}
continue
}
if isOperator(buffer[i]) {
position := i
i++
for i < Position(len(buffer)) && isOperator(buffer[i]) {
i++
}
kind := Invalid
switch string(buffer[position:i]) {
case "!":
kind = Not
case "!=":
kind = NotEqual
case "%":
kind = Mod
case "%=":
kind = ModAssign
case "&":
kind = And
case "&&":
kind = LogicalAnd
case "&=":
kind = AndAssign
case "*":
kind = Mul
case "*=":
kind = MulAssign
case "+":
kind = Add
case "+=":
kind = AddAssign
// case "-":
// kind = Sub
// case "-=":
// kind = SubAssign
// case "->":
// kind = ReturnType
case ".":
kind = Period
case "/":
kind = Div
case "/=":
kind = DivAssign
case ":=":
kind = Define
case "<":
kind = Less
case "<<":
kind = Shl
case "<<=":
kind = ShlAssign
case "<=":
kind = LessEqual
case "=":
kind = Assign
case "==":
kind = Equal
case ">":
kind = Greater
case ">=":
kind = GreaterEqual
case ">>":
kind = Shr
case ">>=":
kind = ShrAssign
case "^":
kind = Xor
case "^=":
kind = XorAssign
case "|":
kind = Or
case "|=":
kind = OrAssign
case "||":
kind = LogicalOr
}
tokens = append(tokens, Token{Kind: kind, Position: position, Length: Length(i - position)})
continue
}
tokens = append(tokens, Token{Kind: Invalid, Position: i, Length: 1})
}
i++
}
tokens = append(tokens, Token{Kind: EOF, Position: i, Length: 0})
return tokens
}
// isIdentifier reports whether c can appear inside an identifier.
func isIdentifier(c byte) bool {
return isLetter(c) || isDigit(c) || c == '_'
}
// isIdentifierStart reports whether c can start an identifier.
func isIdentifierStart(c byte) bool {
return isLetter(c) || c == '_'
}
// isLetter reports whether c is an ASCII letter.
func isLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// isDigit reports whether c is a decimal digit.
func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
// isHexDigit reports whether c is a hexadecimal digit (uppercase letters only).
func isHexDigit(c byte) bool {
return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
}
// isBinaryDigit reports whether c is a binary digit.
func isBinaryDigit(c byte) bool {
return c == '0' || c == '1'
}
// isOctalDigit reports whether c is an octal digit.
func isOctalDigit(c byte) bool {
return c >= '0' && c <= '7'
}
// isOperator reports whether c can appear in an operator.
func isOperator(c byte) bool {
switch c {
case '=', ':', '.', '+', '-', '*', '/', '<', '>', '&', '|', '^', '%', '!':
return true
default:
return false
}
}
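
A quick worked example of the '-' handling above: a leading minus becomes Negate and is then merged with the digits that follow, so "-42" ends up as a single Number token. A sketch, assuming fmt for output:

package main

import (
	"fmt"

	"git.akyoto.dev/cli/q/src/token"
)

func main() {
	src := []byte("x := -42 // answer")
	for _, t := range token.Tokenize(src) {
		fmt.Printf("%d %q\n", t.Kind, t.Text(src))
	}
	// prints the kinds and texts of: Identifier "x", Define ":=",
	// Number "-42", Comment "// answer", EOF ""
}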

411
src/token/Tokenize_test.go Normal file

@@ -0,0 +1,411 @@
package token_test
import (
"testing"
"git.akyoto.dev/cli/q/src/token"
"git.akyoto.dev/go/assert"
)
func TestFunction(t *testing.T) {
tokens := token.Tokenize([]byte("main(){}"))
expected := []token.Kind{
token.Identifier,
token.GroupStart,
token.GroupEnd,
token.BlockStart,
token.BlockEnd,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestKeyword(t *testing.T) {
tokens := token.Tokenize([]byte("assert if import else loop return"))
expected := []token.Kind{
token.Assert,
token.If,
token.Import,
token.Else,
token.Loop,
token.Return,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestArray(t *testing.T) {
tokens := token.Tokenize([]byte("array[i]"))
expected := []token.Kind{
token.Identifier,
token.ArrayStart,
token.Identifier,
token.ArrayEnd,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNewline(t *testing.T) {
tokens := token.Tokenize([]byte("\n\n"))
expected := []token.Kind{
token.NewLine,
token.NewLine,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNumber(t *testing.T) {
tokens := token.Tokenize([]byte(`123 456`))
expected := []token.Kind{
token.Number,
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestOperator(t *testing.T) {
tokens := token.Tokenize([]byte(`a + b - c * d / e`))
expected := []token.Kind{
token.Identifier,
token.Add,
token.Identifier,
token.Sub,
token.Identifier,
token.Mul,
token.Identifier,
token.Div,
token.Identifier,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestOperatorAssign(t *testing.T) {
tokens := token.Tokenize([]byte(`a += b -= c *= d /= e &= f |= g ^= h <<= i >>= j`))
expected := []token.Kind{
token.Identifier,
token.AddAssign,
token.Identifier,
token.SubAssign,
token.Identifier,
token.MulAssign,
token.Identifier,
token.DivAssign,
token.Identifier,
token.AndAssign,
token.Identifier,
token.OrAssign,
token.Identifier,
token.XorAssign,
token.Identifier,
token.ShlAssign,
token.Identifier,
token.ShrAssign,
token.Identifier,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNegateFirstToken(t *testing.T) {
tokens := token.Tokenize([]byte(`-a`))
expected := []token.Kind{
token.Negate,
token.Identifier,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNegateAfterGroupStart(t *testing.T) {
tokens := token.Tokenize([]byte(`(-a)`))
expected := []token.Kind{
token.GroupStart,
token.Negate,
token.Identifier,
token.GroupEnd,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNegateSub(t *testing.T) {
tokens := token.Tokenize([]byte(`-a-b`))
expected := []token.Kind{
token.Negate,
token.Identifier,
token.Sub,
token.Identifier,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNegateAfterOperator(t *testing.T) {
tokens := token.Tokenize([]byte(`-a + -b`))
expected := []token.Kind{
token.Negate,
token.Identifier,
token.Add,
token.Negate,
token.Identifier,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestNegateNumber(t *testing.T) {
tokens := token.Tokenize([]byte(`-1`))
expected := []token.Kind{
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestBinaryNumber(t *testing.T) {
tokens := token.Tokenize([]byte(`0b1010`))
expected := []token.Kind{
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestOctalNumber(t *testing.T) {
tokens := token.Tokenize([]byte(`0o755`))
expected := []token.Kind{
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestHexadecimalNumber(t *testing.T) {
tokens := token.Tokenize([]byte(`0xCAFE`))
expected := []token.Kind{
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestStandaloneZero(t *testing.T) {
tokens := token.Tokenize([]byte(`0`))
expected := []token.Kind{
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestLeadingZero(t *testing.T) {
tokens := token.Tokenize([]byte(`0123`))
expected := []token.Kind{
token.Number,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestSeparator(t *testing.T) {
tokens := token.Tokenize([]byte("a,b,c"))
expected := []token.Kind{
token.Identifier,
token.Separator,
token.Identifier,
token.Separator,
token.Identifier,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestComment(t *testing.T) {
tokens := token.Tokenize([]byte("// Hello\n// World"))
expected := []token.Kind{
token.Comment,
token.NewLine,
token.Comment,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
tokens = token.Tokenize([]byte("// Hello\n"))
expected = []token.Kind{
token.Comment,
token.NewLine,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
tokens = token.Tokenize([]byte(`// Hello`))
expected = []token.Kind{
token.Comment,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
tokens = token.Tokenize([]byte(`//`))
expected = []token.Kind{
token.Comment,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
tokens = token.Tokenize([]byte(`/`))
expected = []token.Kind{
token.Div,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestInvalid(t *testing.T) {
tokens := token.Tokenize([]byte(`##`))
expected := []token.Kind{
token.Invalid,
token.Invalid,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestString(t *testing.T) {
tokens := token.Tokenize([]byte(`"Hello" "World"`))
expected := []token.Kind{
token.String,
token.String,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestStringMultiline(t *testing.T) {
tokens := token.Tokenize([]byte("\"Hello\nWorld\""))
expected := []token.Kind{
token.String,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}
func TestStringEOF(t *testing.T) {
tokens := token.Tokenize([]byte(`"EOF`))
expected := []token.Kind{
token.String,
token.EOF,
}
for i, kind := range expected {
assert.Equal(t, tokens[i].Kind, kind)
}
}

26
src/token/bench_test.go Normal file

@@ -0,0 +1,26 @@
package token_test
import (
"bytes"
"testing"
"git.akyoto.dev/cli/q/src/token"
)
func BenchmarkLines(b *testing.B) {
b.Run("__1", bench(1))
b.Run("_10", bench(10))
b.Run("100", bench(100))
}
func bench(n int) func(b *testing.B) {
line := []byte("hello := 123\n")
return func(b *testing.B) {
input := bytes.Repeat(line, n)
for i := 0; i < b.N; i++ {
token.Tokenize(input)
}
}
}
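
Since these use the standard testing harness, go test -bench . ./src/token runs all three cases and reports ns/op for 1, 10 and 100 input lines, showing how tokenization scales with input size.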