diff --git a/go.mod b/go.mod
index 20f9bf3e3..5de00cf16 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,6 @@ go 1.19
 require (
 	github.com/ALTree/bigfloat v0.0.0-20220102081255-38c8b72a9924
 	github.com/google/go-cmp v0.5.8
-	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/google/uuid v1.3.0
 	github.com/jamesruan/go-rfc1924 v0.0.0-20170108144916-2767ca7c638f
 	github.com/seancfoley/ipaddress-go v1.2.1
diff --git a/go.sum b/go.sum
index 746e8eb81..526c5e2d7 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,6 @@ github.com/ALTree/bigfloat v0.0.0-20220102081255-38c8b72a9924 h1:DG4UyTVIujioxwJ
 github.com/ALTree/bigfloat v0.0.0-20220102081255-38c8b72a9924/go.mod h1:+NaH2gLeY6RPBPPQf4aRotPPStg+eXc8f9ZaE4vRfD4=
 github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/jamesruan/go-rfc1924 v0.0.0-20170108144916-2767ca7c638f h1:Ko4+g6K16vSyUrtd/pPXuQnWsiHe5BYptEtTxfwYwCc=
diff --git a/tools/cli/types_test.go b/tools/cli/types_test.go
index 4309502ae..d9c392420 100644
--- a/tools/cli/types_test.go
+++ b/tools/cli/types_test.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/google/shlex"
+	"kitty/tools/utils/shlex"
 )
 
 var _ = fmt.Print
diff --git a/tools/cmd/at/shell.go b/tools/cmd/at/shell.go
index d83f6ccde..68d564d14 100644
--- a/tools/cmd/at/shell.go
+++ b/tools/cmd/at/shell.go
@@ -12,13 +12,12 @@ import (
 	"strings"
 	"time"
 
-	"github.com/google/shlex"
-
 	"kitty/tools/cli"
 	"kitty/tools/cli/markup"
 	"kitty/tools/tui/loop"
 	"kitty/tools/tui/readline"
 	"kitty/tools/utils"
+	"kitty/tools/utils/shlex"
 )
 
 var _ = fmt.Print
diff --git a/tools/tui/readline/history.go b/tools/tui/readline/history.go
index 25d8b4ca8..6529713b4 100644
--- a/tools/tui/readline/history.go
+++ b/tools/tui/readline/history.go
@@ -11,9 +11,8 @@ import (
 	"time"
 
 	"kitty/tools/utils"
+	"kitty/tools/utils/shlex"
 	"kitty/tools/wcswidth"
-
-	"github.com/google/shlex"
 )
 
 var _ = fmt.Print
diff --git a/tools/utils/shlex/shlex.go b/tools/utils/shlex/shlex.go
new file mode 100644
index 000000000..d98308bce
--- /dev/null
+++ b/tools/utils/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+	shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+	l := NewLexer(os.Stdin)
+	for token, err := l.Next(); err == nil; token, err = l.Next() {
+		// process token
+	}
+
+To access the raw token stream (which includes tokens for comments):
+
+	t := NewTokenizer(os.Stdin)
+	for token, err := t.Next(); err == nil; token, err = t.Next() {
+		// process token
+	}
+
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexical token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It will panic if it encounters a rune which it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) {
+	state := startState
+	var tokenType TokenType
+	var value []rune
+	var nextRune rune
+	var nextRuneType runeTokenClass
+	var err error
+
+	for {
+		nextRune, _, err = t.input.ReadRune()
+		nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+		if err == io.EOF {
+			nextRuneType = eofRuneClass
+			err = nil
+		} else if err != nil {
+			return nil, err
+		}
+
+		switch state {
+		case startState: // no runes read yet
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						return nil, io.EOF
+					}
+				case spaceRuneClass:
+					{
+					}
+				case escapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						tokenType = WordToken
+						state = escapingState
+					}
+				case commentRuneClass:
+					{
+						tokenType = CommentToken
+						state = commentState
+					}
+				default:
+					{
+						tokenType = WordToken
+						value = append(value, nextRune)
+						state = inWordState
+					}
+				}
+			}
+		case inWordState: // in a regular word
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingState: // the rune after an escape character
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found after escape character")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				default:
+					{
+						state = inWordState
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingQuotedState: // the next rune after an escape character, in double quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found after escape character")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				default:
+					{
+						state = quotingEscapingState
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case quotingEscapingState: // in escaping double quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found when expecting closing quote")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = inWordState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingQuotedState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case quotingState: // in non-escaping single quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						err = fmt.Errorf("EOF found when expecting closing quote")
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = inWordState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case commentState: // in a comment
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						if nextRune == '\n' {
+							state = startState
+							token := &Token{
+								tokenType: tokenType,
+								value:     string(value)}
+							return token, err
+						} else {
+							value = append(value, nextRune)
+						}
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		default:
+			{
+				return nil, fmt.Errorf("Unexpected state: %v", state)
+			}
+		}
+	}
+}
+
+// Next returns the next token in the stream.
+func (t *Tokenizer) Next() (*Token, error) {
+	return t.scanStream()
+}
+
+// Split partitions a string into a slice of strings.
+func Split(s string) ([]string, error) {
+	l := NewLexer(strings.NewReader(s))
+	subStrings := make([]string, 0)
+	for {
+		word, err := l.Next()
+		if err != nil {
+			if err == io.EOF {
+				return subStrings, nil
+			}
+			return subStrings, err
+		}
+		subStrings = append(subStrings, word)
+	}
+}
diff --git a/tools/utils/shlex/shlex_test.go b/tools/utils/shlex/shlex_test.go
new file mode 100644
index 000000000..f9f9e0c79
--- /dev/null
+++ b/tools/utils/shlex/shlex_test.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shlex
+
+import (
+	"strings"
+	"testing"
+)
+
+var (
+	// one two "three four" "five \"six\"" seven#eight # nine # ten
+	// eleven 'twelve\' thirteen=13 fourteen/14
+	testString = "one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven 'twelve\\' thirteen=13 fourteen/14"
+)
+
+func TestClassifier(t *testing.T) {
+	classifier := newDefaultClassifier()
+	tests := map[rune]runeTokenClass{
+		' ':  spaceRuneClass,
+		'"':  escapingQuoteRuneClass,
+		'\'': nonEscapingQuoteRuneClass,
+		'#':  commentRuneClass}
+	for runeChar, want := range tests {
+		got := classifier.ClassifyRune(runeChar)
+		if got != want {
Want: %v", runeChar, got, want) + } + } +} + +func TestTokenizer(t *testing.T) { + testInput := strings.NewReader(testString) + expectedTokens := []*Token{ + &Token{WordToken, "one"}, + &Token{WordToken, "two"}, + &Token{WordToken, "three four"}, + &Token{WordToken, "five \"six\""}, + &Token{WordToken, "seven#eight"}, + &Token{CommentToken, " nine # ten"}, + &Token{WordToken, "eleven"}, + &Token{WordToken, "twelve\\"}, + &Token{WordToken, "thirteen=13"}, + &Token{WordToken, "fourteen/14"}} + + tokenizer := NewTokenizer(testInput) + for i, want := range expectedTokens { + got, err := tokenizer.Next() + if err != nil { + t.Error(err) + } + if !got.Equal(want) { + t.Errorf("Tokenizer.Next()[%v] of %q -> %v. Want: %v", i, testString, got, want) + } + } +} + +func TestLexer(t *testing.T) { + testInput := strings.NewReader(testString) + expectedStrings := []string{"one", "two", "three four", "five \"six\"", "seven#eight", "eleven", "twelve\\", "thirteen=13", "fourteen/14"} + + lexer := NewLexer(testInput) + for i, want := range expectedStrings { + got, err := lexer.Next() + if err != nil { + t.Error(err) + } + if got != want { + t.Errorf("Lexer.Next()[%v] of %q -> %v. Want: %v", i, testString, got, want) + } + } +} + +func TestSplit(t *testing.T) { + want := []string{"one", "two", "three four", "five \"six\"", "seven#eight", "eleven", "twelve\\", "thirteen=13", "fourteen/14"} + got, err := Split(testString) + if err != nil { + t.Error(err) + } + if len(want) != len(got) { + t.Errorf("Split(%q) -> %v. Want: %v", testString, got, want) + } + for i := range got { + if got[i] != want[i] { + t.Errorf("Split(%q)[%v] -> %v. Want: %v", testString, i, got[i], want[i]) + } + } +} diff --git a/tools/utils/style/wrapper.go b/tools/utils/style/wrapper.go index a58fbbcc2..6f1687b3f 100644 --- a/tools/utils/style/wrapper.go +++ b/tools/utils/style/wrapper.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/google/shlex" + "kitty/tools/utils/shlex" ) type escape_code interface {