
Source file src/bufio/scan.go

Documentation: bufio

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bufio

import (
	"bytes"
	"errors"
	"io"
	"unicode/utf8"
)

// Scanner provides a convenient interface for reading data such as
// a file of newline-delimited lines of text. Successive calls to
// the Scan method will step through the 'tokens' of a file, skipping
// the bytes between the tokens. The specification of a token is
// defined by a split function of type SplitFunc; the default split
// function breaks the input into lines with line termination stripped. Split
// functions are defined in this package for scanning a file into
// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
// client may instead provide a custom split function.
//
// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
// large to fit in the buffer. When a scan stops, the reader may have
// advanced arbitrarily far past the last token. Programs that need more
// control over error handling or large tokens, or must run sequential scans
// on a reader, should use bufio.Reader instead.
//
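// A minimal usage sketch, written from a client package (illustrative; it
// assumes os.Stdin as the input and prints each line it reads):
//
//	scanner := bufio.NewScanner(os.Stdin)
//	for scanner.Scan() {
//		fmt.Println(scanner.Text()) // the token is a line with the newline stripped
//	}
//	if err := scanner.Err(); err != nil {
//		fmt.Fprintln(os.Stderr, "reading standard input:", err)
//	}
//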
type Scanner struct {
	r            io.Reader // The reader provided by the client.
	split        SplitFunc // The function to split the tokens.
	maxTokenSize int       // Maximum size of a token; modified by tests.
	token        []byte    // Last token returned by split.
	buf          []byte    // Buffer used as argument to split.
	start        int       // First non-processed byte in buf.
	end          int       // End of data in buf.
	err          error     // Sticky error.
	empties      int       // Count of successive empty tokens.
	scanCalled   bool      // Scan has been called; buffer is in use.
	done         bool      // Scan has finished.
}

// SplitFunc is the signature of the split function used to tokenize the
// input. The arguments are an initial substring of the remaining unprocessed
// data and a flag, atEOF, that reports whether the Reader has no more data
// to give. The return values are the number of bytes to advance the input
// and the next token to return to the user, if any, plus an error, if any.
//
// Scanning stops if the function returns an error, in which case some of
// the input may be discarded. If that error is ErrFinalToken, scanning
// stops with no error.
//
// Otherwise, the Scanner advances the input. If the token is not nil,
// the Scanner returns it to the user. If the token is nil, the
// Scanner reads more data and continues scanning; if there is no more
// data--if atEOF was true--the Scanner returns. If the data does not
// yet hold a complete token, for instance if it has no newline while
// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
// Scanner to read more data into the slice and try again with a
// longer slice starting at the same point in the input.
//
// The function is never called with an empty data slice unless atEOF
// is true. If atEOF is true, however, data may be non-empty and,
// as always, holds unprocessed text.
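//
// A minimal sketch of a custom split function (illustrative only; the
// comma-separated input format is a hypothetical example). It returns one
// field per token and asks for more data until a comma or EOF is seen:
//
//	onComma := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
//		if atEOF && len(data) == 0 {
//			return 0, nil, nil
//		}
//		if i := bytes.IndexByte(data, ','); i >= 0 {
//			return i + 1, data[:i], nil
//		}
//		if atEOF {
//			return len(data), data, nil // final field, no trailing comma
//		}
//		return 0, nil, nil // request more data
//	}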
type SplitFunc func(data []byte, atEOF bool) (advance int, token []byte, err error)

// Errors returned by Scanner.
var (
	ErrTooLong         = errors.New("bufio.Scanner: token too long")
	ErrNegativeAdvance = errors.New("bufio.Scanner: SplitFunc returns negative advance count")
	ErrAdvanceTooFar   = errors.New("bufio.Scanner: SplitFunc returns advance count beyond input")
	ErrBadReadCount    = errors.New("bufio.Scanner: Read returned impossible count")
)

const (
	// MaxScanTokenSize is the maximum size used to buffer a token
	// unless the user provides an explicit buffer with Scanner.Buffer.
	// The actual maximum token size may be smaller as the buffer
	// may need to include, for instance, a newline.
	MaxScanTokenSize = 64 * 1024

	startBufSize = 4096 // Size of initial allocation for buffer.
)

// NewScanner returns a new Scanner to read from r.
// The split function defaults to ScanLines.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{
		r:            r,
		split:        ScanLines,
		maxTokenSize: MaxScanTokenSize,
	}
}

// Err returns the first non-EOF error that was encountered by the Scanner.
func (s *Scanner) Err() error {
	if s.err == io.EOF {
		return nil
	}
	return s.err
}

// Bytes returns the most recent token generated by a call to Scan.
// The underlying array may point to data that will be overwritten
// by a subsequent call to Scan. It does no allocation.
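//
// A minimal sketch of retaining tokens safely (illustrative; r stands for
// any io.Reader supplied by the caller): copy the bytes if they must
// outlive the next call to Scan.
//
//	scanner := bufio.NewScanner(r)
//	var kept [][]byte
//	for scanner.Scan() {
//		kept = append(kept, append([]byte(nil), scanner.Bytes()...))
//	}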
func (s *Scanner) Bytes() []byte {
	return s.token
}

// Text returns the most recent token generated by a call to Scan
// as a newly allocated string holding its bytes.
func (s *Scanner) Text() string {
	return string(s.token)
}

// ErrFinalToken is a special sentinel error value. It is intended to be
// returned by a Split function to indicate that the token being delivered
// with the error is the last token and scanning should stop after this one.
// After ErrFinalToken is received by Scan, scanning stops with no error.
// The value is useful to stop processing early or when it is necessary to
// deliver a final empty token. One could achieve the same behavior
// with a custom error value but providing one here is tidier.
// See the emptyFinalToken example for a use of this value.
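//
// A minimal sketch of stopping early at a marker line (illustrative; the
// "STOP" marker and the reader r are hypothetical). The split function wraps
// ScanLines and delivers the marker as the final token:
//
//	scanner := bufio.NewScanner(r)
//	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
//		advance, token, err = bufio.ScanLines(data, atEOF)
//		if err == nil && token != nil && string(token) == "STOP" {
//			err = bufio.ErrFinalToken
//		}
//		return
//	})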
var ErrFinalToken = errors.New("final token")

// Scan advances the Scanner to the next token, which will then be
// available through the Bytes or Text method. It returns false when the
// scan stops, either by reaching the end of the input or an error.
// After Scan returns false, the Err method will return any error that
// occurred during scanning, except that if it was io.EOF, Err
// will return nil.
// Scan panics if the split function returns too many empty
// tokens without advancing the input. This is a common error mode for
// scanners.
func (s *Scanner) Scan() bool {
	if s.done {
		return false
	}
	s.scanCalled = true
	// Loop until we have a token.
	for {
		// See if we can get a token with what we already have.
		// If we've run out of data but have an error, give the split function
		// a chance to recover any remaining, possibly empty token.
		if s.end > s.start || s.err != nil {
			advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
			if err != nil {
				if err == ErrFinalToken {
					s.token = token
					s.done = true
					return true
				}
				s.setErr(err)
				return false
			}
			if !s.advance(advance) {
				return false
			}
			s.token = token
			if token != nil {
				if s.err == nil || advance > 0 {
					s.empties = 0
				} else {
					// Returning tokens not advancing input at EOF.
					s.empties++
					if s.empties > maxConsecutiveEmptyReads {
						panic("bufio.Scan: too many empty tokens without progressing")
					}
				}
				return true
			}
		}
		// We cannot generate a token with what we are holding.
		// If we've already hit EOF or an I/O error, we are done.
		if s.err != nil {
			// Shut it down.
			s.start = 0
			s.end = 0
			return false
		}
		// Must read more data.
		// First, shift data to beginning of buffer if there's lots of empty space
		// or space is needed.
		if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
			copy(s.buf, s.buf[s.start:s.end])
			s.end -= s.start
			s.start = 0
		}
		// Is the buffer full? If so, resize.
		if s.end == len(s.buf) {
			// Guarantee no overflow in the multiplication below.
			const maxInt = int(^uint(0) >> 1)
			if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
				s.setErr(ErrTooLong)
				return false
			}
			newSize := len(s.buf) * 2
			if newSize == 0 {
				newSize = startBufSize
			}
			if newSize > s.maxTokenSize {
				newSize = s.maxTokenSize
			}
			newBuf := make([]byte, newSize)
			copy(newBuf, s.buf[s.start:s.end])
			s.buf = newBuf
			s.end -= s.start
			s.start = 0
		}
		// Finally we can read some input. Make sure we don't get stuck with
		// a misbehaving Reader. Officially we don't need to do this, but let's
		// be extra careful: Scanner is for safe, simple jobs.
		for loop := 0; ; {
			n, err := s.r.Read(s.buf[s.end:len(s.buf)])
			if n < 0 || len(s.buf)-s.end < n {
				s.setErr(ErrBadReadCount)
				break
			}
			s.end += n
			if err != nil {
				s.setErr(err)
				break
			}
			if n > 0 {
				s.empties = 0
				break
			}
			loop++
			if loop > maxConsecutiveEmptyReads {
				s.setErr(io.ErrNoProgress)
				break
			}
		}
	}
}

// advance consumes n bytes of the buffer. It reports whether the advance was legal.
func (s *Scanner) advance(n int) bool {
	if n < 0 {
		s.setErr(ErrNegativeAdvance)
		return false
	}
	if n > s.end-s.start {
		s.setErr(ErrAdvanceTooFar)
		return false
	}
	s.start += n
	return true
}

// setErr records the first error encountered.
func (s *Scanner) setErr(err error) {
	if s.err == nil || s.err == io.EOF {
		s.err = err
	}
}

// Buffer sets the initial buffer to use when scanning and the maximum
// size of buffer that may be allocated during scanning. The maximum
// token size is the larger of max and cap(buf). If max <= cap(buf),
// Scan will use this buffer only and do no allocation.
//
// By default, Scan uses an internal buffer and sets the
// maximum token size to MaxScanTokenSize.
//
// Buffer panics if it is called after scanning has started.
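//
// A minimal sketch of raising the limit for long lines (illustrative; the
// one-megabyte cap and the reader r are arbitrary examples):
//
//	scanner := bufio.NewScanner(r)
//	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)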
func (s *Scanner) Buffer(buf []byte, max int) {
	if s.scanCalled {
		panic("Buffer called after Scan")
	}
	s.buf = buf[0:cap(buf)]
	s.maxTokenSize = max
}

// Split sets the split function for the Scanner.
// The default split function is ScanLines.
//
// Split panics if it is called after scanning has started.
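//
// A minimal sketch (illustrative; r stands for any io.Reader). Split must be
// called before the first call to Scan:
//
//	scanner := bufio.NewScanner(r)
//	scanner.Split(bufio.ScanWords)
//	for scanner.Scan() {
//		fmt.Println(scanner.Text()) // one word per token
//	}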
func (s *Scanner) Split(split SplitFunc) {
	if s.scanCalled {
		panic("Split called after Scan")
	}
	s.split = split
}

// Split functions

// ScanBytes is a split function for a Scanner that returns each byte as a token.
func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	return 1, data[0:1], nil
}

var errorRune = []byte(string(utf8.RuneError))

// ScanRunes is a split function for a Scanner that returns each
// UTF-8-encoded rune as a token. The sequence of runes returned is
// equivalent to that from a range loop over the input as a string, which
// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
// Because of the Scan interface, this makes it impossible for the client to
// distinguish correctly encoded replacement runes from encoding errors.
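//
// A minimal usage sketch (illustrative, from a client package):
//
//	scanner := bufio.NewScanner(strings.NewReader("héllo"))
//	scanner.Split(bufio.ScanRunes)
//	for scanner.Scan() {
//		fmt.Printf("%q ", scanner.Text()) // one rune per token
//	}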
func ScanRunes(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}

	// Fast path 1: ASCII.
	if data[0] < utf8.RuneSelf {
		return 1, data[0:1], nil
	}

	// Fast path 2: Correct UTF-8 decode without error.
	_, width := utf8.DecodeRune(data)
	if width > 1 {
		// It's a valid encoding. Width cannot be one for a correctly encoded
		// non-ASCII rune.
		return width, data[0:width], nil
	}

	// We know it's an error: we have width==1 and implicitly r==utf8.RuneError.
	// Is the error because there wasn't a full rune to be decoded?
	// FullRune distinguishes correctly between erroneous and incomplete encodings.
	if !atEOF && !utf8.FullRune(data) {
		// Incomplete; get more bytes.
		return 0, nil, nil
	}

	// We have a real UTF-8 encoding error. Return a properly encoded error rune
	// but advance only one byte. This matches the behavior of a range loop over
	// an incorrectly encoded string.
	return 1, errorRune, nil
}

// dropCR drops a terminal \r from the data.
func dropCR(data []byte) []byte {
	if len(data) > 0 && data[len(data)-1] == '\r' {
		return data[0 : len(data)-1]
	}
	return data
}

// ScanLines is a split function for a Scanner that returns each line of
// text, stripped of any trailing end-of-line marker. The returned line may
// be empty. The end-of-line marker is one optional carriage return followed
// by one mandatory newline. In regular expression notation, it is `\r?\n`.
// The last non-empty line of input will be returned even if it has no
// newline.
func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\n'); i >= 0 {
		// We have a full newline-terminated line.
		return i + 1, dropCR(data[0:i]), nil
	}
	// If we're at EOF, we have a final, non-terminated line. Return it.
	if atEOF {
		return len(data), dropCR(data), nil
	}
	// Request more data.
	return 0, nil, nil
}

// isSpace reports whether the character is a Unicode white space character.
// We avoid dependency on the unicode package, but check validity of the implementation
// in the tests.
func isSpace(r rune) bool {
	if r <= '\u00FF' {
		// Obvious ASCII ones: \t through \r plus space. Plus two Latin-1 oddballs.
		switch r {
		case ' ', '\t', '\n', '\v', '\f', '\r':
			return true
		case '\u0085', '\u00A0':
			return true
		}
		return false
	}
	// High-valued ones.
	if '\u2000' <= r && r <= '\u200a' {
		return true
	}
	switch r {
	case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
		return true
	}
	return false
}

// ScanWords is a split function for a Scanner that returns each
// space-separated word of text, with surrounding spaces deleted. It will
// never return an empty string. The definition of space is set by
// unicode.IsSpace.
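//
// A minimal word-count sketch (illustrative, from a client package):
//
//	scanner := bufio.NewScanner(strings.NewReader("one two three"))
//	scanner.Split(bufio.ScanWords)
//	count := 0
//	for scanner.Scan() {
//		count++
//	}
//	// count is now 3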
func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
	// Skip leading spaces.
	start := 0
	for width := 0; start < len(data); start += width {
		var r rune
		r, width = utf8.DecodeRune(data[start:])
		if !isSpace(r) {
			break
		}
	}
	// Scan until space, marking end of word.
	for width, i := 0, start; i < len(data); i += width {
		var r rune
		r, width = utf8.DecodeRune(data[i:])
		if isSpace(r) {
			return i + width, data[start:i], nil
		}
	}
	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
	if atEOF && len(data) > start {
		return len(data), data[start:], nil
	}
	// Request more data.
	return start, nil, nil
}
