@@ -7,7 +7,7 @@ use rustc_ast::ast::{self, AttrStyle};
 use rustc_ast::token::{self, CommentKind, Delimiter, Token, TokenKind};
 use rustc_ast::tokenstream::TokenStream;
 use rustc_ast::util::unicode::contains_text_flow_control_chars;
-use rustc_errors::{error_code, Applicability, DiagCtxt, Diagnostic, StashKey};
+use rustc_errors::{error_code, Applicability, DiagCtxt, DiagnosticBuilder, StashKey};
 use rustc_lexer::unescape::{self, EscapeError, Mode};
 use rustc_lexer::{Base, DocStyle, RawStrError};
 use rustc_lexer::{Cursor, LiteralKind};
@@ -42,12 +42,12 @@ pub struct UnmatchedDelim {
     pub candidate_span: Option<Span>,
 }
 
-pub(crate) fn parse_token_trees<'a>(
-    sess: &'a ParseSess,
-    mut src: &'a str,
+pub(crate) fn parse_token_trees<'sess, 'src>(
+    sess: &'sess ParseSess,
+    mut src: &'src str,
     mut start_pos: BytePos,
     override_span: Option<Span>,
-) -> Result<TokenStream, Vec<Diagnostic>> {
+) -> Result<TokenStream, Vec<DiagnosticBuilder<'sess>>> {
     // Skip `#!`, if present.
     if let Some(shebang_len) = rustc_lexer::strip_shebang(src) {
         src = &src[shebang_len..];
@@ -76,39 +76,39 @@ pub(crate) fn parse_token_trees<'a>(
             let mut buffer = Vec::with_capacity(1);
             for unmatched in unmatched_delims {
                 if let Some(err) = make_unclosed_delims_error(unmatched, sess) {
-                    err.buffer(&mut buffer);
+                    buffer.push(err);
                 }
             }
             if let Err(errs) = res {
                 // Add unclosing delimiter or diff marker errors
                 for err in errs {
-                    err.buffer(&mut buffer);
+                    buffer.push(err);
                }
            }
            Err(buffer)
        }
    }
 }
 
-struct StringReader<'a> {
-    sess: &'a ParseSess,
+struct StringReader<'sess, 'src> {
+    sess: &'sess ParseSess,
     /// Initial position, read-only.
     start_pos: BytePos,
     /// The absolute offset within the source_map of the current character.
     pos: BytePos,
     /// Source text to tokenize.
-    src: &'a str,
+    src: &'src str,
     /// Cursor for getting lexer tokens.
-    cursor: Cursor<'a>,
+    cursor: Cursor<'src>,
     override_span: Option<Span>,
     /// When a "unknown start of token: \u{a0}" has already been emitted earlier
     /// in this file, it's safe to treat further occurrences of the non-breaking
     /// space character as whitespace.
     nbsp_is_whitespace: bool,
 }
 
-impl<'a> StringReader<'a> {
-    pub fn dcx(&self) -> &'a DiagCtxt {
+impl<'sess, 'src> StringReader<'sess, 'src> {
+    pub fn dcx(&self) -> &'sess DiagCtxt {
         &self.sess.dcx
     }
 
@@ -526,7 +526,7 @@ impl<'a> StringReader<'a> {
 
     /// Slice of the source text from `start` up to but excluding `self.pos`,
     /// meaning the slice does not include the character `self.ch`.
-    fn str_from(&self, start: BytePos) -> &'a str {
+    fn str_from(&self, start: BytePos) -> &'src str {
         self.str_from_to(start, self.pos)
     }
 
@@ -537,12 +537,12 @@ impl<'a> StringReader<'a> {
     }
 
     /// Slice of the source text spanning from `start` up to but excluding `end`.
-    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'a str {
+    fn str_from_to(&self, start: BytePos, end: BytePos) -> &'src str {
         &self.src[self.src_index(start)..self.src_index(end)]
     }
 
     /// Slice of the source text spanning from `start` until the end
-    fn str_from_to_end(&self, start: BytePos) -> &'a str {
+    fn str_from_to_end(&self, start: BytePos) -> &'src str {
         &self.src[self.src_index(start)..]
     }
 
0 commit comments