@@ -19,12 +19,13 @@ use rustc_lint_defs::BuiltinLintDiag;
 use rustc_lint_defs::builtin::{
     RUST_2021_INCOMPATIBLE_OR_PATTERNS, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
 };
-use rustc_parse::parser::{ParseNtResult, Parser, Recovery};
+use rustc_parse::exp;
+use rustc_parse::parser::{Parser, Recovery};
 use rustc_session::Session;
 use rustc_session::parse::ParseSess;
 use rustc_span::edition::Edition;
 use rustc_span::hygiene::Transparency;
-use rustc_span::{Ident, MacroRulesNormalizedIdent, Span, kw, sym};
+use rustc_span::{Ident, Span, kw, sym};
 use tracing::{debug, instrument, trace, trace_span};

 use super::macro_parser::{NamedMatches, NamedParseResult};
@@ -34,8 +35,6 @@ use crate::base::{
     SyntaxExtensionKind, TTMacroExpander,
 };
 use crate::expand::{AstFragment, AstFragmentKind, ensure_complete_parse, parse_ast_fragment};
-use crate::mbe::diagnostics::{annotate_doc_comment, parse_failure_msg};
-use crate::mbe::macro_parser::NamedMatch::*;
 use crate::mbe::macro_parser::{Error, ErrorReported, Failure, MatcherLoc, Success, TtParser};
 use crate::mbe::transcribe::transcribe;
 use crate::mbe::{self, KleeneOp, macro_check};
@@ -168,11 +167,6 @@ pub(super) trait Tracker<'matcher> {
     fn recovery() -> Recovery {
         Recovery::Forbidden
     }
-
-    fn set_expected_token(&mut self, _tok: &'matcher Token) {}
-    fn get_expected_token(&self) -> Option<&'matcher Token> {
-        None
-    }
 }

 /// A noop tracker that is used in the hot path of the expansion, has zero overhead thanks to
@@ -360,11 +354,6 @@ pub(super) fn try_match_macro<'matcher, T: Tracker<'matcher>>(
     Err(CanRetry::Yes)
 }

-// Note that macro-by-example's input is also matched against a token tree:
-// $( $lhs:tt => $rhs:tt );+
-//
-// Holy self-referential!
-
 /// Converts a macro item into a syntax extension.
 pub fn compile_declarative_macro(
     sess: &Session,
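For context on the "Holy self-referential!" comment removed above: a `macro_rules!` body is itself a list of `$lhs => $rhs` rules separated by `;` (or by `,` for the non-`macro_rules` form, matching the `Semi`/`Comma` separator choice in both the old matcher and the new `exp_sep` below), so the old code could match it with the very matcher grammar it implements. A minimal standalone illustration of that rule shape, ordinary user code rather than part of this patch:

// Each rule is `lhs => rhs`; rules are separated by `;`, with a trailing `;` allowed.
macro_rules! min {
    // rule 1
    ($x:expr) => { $x };
    // rule 2
    ($x:expr, $($rest:expr),+) => { std::cmp::min($x, min!($($rest),+)) };
}

fn main() {
    assert_eq!(min!(3, 1, 2), 1);
}
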
@@ -390,157 +379,66 @@ pub fn compile_declarative_macro(
     };
     let dummy_syn_ext = |guar| (mk_syn_ext(Arc::new(DummyExpander(guar))), Vec::new());

-    let lhs_nm = Ident::new(sym::lhs, span);
-    let rhs_nm = Ident::new(sym::rhs, span);
-    let tt_spec = NonterminalKind::TT;
     let macro_rules = macro_def.macro_rules;
+    let exp_sep = if macro_rules { exp!(Semi) } else { exp!(Comma) };

-    // Parse the macro_rules! invocation
-
-    // The pattern that macro_rules matches.
-    // The grammar for macro_rules! is:
-    // $( $lhs:tt => $rhs:tt );+
-    // ...quasiquoting this would be nice.
-    // These spans won't matter, anyways
-    let argument_gram = vec![
-        mbe::TokenTree::Sequence(
-            DelimSpan::dummy(),
-            mbe::SequenceRepetition {
-                tts: vec![
-                    mbe::TokenTree::MetaVarDecl { span, name: lhs_nm, kind: tt_spec },
-                    mbe::TokenTree::token(token::FatArrow, span),
-                    mbe::TokenTree::MetaVarDecl { span, name: rhs_nm, kind: tt_spec },
-                ],
-                separator: Some(Token::new(
-                    if macro_rules { token::Semi } else { token::Comma },
-                    span,
-                )),
-                kleene: mbe::KleeneToken::new(mbe::KleeneOp::OneOrMore, span),
-                num_captures: 2,
-            },
-        ),
-        // to phase into semicolon-termination instead of semicolon-separation
-        mbe::TokenTree::Sequence(
-            DelimSpan::dummy(),
-            mbe::SequenceRepetition {
-                tts: vec![mbe::TokenTree::token(
-                    if macro_rules { token::Semi } else { token::Comma },
-                    span,
-                )],
-                separator: None,
-                kleene: mbe::KleeneToken::new(mbe::KleeneOp::ZeroOrMore, span),
-                num_captures: 0,
-            },
-        ),
-    ];
-    // Convert it into `MatcherLoc` form.
-    let argument_gram = mbe::macro_parser::compute_locs(&argument_gram);
-
-    let create_parser = || {
-        let body = macro_def.body.tokens.clone();
-        Parser::new(&sess.psess, body, rustc_parse::MACRO_ARGUMENTS)
-    };
-
-    let parser = create_parser();
-    let mut tt_parser =
-        TtParser::new(Ident::with_dummy_span(if macro_rules { kw::MacroRules } else { kw::Macro }));
-    let argument_map =
-        match tt_parser.parse_tt(&mut Cow::Owned(parser), &argument_gram, &mut NoopTracker) {
-            Success(m) => m,
-            Failure(()) => {
-                debug!("failed to parse macro tt");
-                // The fast `NoopTracker` doesn't have any info on failure, so we need to retry it
-                // with another one that gives us the information we need.
-                // For this we need to reclone the macro body as the previous parser consumed it.
-                let retry_parser = create_parser();
-
-                let mut track = diagnostics::FailureForwarder::new();
-                let parse_result =
-                    tt_parser.parse_tt(&mut Cow::Owned(retry_parser), &argument_gram, &mut track);
-                let Failure((token, _, msg)) = parse_result else {
-                    unreachable!("matcher returned something other than Failure after retry");
-                };
-
-                let s = parse_failure_msg(&token, track.get_expected_token());
-                let sp = token.span.substitute_dummy(span);
-                let mut err = sess.dcx().struct_span_err(sp, s);
-                err.span_label(sp, msg);
-                annotate_doc_comment(&mut err, sess.source_map(), sp);
-                let guar = err.emit();
-                return dummy_syn_ext(guar);
-            }
-            Error(sp, msg) => {
-                let guar = sess.dcx().span_err(sp.substitute_dummy(span), msg);
-                return dummy_syn_ext(guar);
-            }
-            ErrorReported(guar) => {
-                return dummy_syn_ext(guar);
-            }
-        };
+    let body = macro_def.body.tokens.clone();
+    let mut p = Parser::new(&sess.psess, body, rustc_parse::MACRO_ARGUMENTS);

+    // Don't abort iteration early, so that multiple errors can be reported.
     let mut guar = None;
     let mut check_emission = |ret: Result<(), ErrorGuaranteed>| guar = guar.or(ret.err());

-    // Extract the arguments:
-    let lhses = match &argument_map[&MacroRulesNormalizedIdent::new(lhs_nm)] {
-        MatchedSeq(s) => s
-            .iter()
-            .map(|m| {
-                if let MatchedSingle(ParseNtResult::Tt(tt)) = m {
-                    let tt = mbe::quoted::parse(
-                        &TokenStream::new(vec![tt.clone()]),
-                        true,
-                        sess,
-                        node_id,
-                        features,
-                        edition,
-                    )
-                    .pop()
-                    .unwrap();
-                    // We don't handle errors here, the driver will abort
-                    // after parsing/expansion. We can report every error in every macro this way.
-                    check_emission(check_lhs_nt_follows(sess, node_id, &tt));
-                    return tt;
-                }
-                sess.dcx().span_bug(span, "wrong-structured lhs")
-            })
-            .collect::<Vec<mbe::TokenTree>>(),
-        _ => sess.dcx().span_bug(span, "wrong-structured lhs"),
-    };
+    let mut lhses = Vec::new();
+    let mut rhses = Vec::new();

-    let rhses = match &argument_map[&MacroRulesNormalizedIdent::new(rhs_nm)] {
-        MatchedSeq(s) => s
-            .iter()
-            .map(|m| {
-                if let MatchedSingle(ParseNtResult::Tt(tt)) = m {
-                    return mbe::quoted::parse(
-                        &TokenStream::new(vec![tt.clone()]),
-                        false,
-                        sess,
-                        node_id,
-                        features,
-                        edition,
-                    )
-                    .pop()
-                    .unwrap();
-                }
-                sess.dcx().span_bug(span, "wrong-structured rhs")
-            })
-            .collect::<Vec<mbe::TokenTree>>(),
-        _ => sess.dcx().span_bug(span, "wrong-structured rhs"),
-    };
-
-    for rhs in &rhses {
-        check_emission(check_rhs(sess, rhs));
+    while p.token != token::Eof {
+        let lhs_tt = p.parse_token_tree();
+        let lhs_tt = mbe::quoted::parse(
+            &TokenStream::new(vec![lhs_tt]),
+            true, // LHS
+            sess,
+            node_id,
+            features,
+            edition,
+        )
+        .pop()
+        .unwrap();
+        // We don't handle errors here, the driver will abort after parsing/expansion. We can
+        // report every error in every macro this way.
+        check_emission(check_lhs_nt_follows(sess, node_id, &lhs_tt));
+        check_emission(check_lhs_no_empty_seq(sess, slice::from_ref(&lhs_tt)));
+        if let Err(e) = p.expect(exp!(FatArrow)) {
+            return dummy_syn_ext(e.emit());
+        }
+        let rhs_tt = p.parse_token_tree();
+        let rhs_tt = mbe::quoted::parse(
+            &TokenStream::new(vec![rhs_tt]),
+            false, // RHS
+            sess,
+            node_id,
+            features,
+            edition,
+        )
+        .pop()
+        .unwrap();
+        check_emission(check_rhs(sess, &rhs_tt));
+        check_emission(macro_check::check_meta_variables(&sess.psess, node_id, &lhs_tt, &rhs_tt));
+        lhses.push(lhs_tt);
+        rhses.push(rhs_tt);
+        if p.token == token::Eof {
+            break;
+        }
+        if let Err(e) = p.expect(exp_sep) {
+            return dummy_syn_ext(e.emit());
+        }
     }

-    // Don't abort iteration early, so that errors for multiple lhses can be reported.
-    for lhs in &lhses {
-        check_emission(check_lhs_no_empty_seq(sess, slice::from_ref(lhs)));
+    if lhses.is_empty() {
+        let guar = sess.dcx().span_err(span, "macros must contain at least one rule");
+        return dummy_syn_ext(guar);
     }

-    check_emission(macro_check::check_meta_variables(&sess.psess, node_id, span, &lhses, &rhses));
-
     let transparency = find_attr!(attrs, AttributeKind::MacroTransparency(x) => *x)
         .unwrap_or(Transparency::fallback(macro_rules));

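For readers following the new control flow in `compile_declarative_macro` above, here is the same loop shape modeled as a small self-contained program. This is only a sketch: `Rule`, `parse_rules`, and the string tokens are illustrative stand-ins, not rustc types or APIs. The structure mirrors the added code: take one token tree for the LHS, expect `=>`, take one token tree for the RHS, then either stop at end of input or expect the rule separator, and finally reject an empty rule list.

// Sketch of the new per-rule parsing loop (illustrative names, not rustc APIs).

#[derive(Debug)]
struct Rule {
    lhs: String,
    rhs: String,
}

fn parse_rules(tokens: &[&str], sep: &str) -> Result<Vec<Rule>, String> {
    let mut p = tokens.iter().peekable();
    let mut rules = Vec::new();
    while p.peek().is_some() {
        // LHS: one "token tree" (a single token in this toy model).
        let lhs = p.next().unwrap().to_string();
        // Expect `=>` between LHS and RHS, like `p.expect(exp!(FatArrow))`.
        match p.next() {
            Some(&"=>") => {}
            other => return Err(format!("expected `=>`, found {other:?}")),
        }
        // RHS: one "token tree".
        let rhs = match p.next() {
            Some(tok) => tok.to_string(),
            None => return Err("expected RHS after `=>`".to_string()),
        };
        rules.push(Rule { lhs, rhs });
        // End of input terminates the loop; otherwise expect the separator
        // (`;` for macro_rules!, `,` otherwise), like `p.expect(exp_sep)`.
        if p.peek().is_none() {
            break;
        }
        match p.next() {
            Some(&s) if s == sep => {}
            other => return Err(format!("expected `{sep}`, found {other:?}")),
        }
    }
    if rules.is_empty() {
        return Err("macros must contain at least one rule".to_string());
    }
    Ok(rules)
}

fn main() {
    let tokens = ["(a)", "=>", "{A}", ";", "(b)", "=>", "{B}", ";"];
    let rules = parse_rules(&tokens, ";").unwrap();
    assert_eq!(rules.len(), 2);
    println!("{rules:?}");
}

As in the patch, a separator immediately before end of input is accepted, which keeps the old grammar's tolerance for a trailing `;`.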