//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements pieces of the Preprocessor interface that manage the
// caching of lexed tokens.
//
//===----------------------------------------------------------------------===//
#include "clang/Lex/Preprocessor.h"

#include <cassert>

using namespace clang;
18 // EnableBacktrackAtThisPos - From the point that this method is called, and
19 // until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
20 // keeps track of the lexed tokens so that a subsequent Backtrack() call will
21 // make the Preprocessor re-lex the same tokens.
23 // Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
24 // be called multiple times and CommitBacktrackedTokens/Backtrack calls will
25 // be combined with the EnableBacktrackAtThisPos calls in reverse order.
26 void Preprocessor::EnableBacktrackAtThisPos() {
27 BacktrackPositions.push_back(CachedLexPos);
28 EnterCachingLexMode();
31 // Disable the last EnableBacktrackAtThisPos call.
32 void Preprocessor::CommitBacktrackedTokens() {
33 assert(!BacktrackPositions.empty()
34 && "EnableBacktrackAtThisPos was not called!");
35 BacktrackPositions.pop_back();
38 Preprocessor::CachedTokensRange Preprocessor::LastCachedTokenRange() {
39 assert(isBacktrackEnabled());
40 auto PrevCachedLexPos = BacktrackPositions.back();
41 return CachedTokensRange{PrevCachedLexPos, CachedLexPos};
44 void Preprocessor::EraseCachedTokens(CachedTokensRange TokenRange) {
45 assert(TokenRange.Begin <= TokenRange.End);
46 if (CachedLexPos == TokenRange.Begin && TokenRange.Begin != TokenRange.End) {
47 // We have backtracked to the start of the token range as we want to consume
48 // them again. Erase the tokens only after consuming then.
49 assert(!CachedTokenRangeToErase);
50 CachedTokenRangeToErase = TokenRange;
53 // The cached tokens were committed, so they should be erased now.
54 assert(TokenRange.End == CachedLexPos);
55 CachedTokens.erase(CachedTokens.begin() + TokenRange.Begin,
56 CachedTokens.begin() + TokenRange.End);
57 CachedLexPos = TokenRange.Begin;
61 // Make Preprocessor re-lex the tokens that were lexed since
62 // EnableBacktrackAtThisPos() was previously called.
63 void Preprocessor::Backtrack() {
64 assert(!BacktrackPositions.empty()
65 && "EnableBacktrackAtThisPos was not called!");
66 CachedLexPos = BacktrackPositions.back();
67 BacktrackPositions.pop_back();
68 recomputeCurLexerKind();
71 void Preprocessor::CachingLex(Token &Result) {
72 if (!InCachingLexMode())
75 if (CachedLexPos < CachedTokens.size()) {
76 Result = CachedTokens[CachedLexPos++];
77 // Erase the some of the cached tokens after they are consumed when
79 if (CachedTokenRangeToErase &&
80 CachedTokenRangeToErase->End == CachedLexPos) {
81 EraseCachedTokens(*CachedTokenRangeToErase);
82 CachedTokenRangeToErase = None;
90 if (isBacktrackEnabled()) {
91 // Cache the lexed token.
92 EnterCachingLexMode();
93 CachedTokens.push_back(Result);
98 if (CachedLexPos < CachedTokens.size()) {
99 EnterCachingLexMode();
101 // All cached tokens were consumed.
102 CachedTokens.clear();
107 void Preprocessor::EnterCachingLexMode() {
108 if (InCachingLexMode()) {
109 assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
113 PushIncludeMacroStack();
114 CurLexerKind = CLK_CachingLexer;
118 const Token &Preprocessor::PeekAhead(unsigned N) {
119 assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
120 ExitCachingLexMode();
121 for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
122 CachedTokens.push_back(Token());
123 Lex(CachedTokens.back());
125 EnterCachingLexMode();
126 return CachedTokens.back();
129 void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
130 assert(Tok.isAnnotation() && "Expected annotation token");
131 assert(CachedLexPos != 0 && "Expected to have some cached tokens");
132 assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
133 && "The annotation should be until the most recent cached token");
135 // Start from the end of the cached tokens list and look for the token
136 // that is the beginning of the annotation token.
137 for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
138 CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
139 if (AnnotBegin->getLocation() == Tok.getLocation()) {
140 assert((BacktrackPositions.empty() || BacktrackPositions.back() <= i) &&
141 "The backtrack pos points inside the annotated tokens!");
142 // Replace the cached tokens with the single annotation token.
143 if (i < CachedLexPos)
144 CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
152 bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const {
153 // There's currently no cached token...
157 const Token LastCachedTok = CachedTokens[CachedLexPos - 1];
158 if (LastCachedTok.getKind() != Tok.getKind())
162 if ((!getSourceManager().isInSameSLocAddrSpace(
163 Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) ||
170 void Preprocessor::ReplacePreviousCachedToken(ArrayRef<Token> NewToks) {
171 assert(CachedLexPos != 0 && "Expected to have some cached tokens");
172 CachedTokens.insert(CachedTokens.begin() + CachedLexPos - 1, NewToks.begin(),
174 CachedTokens.erase(CachedTokens.begin() + CachedLexPos - 1 + NewToks.size());
175 CachedLexPos += NewToks.size() - 1;