Implemented support for # type: ignore comments.

Eric Traut 2019-09-26 19:20:07 -07:00
parent e221d413b0
commit e0e87b3eb7
18 changed files with 191 additions and 57 deletions
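
Per PEP 484, a "# type: ignore" comment on a line by itself at the top of a file silences all errors in that file, while one at the end of a line silences errors on that line alone. A rough sketch of the behavior this commit enables, driving the real Tokenizer class changed below (the import path is abbreviated and the Python fragments are illustrative; the field names match the TokenizerOutput additions in this diff):

import { Tokenizer } from './parser/tokenizer';

// Illustrative driver: tokenize two small Python fragments and inspect the
// new "type: ignore" bookkeeping this commit adds to TokenizerOutput.
const fileLevel = new Tokenizer().tokenize('# type: ignore\nundefined_name + 1\n');
console.log(fileLevel.typeIgnoreAll);       // true: the whole file is suppressed

const lineLevel = new Tokenizer().tokenize('a = 1\nb = len(a)  # type: ignore\n');
console.log(lineLevel.typeIgnoreAll);       // false
console.log(lineLevel.typeIgnoreLines[1]);  // true: line indices are 0-based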

View File

@ -11,8 +11,9 @@
import * as fs from 'fs';
import { ConfigOptions, ExecutionEnvironment } from '../common/configOptions';
import { combinePaths, ensureTrailingDirectorySeparator, getDirectoryPath, getFileExtension, getFileSystemEntries,
getPathComponents, isDirectory, isFile, stripFileExtension, stripTrailingDirectorySeparator } from '../common/pathUtils';
import { combinePaths, ensureTrailingDirectorySeparator, getDirectoryPath,
getFileExtension, getFileSystemEntries, getPathComponents, isDirectory,
isFile, stripFileExtension, stripTrailingDirectorySeparator } from '../common/pathUtils';
import { versionToString } from '../common/pythonVersion';
import * as StringUtils from '../common/stringUtils';
import { ImplicitImport, ImportResult, ImportType } from './importResult';

View File

@ -87,7 +87,7 @@ export function getTextEditsForAutoImportSymbolAddition(symbolName: string,
if (priorImport) {
const insertionOffset = TextRange.getEnd(priorImport);
const insertionPosition = convertOffsetToPosition(insertionOffset, parseResults.lines);
const insertionPosition = convertOffsetToPosition(insertionOffset, parseResults.tokenizerOutput.lines);
textEditList.push({
range: { start: insertionPosition, end: insertionPosition },
@ -125,7 +125,7 @@ export function getTextEditsForAutoImportInsertion(symbolName: string, importSta
if (importType < curImportType) {
if (!insertBefore && prevImportType < importType) {
// Add an extra line to create a new group.
newImportStatement = parseResults.predominantLineEndSequence + newImportStatement;
newImportStatement = parseResults.tokenizerOutput.predominantEndOfLineSequence + newImportStatement;
}
break;
}
@ -139,7 +139,7 @@ export function getTextEditsForAutoImportInsertion(symbolName: string, importSta
if (curImport.followsNonImportStatement) {
if (importType > prevImportType) {
// Add an extra line to create a new group.
newImportStatement = parseResults.predominantLineEndSequence + newImportStatement;
newImportStatement = parseResults.tokenizerOutput.predominantEndOfLineSequence + newImportStatement;
}
break;
}
@ -149,7 +149,7 @@ export function getTextEditsForAutoImportInsertion(symbolName: string, importSta
if (importType > curImportType) {
// Add an extra line to create a new group.
newImportStatement = parseResults.predominantLineEndSequence + newImportStatement;
newImportStatement = parseResults.tokenizerOutput.predominantEndOfLineSequence + newImportStatement;
}
}
@ -166,14 +166,14 @@ export function getTextEditsForAutoImportInsertion(symbolName: string, importSta
if (insertionImport) {
if (insertBefore) {
newImportStatement = newImportStatement + parseResults.predominantLineEndSequence;
newImportStatement = newImportStatement + parseResults.tokenizerOutput.predominantEndOfLineSequence;
} else {
newImportStatement = parseResults.predominantLineEndSequence + newImportStatement;
newImportStatement = parseResults.tokenizerOutput.predominantEndOfLineSequence + newImportStatement;
}
insertionPosition = convertOffsetToPosition(
insertBefore ? insertionImport.node.start : TextRange.getEnd(insertionImport.node),
parseResults.lines);
parseResults.tokenizerOutput.lines);
} else {
insertionPosition = { line: 0, column: 0 };
}
@ -203,24 +203,24 @@ export function getTextEditsForAutoImportInsertion(symbolName: string, importSta
if (stopHere) {
insertionPosition = convertOffsetToPosition(statement.start,
parseResults.lines);
parseResults.tokenizerOutput.lines);
addNewLineBefore = false;
break;
} else {
insertionPosition = convertOffsetToPosition(
statement.start + statement.length,
parseResults.lines);
parseResults.tokenizerOutput.lines);
addNewLineBefore = true;
}
}
newImportStatement += parseResults.predominantLineEndSequence +
parseResults.predominantLineEndSequence;
newImportStatement += parseResults.tokenizerOutput.predominantEndOfLineSequence +
parseResults.tokenizerOutput.predominantEndOfLineSequence;
if (addNewLineBefore) {
newImportStatement = parseResults.predominantLineEndSequence + newImportStatement;
newImportStatement = parseResults.tokenizerOutput.predominantEndOfLineSequence + newImportStatement;
} else {
newImportStatement += parseResults.predominantLineEndSequence;
newImportStatement += parseResults.tokenizerOutput.predominantEndOfLineSequence;
}
}

View File

@ -211,6 +211,21 @@ export class SourceFile {
this._analysisJob.bindDiagnostics,
this._analysisJob.typeAnalysisFinalDiagnostics);
// Filter the diagnostics based on "type: ignore" lines.
const typeIgnoreLines = this._analysisJob.parseResults ?
this._analysisJob.parseResults.tokenizerOutput.typeIgnoreLines : {};
if (Object.keys(typeIgnoreLines).length > 0) {
diagList = diagList.filter(d => {
for (let line = d.range.start.line; line <= d.range.end.line; line++) {
if (typeIgnoreLines[line]) {
return false;
}
}
return true;
});
}
if (options.diagnosticSettings.reportImportCycles !== 'none' && this._analysisJob.circularDependencies.length > 0) {
const category = options.diagnosticSettings.reportImportCycles === 'warning' ?
DiagnosticCategory.Warning : DiagnosticCategory.Error;
@ -247,6 +262,12 @@ export class SourceFile {
diagList = [];
}
// If there is a "type: ignore" comment at the top of the file, clear
// the diagnostic list.
if (this._analysisJob.parseResults && this._analysisJob.parseResults.tokenizerOutput.typeIgnoreAll) {
diagList = [];
}
return diagList;
}
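
Taken together, the two hunks above define the suppression policy: a per-line ignore drops any diagnostic whose range touches an ignored line, and a file-level ignore clears the list outright. Restated as a standalone sketch (hypothetical names and a simplified diagnostic shape, not part of this commit):

interface DiagRange { start: { line: number }; end: { line: number }; }
interface Diag { range: DiagRange; message: string; }

function applyTypeIgnores(diags: Diag[],
        typeIgnoreLines: { [line: number]: boolean },
        typeIgnoreAll: boolean): Diag[] {

    // A "type: ignore" at the top of the file suppresses everything.
    if (typeIgnoreAll) {
        return [];
    }

    // Otherwise drop a diagnostic if any line it spans is an ignore line.
    return diags.filter(d => {
        for (let line = d.range.start.line; line <= d.range.end.line; line++) {
            if (typeIgnoreLines[line]) {
                return false;
            }
        }
        return true;
    });
}

// A diagnostic spanning lines 3-4 is dropped when either line is ignored.
const kept = applyTypeIgnores(
    [{ range: { start: { line: 3 }, end: { line: 4 } }, message: 'err' }],
    { 4: true }, false);
console.log(kept.length);  // 0

Note that the loop walks every line the diagnostic spans, so a multi-line diagnostic is silenced by an ignore comment on any one of its lines.
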
@ -431,6 +452,7 @@ export class SourceFile {
// Parse the token stream, building the abstract syntax tree.
const parser = new Parser();
const parseResults = parser.parseSourceFile(fileContents!, parseOptions, diagSink);
assert(parseResults !== undefined && parseResults.tokenizerOutput !== undefined);
this._analysisJob.parseResults = parseResults;
// Resolve imports.
@ -445,7 +467,7 @@ export class SourceFile {
strictFileSpec => strictFileSpec.regExp.test(this._filePath)) !== undefined;
this._analysisJob.diagnosticSettings = CommentUtils.getFileLevelDirectives(
this._analysisJob.parseResults.tokens, configOptions.diagnosticSettings,
this._analysisJob.parseResults.tokenizerOutput.tokens, configOptions.diagnosticSettings,
useStrict);
} catch (e) {
const message: string = (e.stack ? e.stack.toString() : undefined) ||
@ -459,10 +481,14 @@ export class SourceFile {
parseTree: ModuleNode.create({ start: 0, length: 0 }),
importedModules: [],
futureImports: new StringMap<boolean>(),
tokens: new TextRangeCollection<Token>([]),
lines: new TextRangeCollection<TextRange>([]),
predominantLineEndSequence: '\n',
predominantTabSequence: ' '
tokenizerOutput: {
tokens: new TextRangeCollection<Token>([]),
lines: new TextRangeCollection<TextRange>([]),
typeIgnoreAll: false,
typeIgnoreLines: {},
predominantEndOfLineSequence: '\n',
predominantTabSequence: ' '
}
};
this._analysisJob.imports = undefined;
this._analysisJob.builtinsImport = undefined;
@ -719,7 +745,7 @@ export class SourceFile {
private _buildFileInfo(configOptions: ConfigOptions, importMap?: ImportMap, builtinsScope?: Scope) {
assert(this._analysisJob.parseResults !== undefined);
const analysisDiagnostics = new TextRangeDiagnosticSink(this._analysisJob.parseResults!.lines);
const analysisDiagnostics = new TextRangeDiagnosticSink(this._analysisJob.parseResults!.tokenizerOutput.lines);
const fileInfo: AnalyzerFileInfo = {
importMap: importMap || {},
@ -729,7 +755,7 @@ export class SourceFile {
diagnosticSink: analysisDiagnostics,
executionEnvironment: configOptions.findExecEnvironment(this._filePath),
diagnosticSettings: this._analysisJob.diagnosticSettings,
lines: this._analysisJob.parseResults!.lines,
lines: this._analysisJob.parseResults!.tokenizerOutput.lines,
filePath: this._filePath,
isStubFile: this._isStubFile,
isTypingStubFile: this._isTypingStubFile,

View File

@ -120,8 +120,8 @@ export class TypeStubWriter extends ParseTreeWalker {
write() {
const parseResults = this._sourceFile.getParseResults()!;
this._lineEnd = parseResults.predominantLineEndSequence;
this._tab = parseResults.predominantTabSequence;
this._lineEnd = parseResults.tokenizerOutput.predominantEndOfLineSequence;
this._tab = parseResults.tokenizerOutput.predominantTabSequence;
this.walk(parseResults.parseTree);

View File

@ -179,7 +179,7 @@ export class CompletionProvider {
}
getCompletionsForPosition(): CompletionList | undefined {
const offset = convertPositionToOffset(this._position, this._parseResults.lines);
const offset = convertPositionToOffset(this._position, this._parseResults.tokenizerOutput.lines);
if (offset === undefined) {
return undefined;
}
@ -218,7 +218,7 @@ export class CompletionProvider {
}
// Get the text on that line prior to the insertion point.
const lineTextRange = this._parseResults.lines.getItemAt(this._position.line);
const lineTextRange = this._parseResults.tokenizerOutput.lines.getItemAt(this._position.line);
const textOnLine = this._fileContents.substr(lineTextRange.start, lineTextRange.length);
const priorText = textOnLine.substr(0, this._position.column);
const priorWordIndex = priorText.search(/\w+$/);
@ -303,12 +303,12 @@ export class CompletionProvider {
}
private _isWithinCommentOrString(offset: number, priorText: string): boolean {
const tokenIndex = this._parseResults.tokens.getItemAtPosition(offset);
const tokenIndex = this._parseResults.tokenizerOutput.tokens.getItemAtPosition(offset);
if (tokenIndex < 0) {
return false;
}
const token = this._parseResults.tokens.getItemAt(tokenIndex);
const token = this._parseResults.tokenizerOutput.tokens.getItemAt(tokenIndex);
if (token.type === TokenType.String) {
return true;

View File

@ -30,7 +30,7 @@ export class DefinitionProvider {
static getDefinitionsForPosition(parseResults: ParseResults,
position: DiagnosticTextPosition): DocumentTextRange[] | undefined {
const offset = convertPositionToOffset(position, parseResults.lines);
const offset = convertPositionToOffset(position, parseResults.tokenizerOutput.lines);
if (offset === undefined) {
return undefined;
}

View File

@ -36,7 +36,7 @@ export class HoverProvider {
static getHoverForPosition(parseResults: ParseResults, position: DiagnosticTextPosition,
importMap: ImportMap): HoverResults | undefined {
const offset = convertPositionToOffset(position, parseResults.lines);
const offset = convertPositionToOffset(position, parseResults.tokenizerOutput.lines);
if (offset === undefined) {
return undefined;
}
@ -49,8 +49,8 @@ export class HoverProvider {
const results: HoverResults = {
parts: [],
range: {
start: convertOffsetToPosition(node.start, parseResults.lines),
end: convertOffsetToPosition(TextRange.getEnd(node), parseResults.lines)
start: convertOffsetToPosition(node.start, parseResults.tokenizerOutput.lines),
end: convertOffsetToPosition(TextRange.getEnd(node), parseResults.tokenizerOutput.lines)
}
};

View File

@ -107,9 +107,9 @@ export class ImportSorter {
const lastStatement = statements[statementLimit - 1].node;
return {
start: convertOffsetToPosition(
statements[0].node.start, this._parseResults.lines),
statements[0].node.start, this._parseResults.tokenizerOutput.lines),
end: convertOffsetToPosition(
TextRange.getEnd(lastStatement), this._parseResults.lines)
TextRange.getEnd(lastStatement), this._parseResults.tokenizerOutput.lines)
};
}
@ -134,10 +134,10 @@ export class ImportSorter {
range: {
start: convertOffsetToPosition(
statements[secondaryBlockStart].node.start,
this._parseResults.lines),
this._parseResults.tokenizerOutput.lines),
end: convertOffsetToPosition(
TextRange.getEnd(statements[secondaryBlockLimit - 1].node),
this._parseResults.lines)
this._parseResults.tokenizerOutput.lines)
},
replacementText: ''
});
@ -157,7 +157,7 @@ export class ImportSorter {
// Insert a blank space between import type groups.
const curImportType = this._getImportGroup(statement);
if (prevImportGroup !== curImportType) {
importText += this._parseResults.predominantLineEndSequence;
importText += this._parseResults.tokenizerOutput.predominantEndOfLineSequence;
prevImportGroup = curImportType;
}
@ -172,7 +172,7 @@ export class ImportSorter {
// If this isn't the last statement, add a newline.
if (statement !== sortedStatements[sortedStatements.length - 1]) {
importLine += this._parseResults.predominantLineEndSequence;
importLine += this._parseResults.tokenizerOutput.predominantEndOfLineSequence;
}
importText += importLine;
@ -213,7 +213,7 @@ export class ImportSorter {
let nextSymbolIndex = 0;
while (nextSymbolIndex < symbols.length) {
let curTextLine = this._parseResults.predominantTabSequence + symbols[nextSymbolIndex];
let curTextLine = this._parseResults.tokenizerOutput.predominantTabSequence + symbols[nextSymbolIndex];
if (nextSymbolIndex < symbols.length - 1) {
curTextLine += ',';
} else {
@ -245,7 +245,7 @@ export class ImportSorter {
cumulativeText += curTextLine;
if (nextSymbolIndex < symbols.length) {
cumulativeText += this._parseResults.predominantLineEndSequence;
cumulativeText += this._parseResults.tokenizerOutput.predominantEndOfLineSequence;
}
}

View File

@ -55,9 +55,9 @@ function _addMissingOptionalToParam(parseResults: ParseResults,
const editActions: TextEditAction[] = [];
const startPos = convertOffsetToPosition(
node.typeAnnotation.start, parseResults.lines);
node.typeAnnotation.start, parseResults.tokenizerOutput.lines);
const endPos = convertOffsetToPosition(
TextRange.getEnd(node.typeAnnotation), parseResults.lines);
TextRange.getEnd(node.typeAnnotation), parseResults.tokenizerOutput.lines);
editActions.push({
range: { start: startPos, end: startPos },

View File

@ -57,8 +57,8 @@ class FindReferencesTreeWalker extends ParseTreeWalker {
this._referencesResult.locations.push({
path: this._filePath,
range: {
start: convertOffsetToPosition(node.start, this._parseResults.lines),
end: convertOffsetToPosition(TextRange.getEnd(node), this._parseResults.lines)
start: convertOffsetToPosition(node.start, this._parseResults.tokenizerOutput.lines),
end: convertOffsetToPosition(TextRange.getEnd(node), this._parseResults.tokenizerOutput.lines)
}
});
}
@ -79,7 +79,7 @@ export class ReferencesProvider {
position: DiagnosticTextPosition, includeDeclaration: boolean):
ReferencesResult | undefined {
const offset = convertPositionToOffset(position, parseResults.lines);
const offset = convertPositionToOffset(position, parseResults.tokenizerOutput.lines);
if (offset === undefined) {
return undefined;
}

View File

@ -42,7 +42,7 @@ export class SignatureHelpProvider {
static getSignatureHelpForPosition(parseResults: ParseResults, fileContents: string,
position: DiagnosticTextPosition): SignatureHelpResults | undefined {
const offset = convertPositionToOffset(position, parseResults.lines);
const offset = convertPositionToOffset(position, parseResults.tokenizerOutput.lines);
if (offset === undefined) {
return undefined;
}

View File

@ -62,10 +62,7 @@ export interface ParseResults {
parseTree: ModuleNode;
importedModules: ModuleImport[];
futureImports: StringMap<boolean>;
tokens: TextRangeCollection<Token>;
lines: TextRangeCollection<TextRange>;
predominantLineEndSequence: string;
predominantTabSequence: string;
tokenizerOutput: TokenizerOutput;
}
export interface ParseExpressionTextResults {
@ -130,14 +127,12 @@ export class Parser {
}
});
assert(this._tokenizerOutput !== undefined);
return {
parseTree: moduleNode,
importedModules: this._importedModules,
futureImports: this._futureImportMap,
tokens: this._tokenizerOutput!.tokens,
lines: this._tokenizerOutput!.lines,
predominantLineEndSequence: this._tokenizerOutput!.predominantEndOfLineSequence,
predominantTabSequence: this._tokenizerOutput!.predominantTabSequence
tokenizerOutput: this._tokenizerOutput!
};
}
@ -2385,7 +2380,7 @@ export class Parser {
const interTokenContents = this._fileContents!.substring(
curToken.start + curToken.length, nextToken.start);
const commentRegEx = /^(\s*#\s*type:\s*)([^\r\n]*)/;
const commentRegEx = /^(\s*#\s*type\:\s*)([^\r\n]*)/;
const match = interTokenContents.match(commentRegEx);
if (!match) {
return undefined;
@ -2393,6 +2388,12 @@ export class Parser {
// Synthesize a string token and StringNode.
const typeString = match[2];
// Ignore all "ignore" comments.
if (typeString.trim().match(/^ignore(\s|$)/)) {
return undefined;
}
const tokenOffset = curToken.start + curToken.length + match[1].length;
const stringToken = StringToken.create(tokenOffset,
typeString.length, StringTokenFlags.None, typeString, 0, undefined);
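
The two regular expressions above are easy to probe in isolation. A quick standalone sketch of their interplay (no parser state involved): the first captures everything after "# type:", and the second rejects the captured text when it is an "ignore" directive, since suppression is handled by the tokenizer rather than by the type-comment parser.

// Capture the text that follows "# type:".
const commentRegEx = /^(\s*#\s*type:\s*)([^\r\n]*)/;
// Detect a bare "ignore" directive; (\s|$) rejects suffixes like "ignores".
const ignoreRegEx = /^ignore(\s|$)/;

for (const text of ['  # type: int', '  # type: ignore', '  # type: ignores']) {
    const match = text.match(commentRegEx);
    if (match) {
        const typeString = match[2];
        console.log(typeString, '->', ignoreRegEx.test(typeString.trim()));
    }
}
// int -> false, ignore -> true, ignores -> false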

View File

@ -107,9 +107,22 @@ const _operatorInfo: { [key: number]: OperatorFlags } = {
const _byteOrderMarker = 0xFEFF;
export interface TokenizerOutput {
// List of all tokens.
tokens: TextRangeCollection<Token>;
// List of ranges that comprise the lines.
lines: TextRangeCollection<TextRange>;
// Map of all line numbers that end in a "type: ignore" comment.
typeIgnoreLines: { [line: number]: boolean };
// Program starts with a "type: ignore" comment.
typeIgnoreAll: boolean;
// Line-end sequence ('\n', '\r', or '\r\n').
predominantEndOfLineSequence: string;
// Tab sequence ('\t' or consecutive spaces).
predominantTabSequence: string;
}
@ -125,6 +138,8 @@ export class Tokenizer {
private _parenDepth = 0;
private _lineRanges: TextRange[] = [];
private _indentAmounts: number[] = [];
private _typeIgnoreAll = false;
private _typeIgnoreLines: { [line: number]: boolean } = {};
private _comments: Comment[] | undefined;
// Total times CR, CR/LF, and LF are used to terminate
@ -221,6 +236,8 @@ export class Tokenizer {
return {
tokens: new TextRangeCollection(this._tokens),
lines: new TextRangeCollection(this._lineRanges),
typeIgnoreLines: this._typeIgnoreLines,
typeIgnoreAll: this._typeIgnoreAll,
predominantEndOfLineSequence,
predominantTabSequence
};
@ -792,6 +809,14 @@ export class Tokenizer {
const value = this._cs.getText().substr(start, length);
const comment = Comment.create(start, length, value);
if (value.match(/^\s*type\:\s*ignore(\s|$)/)) {
if (this._tokens.findIndex(t => t.type !== TokenType.NewLine && t.type !== TokenType.Indent) < 0) {
this._typeIgnoreAll = true;
} else {
this._typeIgnoreLines[this._lineRanges.length] = true;
}
}
if (this._comments) {
this._comments.push(comment);
} else {
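
The placement decision above is compact: if the ignore comment appears before any token other than NewLine or Indent, it applies to the whole file; otherwise it applies to the line it sits on (this._lineRanges.length is the 0-based index of the line still being scanned). A standalone restatement of that branch (hypothetical helper and simplified token type, not the real tokenizer API):

enum TokType { NewLine, Indent, Other }

function recordTypeIgnore(tokensSoFar: TokType[], currentLine: number,
        state: { typeIgnoreAll: boolean; typeIgnoreLines: { [line: number]: boolean } }) {

    // Only NewLine/Indent tokens so far means the comment precedes any real
    // code, so it is the PEP 484 file-level form.
    const sawRealToken = tokensSoFar.some(
        t => t !== TokType.NewLine && t !== TokType.Indent);

    if (!sawRealToken) {
        state.typeIgnoreAll = true;
    } else {
        // Otherwise it suppresses only the line carrying the comment.
        state.typeIgnoreLines[currentLine] = true;
    }
}

Because leading NewLine and Indent tokens are skipped, blank lines before the comment do not defeat the file-level form; the TypeIgnoreAll1 test below checks exactly that.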

View File

@ -0,0 +1,10 @@
# This sample tests the type: ignore for the entire file.
# type: ignore
# The "type: ignore" should suppress these errors.
a: int = 3
b = len(a)
for for for

View File

@ -74,7 +74,7 @@ export function parseSampleFile(fileName: string, diagSink: DiagnosticSink,
export function buildAnalyzerFileInfo(filePath: string, parseResults: ParseResults,
configOptions: ConfigOptions): AnalyzerFileInfo {
const analysisDiagnostics = new TextRangeDiagnosticSink(parseResults.lines);
const analysisDiagnostics = new TextRangeDiagnosticSink(parseResults.tokenizerOutput.lines);
const fileInfo: AnalyzerFileInfo = {
importMap: {},
@ -83,7 +83,7 @@ export function buildAnalyzerFileInfo(filePath: string, parseResults: ParseResul
diagnosticSink: analysisDiagnostics,
executionEnvironment: configOptions.findExecEnvironment(filePath),
diagnosticSettings: cloneDiagnosticSettings(configOptions.diagnosticSettings),
lines: parseResults.lines,
lines: parseResults.tokenizerOutput.lines,
filePath,
isStubFile: filePath.endsWith('.pyi'),
isTypingStubFile: false,

View File

@ -1160,3 +1160,42 @@ test('Identifiers1', () => {
const token4 = results.tokens.getItemAt(4);
assert.equal(token4.type, TokenType.Identifier);
});
test ('TypeIgnoreAll1', () => {
const t = new Tokenizer();
const results = t.tokenize('\n#type:ignore\n"test"');
assert.equal(results.typeIgnoreAll, true);
});
test ('TypeIgnoreAll2', () => {
const t = new Tokenizer();
const results = t.tokenize('\n# type: ignore ssss\n');
assert.equal(results.typeIgnoreAll, true);
});
test ('TypeIgnoreAll3', () => {
const t = new Tokenizer();
const results = t.tokenize('\n# type: ignoressss\n');
assert.equal(results.typeIgnoreAll, false);
});
test ('TypeIgnoreAll4', () => {
const t = new Tokenizer();
const results = t.tokenize('\n"hello"\n# type: ignore\n');
assert.equal(results.typeIgnoreAll, false);
});
test ('TypeIgnoreLine1', () => {
const t = new Tokenizer();
const results = t.tokenize('\na = 3 # type: ignore\n"test" # type:ignore');
assert.equal(Object.keys(results.typeIgnoreLines).length, 2);
assert.equal(results.typeIgnoreLines[1], true);
assert.equal(results.typeIgnoreLines[2], true);
});
test ('TypeIgnoreLine2', () => {
const t = new Tokenizer();
const results = t.tokenize('a = 3 # type: ignores\n"test" # type:ignore');
assert.equal(Object.keys(results.typeIgnoreLines).length, 1);
assert.equal(results.typeIgnoreLines[1], true);
});

View File

@ -651,3 +651,15 @@ test('TypedDict6', () => {
validateResults(analysisResults, 12);
});
test('TypeIgnore1', () => {
const analysisResults = TestUtils.typeAnalyzeSampleFiles(['typeIgnore1.py']);
validateResults(analysisResults, 0);
});
test('TypeIgnore2', () => {
const analysisResults = TestUtils.typeAnalyzeSampleFiles(['typeIgnore2.py']);
validateResults(analysisResults, 0);
});

View File

@ -0,0 +1,20 @@
# This sample tests the type: ignore for individual lines.
from typing import Dict
a: int = 3
b = len(a) # type: ignore
for for for # type: ignore
c: Dict[str, str] = {
3: 3,
'hello': 3,
3.2: 2.4
} #type:ignore # something