From a764bc9822e113a18c954286a62b26fde6514a64 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 14:47:16 -0800 Subject: [PATCH 01/83] Undo changes --- pythonFiles/completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index e530be32b367..99f8582c614c 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -88,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(p.description[6:] for p in completion.params if p)) + ', '.join(self._get_param_name(p.description) for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. From 9d1b2cc85c3563ccc9b7a242206ae6a5b6d644f6 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 15:44:21 -0800 Subject: [PATCH 02/83] Test fixes --- pythonFiles/completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index 99f8582c614c..e530be32b367 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -88,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(self._get_param_name(p.description) for p in completion.params if p)) + ', '.join(p.description[6:] for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. From a91291a072bc9730252c199969e7a40a698ca2d2 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Thu, 1 Mar 2018 22:03:47 -0800 Subject: [PATCH 03/83] Increase timeout --- src/test/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index 135ca5d8b6c1..22ffd45a0675 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -21,7 +21,7 @@ const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 
'Debug' : undefined; const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, - timeout: 25000, + timeout: 35000, retries: 3, grep }; From bf266af260b16e1021511a7274c3d6576182179f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 16:26:53 -0800 Subject: [PATCH 04/83] Remove double event listening --- src/client/providers/linterProvider.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index fb66aab3971b..27aa85ffa61f 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -9,7 +9,6 @@ import { ConfigSettingMonitor } from '../common/configSettingMonitor'; import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; -import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; @@ -17,7 +16,6 @@ export class LinterProvider implements vscode.Disposable { private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; - private interpreterService: IInterpreterService; private documents: IDocumentManager; private configuration: IConfigurationService; private linterManager: ILinterManager; @@ -31,12 +29,9 @@ export class LinterProvider implements vscode.Disposable { this.fs = serviceContainer.get(IFileSystem); this.engine = serviceContainer.get(ILintingEngine); this.linterManager = serviceContainer.get(ILinterManager); - this.interpreterService = serviceContainer.get(IInterpreterService); this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); - this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); - this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); this.documents.onDidCloseTextDocument(e => this.onDocumentClosed(e), this.context.subscriptions); this.documents.onDidSaveTextDocument((e) => this.onDocumentSaved(e), this.context.subscriptions); From 7bc6bd643e5ec53eb71a259ad2a5a43a643f7e33 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 16:35:39 -0800 Subject: [PATCH 05/83] Remove test --- src/test/linters/lint.provider.test.ts | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 023ee86223be..51e49d3d35b9 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -113,16 +113,6 @@ suite('Linting - Provider', () => { engine.verify(x => x.lintDocument(document.object, 'save'), TypeMoq.Times.never()); }); - test('Lint on change interpreters', () => { - const e = new vscode.EventEmitter(); - interpreterService.setup(x => x.onDidChangeInterpreter).returns(() => e.event); - - // tslint:disable-next-line:no-unused-variable - const provider = new LinterProvider(context.object, serviceContainer); - e.fire(); - engine.verify(x => x.lintOpenPythonFiles(), TypeMoq.Times.once()); - }); - test('Lint on save pylintrc', async () => { docManager.setup(x => x.onDidSaveTextDocument).returns(() => emitter.event); document.setup(x => x.uri).returns(() => vscode.Uri.file('.pylintrc')); From 8ce8b48db3aedb785026b2fe22071b40d2f6c048 Mon 
Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 17:02:12 -0800 Subject: [PATCH 06/83] Revert "Remove test" This reverts commit e240c3fd117c38b9e6fdcbdd1ba2715789fefe48. --- src/test/linters/lint.provider.test.ts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 51e49d3d35b9..023ee86223be 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -113,6 +113,16 @@ suite('Linting - Provider', () => { engine.verify(x => x.lintDocument(document.object, 'save'), TypeMoq.Times.never()); }); + test('Lint on change interpreters', () => { + const e = new vscode.EventEmitter(); + interpreterService.setup(x => x.onDidChangeInterpreter).returns(() => e.event); + + // tslint:disable-next-line:no-unused-variable + const provider = new LinterProvider(context.object, serviceContainer); + e.fire(); + engine.verify(x => x.lintOpenPythonFiles(), TypeMoq.Times.once()); + }); + test('Lint on save pylintrc', async () => { docManager.setup(x => x.onDidSaveTextDocument).returns(() => emitter.event); document.setup(x => x.uri).returns(() => vscode.Uri.file('.pylintrc')); From e3a549e58e0f888d79364e353cbcdf00d86b3416 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 17:02:47 -0800 Subject: [PATCH 07/83] Revert "Remove double event listening" This reverts commit af573be27372a79d5589e2134002cc753bb54f2a. --- src/client/providers/linterProvider.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index 27aa85ffa61f..fb66aab3971b 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -9,6 +9,7 @@ import { ConfigSettingMonitor } from '../common/configSettingMonitor'; import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; +import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; @@ -16,6 +17,7 @@ export class LinterProvider implements vscode.Disposable { private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; + private interpreterService: IInterpreterService; private documents: IDocumentManager; private configuration: IConfigurationService; private linterManager: ILinterManager; @@ -29,9 +31,12 @@ export class LinterProvider implements vscode.Disposable { this.fs = serviceContainer.get(IFileSystem); this.engine = serviceContainer.get(ILintingEngine); this.linterManager = serviceContainer.get(ILinterManager); + this.interpreterService = serviceContainer.get(IInterpreterService); this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); + this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); + this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); this.documents.onDidCloseTextDocument(e => this.onDocumentClosed(e), this.context.subscriptions); this.documents.onDidSaveTextDocument((e) => this.onDocumentSaved(e), this.context.subscriptions); From 92e8c1ee2e264784b76a250d1f39b157ba198bbe Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Mar 2018 
15:32:47 -0700 Subject: [PATCH 08/83] #1096 The if statement is automatically formatted incorrectly --- src/client/formatters/lineFormatter.ts | 10 ++++++---- src/test/format/extension.onEnterFormat.test.ts | 7 ++++++- src/test/pythonFiles/formatting/fileToFormatOnEnter.py | 1 + 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 4b3bff70aa8d..835e7e82b92a 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -5,14 +5,15 @@ import Char from 'typescript-char'; import { BraceCounter } from '../language/braceCounter'; import { TextBuilder } from '../language/textBuilder'; +import { TextRangeCollection } from '../language/textRangeCollection'; import { Tokenizer } from '../language/tokenizer'; import { ITextRangeCollection, IToken, TokenType } from '../language/types'; export class LineFormatter { - private builder: TextBuilder; - private tokens: ITextRangeCollection; - private braceCounter: BraceCounter; - private text: string; + private builder = new TextBuilder(); + private tokens: ITextRangeCollection = new TextRangeCollection([]); + private braceCounter = new BraceCounter(); + private text = ''; // tslint:disable-next-line:cyclomatic-complexity public formatLine(text: string): string { @@ -123,6 +124,7 @@ export class LineFormatter { if (this.isBraceType(t.type)) { this.braceCounter.countBrace(t); } + this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); } diff --git a/src/test/format/extension.onEnterFormat.test.ts b/src/test/format/extension.onEnterFormat.test.ts index 74597ce19be7..23e5cbecb8fa 100644 --- a/src/test/format/extension.onEnterFormat.test.ts +++ b/src/test/format/extension.onEnterFormat.test.ts @@ -59,8 +59,13 @@ suite('Formatting - OnEnter provider', () => { assert.equal(text, 'x.y', 'Line ending with period was reformatted'); }); - test('Formatting line ending in string', async () => { + test('Formatting line with unknown neighboring tokens', async () => { const text = await formatAtPosition(9, 0); + assert.equal(text, 'if x <= 1:', 'Line with unknown neighboring tokens was not formatted'); + }); + + test('Formatting line ending in string', async () => { + const text = await formatAtPosition(10, 0); assert.equal(text, 'x + """', 'Line ending in multiline string was not formatted'); }); diff --git a/src/test/pythonFiles/formatting/fileToFormatOnEnter.py b/src/test/pythonFiles/formatting/fileToFormatOnEnter.py index bbd025363098..67d533125ab2 100644 --- a/src/test/pythonFiles/formatting/fileToFormatOnEnter.py +++ b/src/test/pythonFiles/formatting/fileToFormatOnEnter.py @@ -6,4 +6,5 @@ x+1 # @x x.y +if x<=1: x+""" From b540a1dc43b376e05ddb29bbf6a16dd1fb51843e Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Mar 2018 15:35:31 -0700 Subject: [PATCH 09/83] Merge fix --- src/test/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index 22ffd45a0675..135ca5d8b6c1 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -21,7 +21,7 @@ const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 
'Debug' : undefined; const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, - timeout: 35000, + timeout: 25000, retries: 3, grep }; From 7b0573ed946ec6ed34d077b0f824f2a7aaaba613 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Mar 2018 16:08:23 -0700 Subject: [PATCH 10/83] Add more tests --- src/client/formatters/lineFormatter.ts | 41 ++++++++++++++++--- .../format/extension.onEnterFormat.test.ts | 12 +++++- .../formatting/fileToFormatOnEnter.py | 2 + 3 files changed, 48 insertions(+), 7 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 835e7e82b92a..7684c9337755 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -73,7 +73,7 @@ export class LineFormatter { break; default: - this.handleOther(t); + this.handleOther(t, i); break; } } @@ -109,10 +109,7 @@ export class LineFormatter { if (this.braceCounter.isOpened(TokenType.OpenBrace)) { // Check if this is = in function arguments. If so, do not // add spaces around it. - const prev = this.tokens.getItemAt(index - 1); - const prevPrev = this.tokens.getItemAt(index - 2); - if (prev.type === TokenType.Identifier && - (prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace)) { + if (this.isEqualsInsideArguments(index)) { this.builder.append('='); return true; } @@ -120,14 +117,46 @@ export class LineFormatter { return false; } - private handleOther(t: IToken): void { + private handleOther(t: IToken, index: number): void { if (this.isBraceType(t.type)) { this.braceCounter.countBrace(t); + this.builder.append(this.text.substring(t.start, t.end)); + return; + } + + if (this.isEqualsInsideArguments(index - 1)) { + // Don't add space around = inside function arguments + this.builder.append(this.text.substring(t.start, t.end)); + return; + } + + if (index > 0) { + const prev = this.tokens.getItemAt(index - 1); + if (this.isOpenBraceType(prev.type)) { + // Don't insert space after (, [ or { + this.builder.append(this.text.substring(t.start, t.end)); + return; + } } + + // In general, keep tokes separated this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); } + private isEqualsInsideArguments(index: number): boolean { + if (index < 2) { + return false; + } + const prev = this.tokens.getItemAt(index - 1); + const prevPrev = this.tokens.getItemAt(index - 2); + if (prev.type === TokenType.Identifier && + (prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace)) { + return true; + } + return false; + } + private isOpenBraceType(type: TokenType): boolean { return type === TokenType.OpenBrace || type === TokenType.OpenBracket || type === TokenType.OpenCurly; } diff --git a/src/test/format/extension.onEnterFormat.test.ts b/src/test/format/extension.onEnterFormat.test.ts index 23e5cbecb8fa..e34aeb0a0ed3 100644 --- a/src/test/format/extension.onEnterFormat.test.ts +++ b/src/test/format/extension.onEnterFormat.test.ts @@ -64,8 +64,18 @@ suite('Formatting - OnEnter provider', () => { assert.equal(text, 'if x <= 1:', 'Line with unknown neighboring tokens was not formatted'); }); - test('Formatting line ending in string', async () => { + test('Formatting method definition with arguments', async () => { const text = await formatAtPosition(10, 0); + assert.equal(text, 'def __init__(self, age=23)', 'Method definition with arguments was not formatted'); + }); + + test('Formatting space after open brace', async () => { + const text = await 
formatAtPosition(11, 0); + assert.equal(text, 'while(1)', 'Method definition with arguments was not formatted'); + }); + + test('Formatting line ending in string', async () => { + const text = await formatAtPosition(12, 0); assert.equal(text, 'x + """', 'Line ending in multiline string was not formatted'); }); diff --git a/src/test/pythonFiles/formatting/fileToFormatOnEnter.py b/src/test/pythonFiles/formatting/fileToFormatOnEnter.py index 67d533125ab2..779167118ffc 100644 --- a/src/test/pythonFiles/formatting/fileToFormatOnEnter.py +++ b/src/test/pythonFiles/formatting/fileToFormatOnEnter.py @@ -7,4 +7,6 @@ @x x.y if x<=1: +def __init__(self, age = 23) +while(1) x+""" From facb10613596b28f82e672266f130e57b4311847 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Mar 2018 16:30:21 -0700 Subject: [PATCH 11/83] More tests --- src/test/format/extension.onEnterFormat.test.ts | 11 ++++++++--- .../pythonFiles/formatting/fileToFormatOnEnter.py | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/test/format/extension.onEnterFormat.test.ts b/src/test/format/extension.onEnterFormat.test.ts index e34aeb0a0ed3..689129da78c8 100644 --- a/src/test/format/extension.onEnterFormat.test.ts +++ b/src/test/format/extension.onEnterFormat.test.ts @@ -64,18 +64,23 @@ suite('Formatting - OnEnter provider', () => { assert.equal(text, 'if x <= 1:', 'Line with unknown neighboring tokens was not formatted'); }); - test('Formatting method definition with arguments', async () => { + test('Formatting line with unknown neighboring tokens', async () => { const text = await formatAtPosition(10, 0); + assert.equal(text, 'if 1 <= x:', 'Line with unknown neighboring tokens was not formatted'); + }); + + test('Formatting method definition with arguments', async () => { + const text = await formatAtPosition(11, 0); assert.equal(text, 'def __init__(self, age=23)', 'Method definition with arguments was not formatted'); }); test('Formatting space after open brace', async () => { - const text = await formatAtPosition(11, 0); + const text = await formatAtPosition(12, 0); assert.equal(text, 'while(1)', 'Method definition with arguments was not formatted'); }); test('Formatting line ending in string', async () => { - const text = await formatAtPosition(12, 0); + const text = await formatAtPosition(13, 0); assert.equal(text, 'x + """', 'Line ending in multiline string was not formatted'); }); diff --git a/src/test/pythonFiles/formatting/fileToFormatOnEnter.py b/src/test/pythonFiles/formatting/fileToFormatOnEnter.py index 779167118ffc..8adfd1fa1233 100644 --- a/src/test/pythonFiles/formatting/fileToFormatOnEnter.py +++ b/src/test/pythonFiles/formatting/fileToFormatOnEnter.py @@ -7,6 +7,7 @@ @x x.y if x<=1: +if 1<=x: def __init__(self, age = 23) while(1) x+""" From f113881370f26a52f159862bb822f0119225013c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Mar 2018 16:32:06 -0700 Subject: [PATCH 12/83] Typo --- src/client/formatters/lineFormatter.ts | 2 +- src/test/format/extension.onEnterFormat.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 7684c9337755..56e9b6cf7237 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -139,7 +139,7 @@ export class LineFormatter { } } - // In general, keep tokes separated + // In general, keep tokens separated this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); } diff 
--git a/src/test/format/extension.onEnterFormat.test.ts b/src/test/format/extension.onEnterFormat.test.ts index 689129da78c8..8f594d5e2559 100644 --- a/src/test/format/extension.onEnterFormat.test.ts +++ b/src/test/format/extension.onEnterFormat.test.ts @@ -76,7 +76,7 @@ suite('Formatting - OnEnter provider', () => { test('Formatting space after open brace', async () => { const text = await formatAtPosition(12, 0); - assert.equal(text, 'while(1)', 'Method definition with arguments was not formatted'); + assert.equal(text, 'while(1)', 'Space after open brace was not formatted'); }); test('Formatting line ending in string', async () => { From 3e76718c097e23b52a9ffbe5de27f18f512f3f5f Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 27 Mar 2018 18:15:11 -0700 Subject: [PATCH 13/83] Test --- src/client/formatters/lineFormatter.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 56e9b6cf7237..4c30d8c17baa 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -72,6 +72,10 @@ export class LineFormatter { this.builder.append(this.text.substring(t.start, t.end)); break; + case TokenType.Semicolon: + this.builder.append(';'); + break; + default: this.handleOther(t, i); break; @@ -132,7 +136,7 @@ export class LineFormatter { if (index > 0) { const prev = this.tokens.getItemAt(index - 1); - if (this.isOpenBraceType(prev.type)) { + if (this.isOpenBraceType(prev.type) || prev.type === TokenType.Colon) { // Don't insert space after (, [ or { this.builder.append(this.text.substring(t.start, t.end)); return; From 6e85dc68a3516175546fc6c65cddede03a58bcea Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 27 Mar 2018 21:47:51 -0700 Subject: [PATCH 14/83] Also better handle multiline arguments --- src/client/formatters/lineFormatter.ts | 47 ++++++++++++++----- .../format/extension.lineFormatter.test.ts | 16 +++++++ 2 files changed, 50 insertions(+), 13 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 4c30d8c17baa..694af69ea5ff 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -90,7 +90,7 @@ export class LineFormatter { const opCode = this.text.charCodeAt(t.start); switch (opCode) { case Char.Equal: - if (index >= 2 && this.handleEqual(t, index)) { + if (this.handleEqual(t, index)) { return; } break; @@ -110,13 +110,13 @@ export class LineFormatter { } private handleEqual(t: IToken, index: number): boolean { - if (this.braceCounter.isOpened(TokenType.OpenBrace)) { - // Check if this is = in function arguments. If so, do not - // add spaces around it. - if (this.isEqualsInsideArguments(index)) { - this.builder.append('='); - return true; - } + if (this.isMultipleStatements(index) && !this.braceCounter.isOpened(TokenType.OpenBrace)) { + return false; // x = 1; x, y = y, x + } + // Check if this is = in function arguments. If so, do not add spaces around it. 
+ if (this.isEqualsInsideArguments(index)) { + this.builder.append('='); + return true; } return false; } @@ -149,14 +149,23 @@ export class LineFormatter { } private isEqualsInsideArguments(index: number): boolean { - if (index < 2) { + if (index < 1) { return false; } const prev = this.tokens.getItemAt(index - 1); - const prevPrev = this.tokens.getItemAt(index - 2); - if (prev.type === TokenType.Identifier && - (prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace)) { - return true; + if (prev.type === TokenType.Identifier) { + if (index >= 2) { + // (x=1 or ,x=1 + const prevPrev = this.tokens.getItemAt(index - 2); + return prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace; + } else if (index < this.tokens.count - 2) { + const next = this.tokens.getItemAt(index + 1); + const nextNext = this.tokens.getItemAt(index + 2); + // x=1, or x=1) + if (this.isValueType(next.type)) { + return nextNext.type === TokenType.Comma || nextNext.type === TokenType.CloseBrace; + } + } } return false; } @@ -170,4 +179,16 @@ export class LineFormatter { private isBraceType(type: TokenType): boolean { return this.isOpenBraceType(type) || this.isCloseBraceType(type); } + private isValueType(type: TokenType): boolean { + return type === TokenType.Identifier || type === TokenType.Unknown || + type === TokenType.Number || type === TokenType.String; + } + private isMultipleStatements(index: number): boolean { + for (let i = index; i >= 0; i -= 1) { + if (this.tokens.getItemAt(i).type === TokenType.Semicolon) { + return true; + } + } + return false; + } } diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 842cb02d735d..79de72c5774a 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -65,4 +65,20 @@ suite('Formatting - line formatter', () => { const actual = formatter.formatLine(' # comment'); assert.equal(actual, ' # comment'); }); + test('Equals in first argument', () => { + const actual = formatter.formatLine('foo(x =0)'); + assert.equal(actual, 'foo(x=0)'); + }); + test('Equals in second argument', () => { + const actual = formatter.formatLine('foo(x,y= \"a\",'); + assert.equal(actual, 'foo(x, y=\"a\",'); + }); + test('Equals in multiline arguments', () => { + const actual = formatter.formatLine('x = 1,y =-2)'); + assert.equal(actual, 'x=1, y=-2)'); + }); + test('Equals in multiline arguments starting comma', () => { + const actual = formatter.formatLine(',x = 1,y =m)'); + assert.equal(actual, ', x=1, y=m)'); + }); }); From 99e037c0a2533c37faa3e7da54125f7fe1d7ae27 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Wed, 28 Mar 2018 10:09:43 -0700 Subject: [PATCH 15/83] Add a couple missing periods [skip ci] --- src/client/formatters/lineFormatter.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 694af69ea5ff..fc347235a525 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -28,7 +28,7 @@ export class LineFormatter { const ws = this.text.substr(0, this.tokens.getItemAt(0).start); if (ws.length > 0) { - this.builder.append(ws); // Preserve leading indentation + this.builder.append(ws); // Preserve leading indentation. 
} for (let i = 0; i < this.tokens.count; i += 1) { @@ -56,16 +56,16 @@ export class LineFormatter { break; case TokenType.Colon: - // x: 1 if not in slice, x[1:y] if inside the slice + // x: 1 if not in slice, x[1:y] if inside the slice. this.builder.append(':'); if (!this.braceCounter.isOpened(TokenType.OpenBracket) && (next && next.type !== TokenType.Colon)) { - // Not inside opened [[ ... ] sequence + // Not inside opened [[ ... ] sequence. this.builder.softAppendSpace(); } break; case TokenType.Comment: - // add space before in-line comment + // Add space before in-line comment. if (prev) { this.builder.softAppendSpace(); } @@ -129,7 +129,7 @@ export class LineFormatter { } if (this.isEqualsInsideArguments(index - 1)) { - // Don't add space around = inside function arguments + // Don't add space around = inside function arguments. this.builder.append(this.text.substring(t.start, t.end)); return; } @@ -137,13 +137,13 @@ export class LineFormatter { if (index > 0) { const prev = this.tokens.getItemAt(index - 1); if (this.isOpenBraceType(prev.type) || prev.type === TokenType.Colon) { - // Don't insert space after (, [ or { + // Don't insert space after (, [ or { . this.builder.append(this.text.substring(t.start, t.end)); return; } } - // In general, keep tokens separated + // In general, keep tokens separated. this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); } From 3caeab70e0a0952c87d92166a776366851185c81 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 14:47:16 -0800 Subject: [PATCH 16/83] Undo changes --- pythonFiles/completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index e530be32b367..99f8582c614c 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -88,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(p.description[6:] for p in completion.params if p)) + ', '.join(self._get_param_name(p.description) for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. From eeb1f11f7132e3c9c3458651d4720370b48f06d1 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 13 Feb 2018 15:44:21 -0800 Subject: [PATCH 17/83] Test fixes --- pythonFiles/completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index 99f8582c614c..e530be32b367 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -88,7 +88,7 @@ def _generate_signature(self, completion): return '' return '%s(%s)' % ( completion.name, - ', '.join(self._get_param_name(p.description) for p in completion.params if p)) + ', '.join(p.description[6:] for p in completion.params if p)) def _get_call_signatures(self, script): """Extract call signatures from jedi.api.Script object in failsafe way. From f5f78c732e7a2f8639118d55ad25eec6d41aa729 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Thu, 1 Mar 2018 22:03:47 -0800 Subject: [PATCH 18/83] Increase timeout --- src/test/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index 135ca5d8b6c1..22ffd45a0675 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -21,7 +21,7 @@ const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 
'Debug' : undefined; const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, - timeout: 25000, + timeout: 35000, retries: 3, grep }; From 88744daf193d21c55c72de7acd0d162602152b9d Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 16:26:53 -0800 Subject: [PATCH 19/83] Remove double event listening --- src/client/providers/linterProvider.ts | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index fb66aab3971b..27aa85ffa61f 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -9,7 +9,6 @@ import { ConfigSettingMonitor } from '../common/configSettingMonitor'; import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; -import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; @@ -17,7 +16,6 @@ export class LinterProvider implements vscode.Disposable { private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; - private interpreterService: IInterpreterService; private documents: IDocumentManager; private configuration: IConfigurationService; private linterManager: ILinterManager; @@ -31,12 +29,9 @@ export class LinterProvider implements vscode.Disposable { this.fs = serviceContainer.get(IFileSystem); this.engine = serviceContainer.get(ILintingEngine); this.linterManager = serviceContainer.get(ILinterManager); - this.interpreterService = serviceContainer.get(IInterpreterService); this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); - this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); - this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); this.documents.onDidCloseTextDocument(e => this.onDocumentClosed(e), this.context.subscriptions); this.documents.onDidSaveTextDocument((e) => this.onDocumentSaved(e), this.context.subscriptions); From 65dde44f59ccf2754aa8203e933c9d96ef3be338 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 16:35:39 -0800 Subject: [PATCH 20/83] Remove test --- src/test/linters/lint.provider.test.ts | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 023ee86223be..51e49d3d35b9 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -113,16 +113,6 @@ suite('Linting - Provider', () => { engine.verify(x => x.lintDocument(document.object, 'save'), TypeMoq.Times.never()); }); - test('Lint on change interpreters', () => { - const e = new vscode.EventEmitter(); - interpreterService.setup(x => x.onDidChangeInterpreter).returns(() => e.event); - - // tslint:disable-next-line:no-unused-variable - const provider = new LinterProvider(context.object, serviceContainer); - e.fire(); - engine.verify(x => x.lintOpenPythonFiles(), TypeMoq.Times.once()); - }); - test('Lint on save pylintrc', async () => { docManager.setup(x => x.onDidSaveTextDocument).returns(() => emitter.event); document.setup(x => x.uri).returns(() => vscode.Uri.file('.pylintrc')); From c513f717892d167d0256e1465c3427f61aa65891 Mon 
Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 17:02:12 -0800 Subject: [PATCH 21/83] Revert "Remove test" This reverts commit e240c3fd117c38b9e6fdcbdd1ba2715789fefe48. --- src/test/linters/lint.provider.test.ts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 51e49d3d35b9..023ee86223be 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -113,6 +113,16 @@ suite('Linting - Provider', () => { engine.verify(x => x.lintDocument(document.object, 'save'), TypeMoq.Times.never()); }); + test('Lint on change interpreters', () => { + const e = new vscode.EventEmitter(); + interpreterService.setup(x => x.onDidChangeInterpreter).returns(() => e.event); + + // tslint:disable-next-line:no-unused-variable + const provider = new LinterProvider(context.object, serviceContainer); + e.fire(); + engine.verify(x => x.lintOpenPythonFiles(), TypeMoq.Times.once()); + }); + test('Lint on save pylintrc', async () => { docManager.setup(x => x.onDidSaveTextDocument).returns(() => emitter.event); document.setup(x => x.uri).returns(() => vscode.Uri.file('.pylintrc')); From ccb3886f22c08589c124c709372b46f3d5d54ee2 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 6 Mar 2018 17:02:47 -0800 Subject: [PATCH 22/83] Revert "Remove double event listening" This reverts commit af573be27372a79d5589e2134002cc753bb54f2a. --- src/client/providers/linterProvider.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/client/providers/linterProvider.ts b/src/client/providers/linterProvider.ts index 27aa85ffa61f..fb66aab3971b 100644 --- a/src/client/providers/linterProvider.ts +++ b/src/client/providers/linterProvider.ts @@ -9,6 +9,7 @@ import { ConfigSettingMonitor } from '../common/configSettingMonitor'; import { isTestExecution } from '../common/constants'; import { IFileSystem } from '../common/platform/types'; import { IConfigurationService } from '../common/types'; +import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { ILinterManager, ILintingEngine } from '../linters/types'; @@ -16,6 +17,7 @@ export class LinterProvider implements vscode.Disposable { private context: vscode.ExtensionContext; private disposables: vscode.Disposable[]; private configMonitor: ConfigSettingMonitor; + private interpreterService: IInterpreterService; private documents: IDocumentManager; private configuration: IConfigurationService; private linterManager: ILinterManager; @@ -29,9 +31,12 @@ export class LinterProvider implements vscode.Disposable { this.fs = serviceContainer.get(IFileSystem); this.engine = serviceContainer.get(ILintingEngine); this.linterManager = serviceContainer.get(ILinterManager); + this.interpreterService = serviceContainer.get(IInterpreterService); this.documents = serviceContainer.get(IDocumentManager); this.configuration = serviceContainer.get(IConfigurationService); + this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.engine.lintOpenPythonFiles())); + this.documents.onDidOpenTextDocument(e => this.onDocumentOpened(e), this.context.subscriptions); this.documents.onDidCloseTextDocument(e => this.onDocumentClosed(e), this.context.subscriptions); this.documents.onDidSaveTextDocument((e) => this.onDocumentSaved(e), this.context.subscriptions); From 106f4dba19b160315ecb5b88509573dafefe8397 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 27 Mar 2018 
15:35:31 -0700 Subject: [PATCH 23/83] Merge fix --- src/test/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index 22ffd45a0675..135ca5d8b6c1 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -21,7 +21,7 @@ const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : undefined; const options: MochaSetupOptions & { retries: number } = { ui: 'tdd', useColors: true, - timeout: 35000, + timeout: 25000, retries: 3, grep }; From e1da6a66086a17e998c0d5485c0ea845e5029935 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 10:31:41 -0700 Subject: [PATCH 24/83] #1257 On type formatting errors for args and kwargs --- src/client/formatters/lineFormatter.ts | 13 ++++++++++--- src/test/index.ts | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index fc347235a525..046533952464 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -95,15 +95,22 @@ export class LineFormatter { } break; case Char.Period: - this.builder.append('.'); - return; case Char.At: - this.builder.append('@'); + case Char.ExclamationMark: + this.builder.append(this.text[t.start]); return; default: break; } } + // Do not append space if operator is preceded by '(' or ',' as in foo(**kwarg) + if (index > 0) { + const prev = this.tokens.getItemAt(index - 1); + if (this.isOpenBraceType(prev.type) || prev.type === TokenType.Comma) { + this.builder.append(this.text.substring(t.start, t.end)); + return; + } + } this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); this.builder.softAppendSpace(); diff --git a/src/test/index.ts b/src/test/index.ts index 135ca5d8b6c1..848b18152792 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -13,7 +13,7 @@ process.env.IS_MULTI_ROOT_TEST = IS_MULTI_ROOT_TEST.toString(); // If running on CI server and we're running the debugger tests, then ensure we only run debug tests. // We do this to ensure we only run debugger test, as debugger tests are very flaky on CI. // So the solution is to run them separately and first on CI. -const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : undefined; +const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : 'line formatter'; // You can directly control Mocha options by uncommenting the following lines. // See https://github.com/mochajs/mocha/wiki/Using-mocha-programmatically#set-options for more info. 
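The handleOperator change in PATCH 24 above stops the on-type formatter from padding a unary or star operator with a space when it directly follows '(' or ',', which is what broke foo(*args, **kwargs) formatting in #1257. The short sketch below is not part of the patch series; it only illustrates the intended behaviour, assuming LineFormatter is importable from src/client/formatters/lineFormatter and constructible with no arguments (its fields are initialized inline as of PATCH 08). The expected output string is taken from the 'Operators without following space' test added in PATCH 25.

import { LineFormatter } from './src/client/formatters/lineFormatter';

// Operators such as *, ** and ! that appear right after '(' or ',' should stay
// attached to their operand instead of being padded with spaces (#1257).
const formatter = new LineFormatter();
const formatted = formatter.formatLine('foo( *a, ** b, ! c)');
// Expected, per the PATCH 25 test: 'foo(*a, **b, !c)'
console.log(formatted);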
From e78f0fba4412361f7144dad8bb7502478763d727 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 11:05:39 -0700 Subject: [PATCH 25/83] Handle f-strings --- src/client/language/tokenizer.ts | 12 ++++-- .../format/extension.lineFormatter.test.ts | 6 ++- src/test/language/tokenizer.test.ts | 37 +++++++++++++++++++ 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index c481c4201ac0..fcb29ed8b9a3 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -85,10 +85,16 @@ export class Tokenizer implements ITokenizer { } } + // tslint:disable-next-line:cyclomatic-complexity private handleCharacter(): boolean { + // f-strings + const fString = this.cs.currentChar === Char.f && (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote); + if (fString) { + this.cs.moveNext(); + } const quoteType = this.getQuoteType(); if (quoteType !== QuoteType.None) { - this.handleString(quoteType); + this.handleString(quoteType, fString); return true; } if (this.cs.currentChar === Char.Hash) { @@ -342,8 +348,8 @@ export class Tokenizer implements ITokenizer { return QuoteType.None; } - private handleString(quoteType: QuoteType): void { - const start = this.cs.position; + private handleString(quoteType: QuoteType, fString: boolean): void { + const start = fString ? this.cs.position - 1 : this.cs.position; if (quoteType === QuoteType.Single || quoteType === QuoteType.Double) { this.cs.moveNext(); this.skipToSingleEndQuote(quoteType === QuoteType.Single diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 79de72c5774a..3325c19382a2 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -73,7 +73,7 @@ suite('Formatting - line formatter', () => { const actual = formatter.formatLine('foo(x,y= \"a\",'); assert.equal(actual, 'foo(x, y=\"a\",'); }); - test('Equals in multiline arguments', () => { + test('Equals in multiline arguments', () => { const actual = formatter.formatLine('x = 1,y =-2)'); assert.equal(actual, 'x=1, y=-2)'); }); @@ -81,4 +81,8 @@ suite('Formatting - line formatter', () => { const actual = formatter.formatLine(',x = 1,y =m)'); assert.equal(actual, ', x=1, y=m)'); }); + test('Operators without following space', () => { + const actual = formatter.formatLine('foo( *a, ** b, ! 
c)'); + assert.equal(actual, 'foo(*a, **b, !c)'); + }); }); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 1d2bf15d2b7b..8d37f49dd791 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -79,6 +79,43 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).type, TokenType.String); assert.equal(tokens.getItemAt(0).length, 12); }); + test('Strings: single quoted f-string ', async () => { + const t = new Tokenizer(); + // tslint:disable-next-line:quotemark + const tokens = t.tokenize("a+f'quoted'"); + assert.equal(tokens.count, 3); + assert.equal(tokens.getItemAt(0).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(1).type, TokenType.Operator); + assert.equal(tokens.getItemAt(2).type, TokenType.String); + assert.equal(tokens.getItemAt(2).length, 9); + }); + test('Strings: double quoted f-string ', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('x(1,f"quoted")'); + assert.equal(tokens.count, 6); + assert.equal(tokens.getItemAt(0).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(1).type, TokenType.OpenBrace); + assert.equal(tokens.getItemAt(2).type, TokenType.Number); + assert.equal(tokens.getItemAt(3).type, TokenType.Comma); + assert.equal(tokens.getItemAt(4).type, TokenType.String); + assert.equal(tokens.getItemAt(4).length, 9); + assert.equal(tokens.getItemAt(5).type, TokenType.CloseBrace); + }); + test('Strings: single quoted multiline f-string ', async () => { + const t = new Tokenizer(); + // tslint:disable-next-line:quotemark + const tokens = t.tokenize("f'''quoted'''"); + assert.equal(tokens.count, 1); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 13); + }); + test('Strings: double quoted multiline f-string ', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('f"""quoted """'); + assert.equal(tokens.count, 1); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 14); + }); test('Comments', async () => { const t = new Tokenizer(); const tokens = t.tokenize(' #co"""mment1\n\t\n#comm\'ent2 '); From 725cf7199757073e927a3b874a02d1c43e92e2c2 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 11:33:50 -0700 Subject: [PATCH 26/83] Stop importing from test code --- src/client/extension.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/extension.ts b/src/client/extension.ts index e86959d62255..1617b18cf44c 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -11,7 +11,6 @@ import { extensions, IndentAction, languages, Memento, OutputChannel, window } from 'vscode'; -import { IS_ANALYSIS_ENGINE_TEST } from '../test/constants'; import { AnalysisExtensionActivator } from './activation/analysis'; import { ClassicExtensionActivator } from './activation/classic'; import { IExtensionActivator } from './activation/types'; @@ -75,7 +74,8 @@ export async function activate(context: ExtensionContext) { const configuration = serviceManager.get(IConfigurationService); const pythonSettings = configuration.getSettings(); - const activator: IExtensionActivator = IS_ANALYSIS_ENGINE_TEST || !pythonSettings.jediEnabled + const analysisEngineTest = process.env.VSC_PYTHON_ANALYSIS === '1'; + const activator: IExtensionActivator = analysisEngineTest || !pythonSettings.jediEnabled ? 
new AnalysisExtensionActivator(serviceManager, pythonSettings) : new ClassicExtensionActivator(serviceManager, pythonSettings); From 5cd6d45931c2da9ee3685b0c61daa4bd9b9b9f39 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 11:54:47 -0700 Subject: [PATCH 27/83] #1308 Single line statements leading to an indentation on the next line --- src/client/extension.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/client/extension.ts b/src/client/extension.ts index 1617b18cf44c..10abaf6ef80d 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -107,6 +107,10 @@ export async function activate(context: ExtensionContext) { // tslint:disable-next-line:no-non-null-assertion languages.setLanguageConfiguration(PYTHON.language!, { onEnterRules: [ + { + beforeText: /:\s*pass\s*$/, + action: { indentAction: IndentAction.None } + }, { beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async)\b.*/, action: { indentAction: IndentAction.Indent } From 27613db0db2efdbbc6468c7a0e4ff06e3c34cdc1 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 12:06:09 -0700 Subject: [PATCH 28/83] #726 editing python after inline if statement invalid indent --- src/client/extension.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/extension.ts b/src/client/extension.ts index 10abaf6ef80d..2328ff6ef58f 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -108,11 +108,11 @@ export async function activate(context: ExtensionContext) { languages.setLanguageConfiguration(PYTHON.language!, { onEnterRules: [ { - beforeText: /:\s*pass\s*$/, + beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except)\b.*:\s*\S+/, action: { indentAction: IndentAction.None } }, { - beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async)\b.*/, + beforeText: /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async)\b.*:\s*/, action: { indentAction: IndentAction.Indent } }, { From 8061a209b63c57faeb1c46271793db9e7010ae59 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 12:36:39 -0700 Subject: [PATCH 29/83] Undo change --- src/test/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/index.ts b/src/test/index.ts index 848b18152792..135ca5d8b6c1 100644 --- a/src/test/index.ts +++ b/src/test/index.ts @@ -13,7 +13,7 @@ process.env.IS_MULTI_ROOT_TEST = IS_MULTI_ROOT_TEST.toString(); // If running on CI server and we're running the debugger tests, then ensure we only run debug tests. // We do this to ensure we only run debugger test, as debugger tests are very flaky on CI. // So the solution is to run them separately and first on CI. -const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : 'line formatter'; +const grep = IS_CI_SERVER && IS_CI_SERVER_TEST_DEBUGGER ? 'Debug' : undefined; // You can directly control Mocha options by uncommenting the following lines. // See https://github.com/mochajs/mocha/wiki/Using-mocha-programmatically#set-options for more info. 
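Patches 27 and 28 above rework the onEnterRules regular expressions so that a single-line statement such as 'if x: pass' no longer causes the next line to be indented (#1308, #726), while a bare block opener like 'if x:' still does. The snippet below is only an illustrative harness, not extension code: the two regex literals are copied verbatim from the PATCH 28 diff, and the nextLineIndents helper merely mimics VS Code's in-order, first-match-wins evaluation of onEnterRules.

// Regex literals copied from the PATCH 28 diff; the surrounding harness is illustrative only.
const noIndentRule = /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except)\b.*:\s*\S+/;
const indentRule = /^\s*(?:def|class|for|if|elif|else|while|try|with|finally|except|async)\b.*:\s*/;

// onEnterRules are evaluated in order and the first match wins, so the
// "inline statement" rule has to be registered before the "block opener" rule.
function nextLineIndents(previousLine: string): boolean {
    if (noIndentRule.test(previousLine)) {
        return false; // e.g. 'if x: pass' keeps the current indentation
    }
    return indentRule.test(previousLine); // e.g. 'if x:' indents the next line
}

console.log(nextLineIndents('if x: pass')); // false
console.log(nextLineIndents('if x:'));      // true
console.log(nextLineIndents('x = 1'));      // false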
From 17dc292c0aa25696886906129764c3360c74bf8f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 5 Apr 2018 15:59:29 -0700 Subject: [PATCH 30/83] Move constant --- src/client/common/constants.ts | 3 +++ src/client/extension.ts | 5 ++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/client/common/constants.ts b/src/client/common/constants.ts index 2995452cb2f7..de0c7d260a9f 100644 --- a/src/client/common/constants.ts +++ b/src/client/common/constants.ts @@ -69,5 +69,8 @@ export function isTestExecution(): boolean { // tslint:disable-next-line:interface-name no-string-literal return process.env['VSC_PYTHON_CI_TEST'] === '1'; } +export function isPythonAnalysisEngineTest(): boolean { + return process.env.VSC_PYTHON_ANALYSIS === '1'; +} export const EXTENSION_ROOT_DIR = path.join(__dirname, '..', '..', '..'); diff --git a/src/client/extension.ts b/src/client/extension.ts index 2328ff6ef58f..04457bb99a15 100644 --- a/src/client/extension.ts +++ b/src/client/extension.ts @@ -15,7 +15,7 @@ import { AnalysisExtensionActivator } from './activation/analysis'; import { ClassicExtensionActivator } from './activation/classic'; import { IExtensionActivator } from './activation/types'; import { PythonSettings } from './common/configSettings'; -import { STANDARD_OUTPUT_CHANNEL } from './common/constants'; +import { isPythonAnalysisEngineTest, STANDARD_OUTPUT_CHANNEL } from './common/constants'; import { FeatureDeprecationManager } from './common/featureDeprecationManager'; import { createDeferred } from './common/helpers'; import { PythonInstaller } from './common/installer/pythonInstallation'; @@ -74,8 +74,7 @@ export async function activate(context: ExtensionContext) { const configuration = serviceManager.get(IConfigurationService); const pythonSettings = configuration.getSettings(); - const analysisEngineTest = process.env.VSC_PYTHON_ANALYSIS === '1'; - const activator: IExtensionActivator = analysisEngineTest || !pythonSettings.jediEnabled + const activator: IExtensionActivator = isPythonAnalysisEngineTest() || !pythonSettings.jediEnabled ? 
new AnalysisExtensionActivator(serviceManager, pythonSettings) : new ClassicExtensionActivator(serviceManager, pythonSettings); From 65964b9dc67d5c6a571b8e4ea8636825c464fa4f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 10 Apr 2018 14:16:11 -0700 Subject: [PATCH 31/83] Harden LS startup error checks --- src/client/activation/analysis.ts | 36 +++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 554daace8d96..cfc03a4d0d5d 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -3,9 +3,11 @@ import * as path from 'path'; import { ExtensionContext, OutputChannel } from 'vscode'; -import { Disposable, LanguageClient, LanguageClientOptions, ServerOptions } from 'vscode-languageclient'; +import { Message } from 'vscode-jsonrpc'; +import { CloseAction, Disposable, ErrorAction, ErrorHandler, LanguageClient, LanguageClientOptions, ServerOptions } from 'vscode-languageclient'; import { IApplicationShell } from '../common/application/types'; import { isTestExecution, STANDARD_OUTPUT_CHANNEL } from '../common/constants'; +import { createDeferred, Deferred } from '../common/helpers'; import { IFileSystem, IPlatformService } from '../common/platform/types'; import { IProcessService } from '../common/process/types'; import { StopWatch } from '../common/stopWatch'; @@ -21,6 +23,18 @@ const dotNetCommand = 'dotnet'; const languageClientName = 'Python Tools'; const analysisEngineFolder = 'analysis'; +class LanguageServerStartupErrorHandler implements ErrorHandler { + constructor(private readonly deferred: Deferred) { } + public error(error: Error, message: Message, count: number): ErrorAction { + this.deferred.reject(); + return ErrorAction.Shutdown; + } + public closed(): CloseAction { + this.deferred.reject(); + return CloseAction.DoNotRestart; + } +} + export class AnalysisExtensionActivator implements IExtensionActivator { private readonly configuration: IConfigurationService; private readonly appShell: IApplicationShell; @@ -92,16 +106,23 @@ export class AnalysisExtensionActivator implements IExtensionActivator { private async tryStartLanguageClient(context: ExtensionContext, lc: LanguageClient): Promise { let disposable: Disposable | undefined; + const deferred = createDeferred(); try { + lc.clientOptions.errorHandler = new LanguageServerStartupErrorHandler(deferred); + disposable = lc.start(); - await lc.onReady(); + lc.onReady() + .then(() => deferred.resolve()) + .catch(ex => deferred.reject()); + await deferred.promise; + this.output.appendLine(`Language server ready: ${this.sw.elapsedTime} ms`); context.subscriptions.push(disposable); } catch (ex) { if (disposable) { disposable.dispose(); - throw ex; } + throw ex; } } @@ -157,12 +178,8 @@ export class AnalysisExtensionActivator implements IExtensionActivator { // tslint:disable-next-line:no-string-literal properties['SearchPaths'] = searchPaths; - if (isTestExecution()) { - // tslint:disable-next-line:no-string-literal - properties['TestEnvironment'] = true; - } - const selector: string[] = [PYTHON]; + // Options to control the language client return { // Register the server for Python documents @@ -181,7 +198,8 @@ export class AnalysisExtensionActivator implements IExtensionActivator { trimDocumentationText: false, maxDocumentationTextLength: 0 }, - asyncStartup: true + asyncStartup: true, + testEnvironment: isTestExecution() } }; } From 4bf5a4cd83173a18966e34ff76ed7ed0ccd98df0 Mon Sep 
17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 10 Apr 2018 14:33:56 -0700 Subject: [PATCH 32/83] #1364 Intellisense doesn't work after specific const string --- src/client/language/tokenizer.ts | 3 +++ src/test/language/tokenizer.test.ts | 17 +++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index fcb29ed8b9a3..e1c8c4b03d9e 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -366,6 +366,9 @@ export class Tokenizer implements ITokenizer { private skipToSingleEndQuote(quote: number): void { while (!this.cs.isEndOfStream()) { + if (this.cs.currentChar === Char.LineFeed || this.cs.currentChar === Char.CarriageReturn) { + return; // Unterminated single-line string + } if (this.cs.currentChar === Char.Backslash && this.cs.nextChar === quote) { this.cs.advance(2); continue; diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 8d37f49dd791..202f0c774297 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -116,6 +116,23 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).type, TokenType.String); assert.equal(tokens.getItemAt(0).length, 14); }); + test('Strings: escape at the end of single quoted string ', async () => { + const t = new Tokenizer(); + // tslint:disable-next-line:quotemark + const tokens = t.tokenize("'quoted\\'\nx"); + assert.equal(tokens.count, 2); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 9); + assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); + }); + test('Strings: escape at the end of double quoted string ', async () => { + const t = new Tokenizer(); + const tokens = t.tokenize('"quoted\\"\nx'); + assert.equal(tokens.count, 2); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 9); + assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); + }); test('Comments', async () => { const t = new Tokenizer(); const tokens = t.tokenize(' #co"""mment1\n\t\n#comm\'ent2 '); From ddbd295e593a4b052b605103d82b95cbf1c7d1d1 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 12 Apr 2018 12:01:47 -0700 Subject: [PATCH 33/83] Telemetry for the analysis enging --- src/client/activation/analysis.ts | 19 ++++++++++++++++++- src/client/telemetry/constants.ts | 4 ++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index cfc03a4d0d5d..9fcbb8ade27b 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -13,6 +13,13 @@ import { IProcessService } from '../common/process/types'; import { StopWatch } from '../common/stopWatch'; import { IConfigurationService, IOutputChannel, IPythonSettings } from '../common/types'; import { IServiceContainer } from '../ioc/types'; +import { + PYTHON_ANALYSIS_ENGINE_DOWNLOADED, + PYTHON_ANALYSIS_ENGINE_ENABLED, + PYTHON_ANALYSIS_ENGINE_ERROR, + PYTHON_ANALYSIS_ENGINE_STARTUP +} from '../telemetry/constants'; +import { getTelemetryReporter } from '../telemetry/telemetry'; import { AnalysisEngineDownloader } from './downloader'; import { InterpreterDataService } from './interpreterDataService'; import { PlatformData } from './platformData'; @@ -26,7 +33,7 @@ const analysisEngineFolder = 'analysis'; class LanguageServerStartupErrorHandler implements ErrorHandler { constructor(private readonly deferred: 
Deferred) { } public error(error: Error, message: Message, count: number): ErrorAction { - this.deferred.reject(); + this.deferred.reject(error); return ErrorAction.Shutdown; } public closed(): CloseAction { @@ -71,6 +78,9 @@ export class AnalysisExtensionActivator implements IExtensionActivator { const mscorlib = path.join(context.extensionPath, analysisEngineFolder, 'mscorlib.dll'); let downloadPackage = false; + const reporter = getTelemetryReporter(); + reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ENABLED); + if (!await this.fs.fileExistsAsync(mscorlib)) { // Depends on .NET Runtime or SDK this.languageClient = this.createSimpleLanguageClient(context, clientOptions); @@ -80,6 +90,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } catch (ex) { if (await this.isDotNetInstalled()) { this.appShell.showErrorMessage(`.NET Runtime appears to be installed but the language server did not start. Error ${ex}`); + reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ERROR, { error: 'Failed to start (MSIL)' }); return false; } // No .NET Runtime, no mscorlib - need to download self-contained package. @@ -90,6 +101,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { if (downloadPackage) { const downloader = new AnalysisEngineDownloader(this.services, analysisEngineFolder); await downloader.downloadAnalysisEngine(context); + reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_DOWNLOADED); } const serverModule = path.join(context.extensionPath, analysisEngineFolder, this.platformData.getEngineExecutableName()); @@ -100,6 +112,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { return true; } catch (ex) { this.appShell.showErrorMessage(`Language server failed to start. Error ${ex}`); + reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ERROR, { error: 'Failed to start (platform)' }); return false; } } @@ -108,6 +121,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { let disposable: Disposable | undefined; const deferred = createDeferred(); try { + const sw = new StopWatch(); lc.clientOptions.errorHandler = new LanguageServerStartupErrorHandler(deferred); disposable = lc.start(); @@ -118,6 +132,9 @@ export class AnalysisExtensionActivator implements IExtensionActivator { this.output.appendLine(`Language server ready: ${this.sw.elapsedTime} ms`); context.subscriptions.push(disposable); + + const reporter = getTelemetryReporter(); + reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_STARTUP, {}, { startup_time: sw.elapsedTime }); } catch (ex) { if (disposable) { disposable.dispose(); diff --git a/src/client/telemetry/constants.ts b/src/client/telemetry/constants.ts index bf02b07c63c7..be0cdc8a2c21 100644 --- a/src/client/telemetry/constants.ts +++ b/src/client/telemetry/constants.ts @@ -30,3 +30,7 @@ export const UNITTEST_STOP = 'UNITTEST.STOP'; export const UNITTEST_RUN = 'UNITTEST.RUN'; export const UNITTEST_DISCOVER = 'UNITTEST.DISCOVER'; export const UNITTEST_VIEW_OUTPUT = 'UNITTEST.VIEW_OUTPUT'; +export const PYTHON_ANALYSIS_ENGINE_ENABLED = 'PYTHON_ANALYSIS_ENGINE.ENABLED'; +export const PYTHON_ANALYSIS_ENGINE_DOWNLOADED = 'PYTHON_ANALYSIS_ENGINE.DOWNLOADED'; +export const PYTHON_ANALYSIS_ENGINE_ERROR = 'PYTHON_ANALYSIS_ENGINE.ERROR'; +export const PYTHON_ANALYSIS_ENGINE_STARTUP = 'PYTHON_ANALYSIS_ENGINE.STARTUP'; From d4afb6c26b2efe00584c8c4e056529080b771d14 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 13 Apr 2018 10:28:53 -0700 Subject: [PATCH 34/83] PR feedback --- 
src/client/activation/analysis.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 9fcbb8ade27b..f6702f075ddc 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -127,7 +127,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { disposable = lc.start(); lc.onReady() .then(() => deferred.resolve()) - .catch(ex => deferred.reject()); + .catch(deferred.reject); await deferred.promise; this.output.appendLine(`Language server ready: ${this.sw.elapsedTime} ms`); From 12186b8a1e5779142270d67486ea4842bddd806f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 16 Apr 2018 10:24:35 -0700 Subject: [PATCH 35/83] Fix typo --- src/client/common/configSettings.ts | 11 ++++++----- src/client/common/types.ts | 8 ++++++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/client/common/configSettings.ts b/src/client/common/configSettings.ts index b88b82ce65bf..62d75c0ec0ba 100644 --- a/src/client/common/configSettings.ts +++ b/src/client/common/configSettings.ts @@ -6,7 +6,7 @@ import * as path from 'path'; import { ConfigurationTarget, DiagnosticSeverity, Disposable, Uri, workspace } from 'vscode'; import { isTestExecution } from './constants'; import { - IAutoCompeteSettings, + IAutoCompleteSettings, IFormattingSettings, ILintingSettings, IPythonSettings, @@ -35,7 +35,7 @@ export class PythonSettings extends EventEmitter implements IPythonSettings { public devOptions: string[] = []; public linting?: ILintingSettings; public formatting?: IFormattingSettings; - public autoComplete?: IAutoCompeteSettings; + public autoComplete?: IAutoCompleteSettings; public unitTest?: IUnitTestSettings; public terminal?: ITerminalSettings; public sortImports?: ISortImportSettings; @@ -219,9 +219,9 @@ export class PythonSettings extends EventEmitter implements IPythonSettings { this.formatting.yapfPath = getAbsolutePath(systemVariables.resolveAny(this.formatting.yapfPath), workspaceRoot); // tslint:disable-next-line:no-backbone-get-set-outside-model no-non-null-assertion - const autoCompleteSettings = systemVariables.resolveAny(pythonSettings.get('autoComplete'))!; + const autoCompleteSettings = systemVariables.resolveAny(pythonSettings.get('autoComplete'))!; if (this.autoComplete) { - Object.assign(this.autoComplete, autoCompleteSettings); + Object.assign(this.autoComplete, autoCompleteSettings); } else { this.autoComplete = autoCompleteSettings; } @@ -229,7 +229,8 @@ export class PythonSettings extends EventEmitter implements IPythonSettings { this.autoComplete = this.autoComplete ? 
this.autoComplete : { extraPaths: [], addBrackets: false, - preloadModules: [] + preloadModules: [], + showAdvancedMembers: false }; // tslint:disable-next-line:no-backbone-get-set-outside-model no-non-null-assertion diff --git a/src/client/common/types.ts b/src/client/common/types.ts index f64617178288..5e16a4557786 100644 --- a/src/client/common/types.ts +++ b/src/client/common/types.ts @@ -105,7 +105,7 @@ export interface IPythonSettings { readonly linting?: ILintingSettings; readonly formatting?: IFormattingSettings; readonly unitTest?: IUnitTestSettings; - readonly autoComplete?: IAutoCompeteSettings; + readonly autoComplete?: IAutoCompleteSettings; readonly terminal?: ITerminalSettings; readonly sortImports?: ISortImportSettings; readonly workspaceSymbols?: IWorkspaceSymbolSettings; @@ -194,10 +194,11 @@ export interface IFormattingSettings { yapfPath: string; readonly yapfArgs: string[]; } -export interface IAutoCompeteSettings { +export interface IAutoCompleteSettings { readonly addBrackets: boolean; readonly extraPaths: string[]; readonly preloadModules: string[]; + readonly showAdvancedMembers: boolean; } export interface IWorkspaceSymbolSettings { readonly enabled: boolean; @@ -212,6 +213,9 @@ export interface ITerminalSettings { readonly launchArgs: string[]; readonly activateEnvironment: boolean; } +export interface IPythonAnalysisEngineSettings { + readonly showAdvancedMembers: boolean; +} export const IConfigurationService = Symbol('IConfigurationService'); From ca90529fbf3e5c4820d70c3549583ee1ff4b5b12 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 16 Apr 2018 11:17:53 -0700 Subject: [PATCH 36/83] Test baseline update --- src/test/definitions/hover.ptvs.test.ts | 126 +++++++++++++++------- src/test/signature/signature.ptvs.test.ts | 15 ++- 2 files changed, 95 insertions(+), 46 deletions(-) diff --git a/src/test/definitions/hover.ptvs.test.ts b/src/test/definitions/hover.ptvs.test.ts index 8c3b981ca4bb..d2a456efd4bd 100644 --- a/src/test/definitions/hover.ptvs.test.ts +++ b/src/test/definitions/hover.ptvs.test.ts @@ -49,10 +49,15 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '30,11', 'End position is incorrect'); assert.equal(def[0].contents.length, 1, 'Invalid content items'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 2, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'obj.method1: method method1 of one.Class1 objects', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), 'This is method1', 'function signature line #2 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'obj.method1:', + 'method method1 of one.Class1 objects', + '```html', + 'This is method1', + '```' + ]; + verifySignatureLines(actual, expected); }); test('Across files', async () => { @@ -61,10 +66,15 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '1,0', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '1,12', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 2, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'two.ct().fun: method fun of two.ct objects', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), 
'This is fun', 'function signature line #2 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'two.ct().fun:', + 'method fun of two.ct objects', + '```html', + 'This is fun', + '```' + ]; + verifySignatureLines(actual, expected); }); test('With Unicode Characters', async () => { @@ -73,13 +83,18 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '25,0', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '25,7', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 5, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'Foo.bar: def four.Foo.bar()', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), '说明 - keep this line, it works', 'function signature line #2 is incorrect'); - assert.equal(lines[2].trim(), 'delete following line, it works', 'function signature line #3 is incorrect'); - assert.equal(lines[3].trim(), '如果存在需要等待审批或正在执行的任务,将不刷新页面', 'function signature line #4 is incorrect'); - assert.equal(lines[4].trim(), 'declared in Foo', 'function signature line #5 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'Foo.bar:', + 'four.Foo.bar() -> bool', + '```html', + '说明 - keep this line, it works', + 'delete following line, it works', + '如果存在需要等待审批或正在执行的任务,将不刷新页面', + '```', + 'declared in Foo' + ]; + verifySignatureLines(actual, expected); }); test('Across files with Unicode Characters', async () => { @@ -88,11 +103,16 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '1,0', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '1,16', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 3, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'four.showMessage: def four.showMessage()', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), 'Кюм ут жэмпэр пошжим льаборэж, коммюны янтэрэсщэт нам ед, декта игнота ныморэ жят эи.', 'function signature line #2 is incorrect'); - assert.equal(lines[2].trim(), 'Шэа декам экшырки эи, эи зыд эррэм докэндё, векж факэтэ пэрчыквюэрёж ку.', 'function signature line #3 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'four.showMessage:', + 'four.showMessage()', + '```html', + 'Кюм ут жэмпэр пошжим льаборэж, коммюны янтэрэсщэт нам ед, декта игнота ныморэ жят эи.', + 'Шэа декам экшырки эи, эи зыд эррэм докэндё, векж факэтэ пэрчыквюэрёж ку.', + '```' + ]; + verifySignatureLines(actual, expected); }); test('Nothing for keywords (class)', async () => { @@ -111,10 +131,22 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '11,7', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '11,18', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 9, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'misc.Random: class misc.Random(_random.Random)', 'function signature line #1 is 
incorrect'); - assert.equal(lines[1].trim(), 'Random number generator base class used by bound module functions.', 'function signature line #2 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'misc.Random:', + 'class misc.Random(_random.Random)', + 'Random number generator base class used by bound module functions.', + '```html', + 'Used to instantiate instances of Random to get generators that don\'t', + 'share state.', + 'Class Random can also be subclassed if you want to use a different basic', + 'generator of your own devising: in that case, override the following', + 'methods: random(), seed(), getstate(), and setstate().', + 'Optionally, implement a getrandbits() method so that randrange()', + 'can cover arbitrarily large ranges.', + '```' + ]; + verifySignatureLines(actual, expected); }); test('Highlight Method', async () => { @@ -123,10 +155,13 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '12,0', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '12,12', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 2, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'rnd2.randint: method randint of misc.Random objects -> int', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), 'Return random integer in range [a, b], including both end points.', 'function signature line #2 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'rnd2.randint:', + 'method randint of misc.Random objects -> int', + 'Return random integer in range [a, b], including both end points.' + ]; + verifySignatureLines(actual, expected); }); test('Highlight Function', async () => { @@ -135,11 +170,14 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '8,6', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '8,15', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 3, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'math.acos: built-in function acos(x)', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), 'acos(x)', 'function signature line #2 is incorrect'); - assert.equal(lines[2].trim(), 'Return the arc cosine (measured in radians) of x.', 'function signature line #3 is incorrect'); + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'math.acos:', + 'built-in function acos(x)', + 'acos(x)', + 'Return the arc cosine (measured in radians) of x.' 
+ ]; + verifySignatureLines(actual, expected); }); test('Highlight Multiline Method Signature', async () => { @@ -148,11 +186,16 @@ suite('Hover Definition (Analysis Engine)', () => { assert.equal(`${def[0].range!.start.line},${def[0].range!.start.character}`, '14,4', 'Start position is incorrect'); assert.equal(`${def[0].range!.end.line},${def[0].range!.end.character}`, '14,15', 'End position is incorrect'); - const lines = normalizeMarkedString(def[0].contents[0]).splitLines(); - assert.equal(lines.length, 3, 'incorrect number of lines'); - assert.equal(lines[0].trim(), 'misc.Thread: class misc.Thread(_Verbose)', 'function signature line #1 is incorrect'); - assert.equal(lines[1].trim(), 'A class that represents a thread of control.', 'function signature line #2 is incorrect'); - + const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); + const expected = [ + 'misc.Thread:', + 'class misc.Thread(_Verbose)', + 'A class that represents a thread of control.', + '```html', + 'This class can be safely subclassed in a limited fashion.', + '```' + ]; + verifySignatureLines(actual, expected); }); test('Variable', async () => { @@ -181,4 +224,11 @@ suite('Hover Definition (Analysis Engine)', () => { assert.fail(contents, '', '\'Return a capitalized version of S/Return a copy of the string S with only its first character\' message missing', 'compare'); } }); + + function verifySignatureLines(actual: string[], expected: string[]) { + assert.equal(actual.length, expected.length, 'incorrect number of lines'); + for (let i = 0; i < actual.length; i += 1) { + assert.equal(actual[i].trim(), expected[i], `signature line ${i + 1} is incorrect`); + } + } }); diff --git a/src/test/signature/signature.ptvs.test.ts b/src/test/signature/signature.ptvs.test.ts index 68720e33cde1..ad8e58508342 100644 --- a/src/test/signature/signature.ptvs.test.ts +++ b/src/test/signature/signature.ptvs.test.ts @@ -74,14 +74,13 @@ suite('Signatures (Analysis Engine)', () => { new SignatureHelpResult(0, 3, 1, -1, null), new SignatureHelpResult(0, 4, 1, -1, null), new SignatureHelpResult(0, 5, 1, -1, null), - new SignatureHelpResult(0, 6, 1, 0, 'stop'), - new SignatureHelpResult(0, 7, 1, 0, 'stop') - // https://github.com/Microsoft/PTVS/issues/3869 - // new SignatureHelpResult(0, 8, 1, 1, 'stop'), - // new SignatureHelpResult(0, 9, 1, 1, 'stop'), - // new SignatureHelpResult(0, 10, 1, 1, 'stop'), - // new SignatureHelpResult(0, 11, 1, 2, 'step'), - // new SignatureHelpResult(1, 0, 1, 2, 'step') + new SignatureHelpResult(0, 6, 1, 0, 'start'), + new SignatureHelpResult(0, 7, 1, 0, 'start'), + new SignatureHelpResult(0, 8, 1, 1, 'stop'), + new SignatureHelpResult(0, 9, 1, 1, 'stop'), + new SignatureHelpResult(0, 10, 1, 1, 'stop'), + new SignatureHelpResult(0, 11, 1, 2, 'step'), + new SignatureHelpResult(1, 0, 1, 2, 'step') ]; const document = await openDocument(path.join(autoCompPath, 'basicSig.py')); From a7267b537c1871d80b21d9b67da68d2a4ba0babc Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 16 Apr 2018 15:29:05 -0700 Subject: [PATCH 37/83] Jedi 0.12 --- pythonFiles/jedi/__init__.py | 6 +- pythonFiles/jedi/_compatibility.py | 314 +++++++-- pythonFiles/jedi/api/__init__.py | 148 ++-- pythonFiles/jedi/api/classes.py | 30 +- pythonFiles/jedi/api/completion.py | 24 +- pythonFiles/jedi/api/environment.py | 393 +++++++++++ pythonFiles/jedi/api/exceptions.py | 10 + pythonFiles/jedi/api/helpers.py | 41 +- pythonFiles/jedi/api/interpreter.py | 22 +- pythonFiles/jedi/api/keywords.py | 68 +- 
pythonFiles/jedi/api/project.py | 200 ++++++ pythonFiles/jedi/api/replstartup.py | 4 +- pythonFiles/jedi/cache.py | 26 +- pythonFiles/jedi/common/utils.py | 12 + pythonFiles/jedi/debug.py | 2 +- pythonFiles/jedi/evaluate/__init__.py | 51 +- pythonFiles/jedi/evaluate/analysis.py | 18 +- pythonFiles/jedi/evaluate/arguments.py | 61 +- pythonFiles/jedi/evaluate/base_context.py | 42 +- pythonFiles/jedi/evaluate/cache.py | 4 +- .../jedi/evaluate/compiled/__init__.py | 651 +----------------- pythonFiles/jedi/evaluate/compiled/access.py | 490 +++++++++++++ pythonFiles/jedi/evaluate/compiled/context.py | 474 +++++++++++++ pythonFiles/jedi/evaluate/compiled/fake.py | 221 ++---- .../jedi/evaluate/compiled/fake/builtins.pym | 5 +- .../jedi/evaluate/compiled/getattr_static.py | 9 +- pythonFiles/jedi/evaluate/compiled/mixed.py | 90 +-- .../evaluate/compiled/subprocess/__init__.py | 340 +++++++++ .../evaluate/compiled/subprocess/__main__.py | 49 ++ .../evaluate/compiled/subprocess/functions.py | 113 +++ .../jedi/evaluate/context/asynchronous.py | 38 + pythonFiles/jedi/evaluate/context/function.py | 48 +- pythonFiles/jedi/evaluate/context/instance.py | 85 ++- pythonFiles/jedi/evaluate/context/iterable.py | 219 +++--- pythonFiles/jedi/evaluate/context/klass.py | 10 +- pythonFiles/jedi/evaluate/context/module.py | 51 +- .../jedi/evaluate/context/namespace.py | 28 +- pythonFiles/jedi/evaluate/docstrings.py | 104 +-- pythonFiles/jedi/evaluate/dynamic.py | 74 +- pythonFiles/jedi/evaluate/filters.py | 201 ++++-- pythonFiles/jedi/evaluate/finder.py | 42 +- pythonFiles/jedi/evaluate/flow_analysis.py | 17 +- pythonFiles/jedi/evaluate/helpers.py | 21 +- pythonFiles/jedi/evaluate/imports.py | 249 +++---- pythonFiles/jedi/evaluate/param.py | 5 +- pythonFiles/jedi/evaluate/pep0484.py | 192 ++++-- pythonFiles/jedi/evaluate/project.py | 40 -- pythonFiles/jedi/evaluate/recursion.py | 11 +- pythonFiles/jedi/evaluate/site.py | 110 --- pythonFiles/jedi/evaluate/stdlib.py | 80 ++- pythonFiles/jedi/evaluate/syntax_tree.py | 231 ++++--- pythonFiles/jedi/evaluate/sys_path.py | 134 +--- pythonFiles/jedi/evaluate/utils.py | 44 ++ pythonFiles/jedi/parser_utils.py | 29 +- pythonFiles/jedi/refactoring.py | 19 +- pythonFiles/jedi/utils.py | 8 +- pythonFiles/parso/__init__.py | 2 +- pythonFiles/parso/_compatibility.py | 2 +- pythonFiles/parso/grammar.py | 36 +- pythonFiles/parso/pgen2/pgen.py | 5 +- pythonFiles/parso/python/diff.py | 14 +- pythonFiles/parso/python/errors.py | 119 +--- pythonFiles/parso/python/fstring.py | 211 ------ pythonFiles/parso/python/grammar26.txt | 3 +- pythonFiles/parso/python/grammar27.txt | 3 +- pythonFiles/parso/python/grammar33.txt | 3 +- pythonFiles/parso/python/grammar34.txt | 3 +- pythonFiles/parso/python/grammar35.txt | 3 +- pythonFiles/parso/python/grammar36.txt | 9 +- pythonFiles/parso/python/grammar37.txt | 9 +- pythonFiles/parso/python/parser.py | 26 +- pythonFiles/parso/python/token.py | 9 + pythonFiles/parso/python/tokenize.py | 236 ++++++- pythonFiles/parso/python/tree.py | 27 + pythonFiles/parso/tree.py | 3 +- src/client/providers/jediProxy.ts | 193 +++--- src/client/providers/signatureProvider.ts | 34 +- src/test/signature/signature.jedi.test.ts | 16 +- 78 files changed, 4415 insertions(+), 2559 deletions(-) create mode 100644 pythonFiles/jedi/api/environment.py create mode 100644 pythonFiles/jedi/api/exceptions.py create mode 100644 pythonFiles/jedi/api/project.py create mode 100644 pythonFiles/jedi/common/utils.py create mode 100644 pythonFiles/jedi/evaluate/compiled/access.py create mode 
100644 pythonFiles/jedi/evaluate/compiled/context.py create mode 100644 pythonFiles/jedi/evaluate/compiled/subprocess/__init__.py create mode 100644 pythonFiles/jedi/evaluate/compiled/subprocess/__main__.py create mode 100644 pythonFiles/jedi/evaluate/compiled/subprocess/functions.py create mode 100644 pythonFiles/jedi/evaluate/context/asynchronous.py delete mode 100644 pythonFiles/jedi/evaluate/project.py delete mode 100644 pythonFiles/jedi/evaluate/site.py delete mode 100644 pythonFiles/parso/python/fstring.py diff --git a/pythonFiles/jedi/__init__.py b/pythonFiles/jedi/__init__.py index 1a1080ad2fd4..ff2de906d9cd 100644 --- a/pythonFiles/jedi/__init__.py +++ b/pythonFiles/jedi/__init__.py @@ -36,8 +36,12 @@ good text editor, while still having very good IDE features for Python. """ -__version__ = '0.11.1' +__version__ = '0.12.0' from jedi.api import Script, Interpreter, set_debug_function, \ preload_module, names from jedi import settings +from jedi.api.environment import find_virtualenvs, find_system_environments, \ + get_default_environment, InvalidPythonEnvironment, create_environment, \ + get_system_environment +from jedi.api.exceptions import InternalError diff --git a/pythonFiles/jedi/_compatibility.py b/pythonFiles/jedi/_compatibility.py index 52a20fe2c07c..8b55fb8f4a1d 100644 --- a/pythonFiles/jedi/_compatibility.py +++ b/pythonFiles/jedi/_compatibility.py @@ -1,25 +1,25 @@ """ -To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been +To ensure compatibility from Python ``2.7`` - ``3.x``, a module has been created. Clearly there is huge need to use conforming syntax. """ +import binascii +import errno import sys -import imp import os import re import pkgutil import warnings +import inspect +import subprocess try: import importlib except ImportError: pass -# Cannot use sys.version.major and minor names, because in Python 2.6 it's not -# a namedtuple. 
is_py3 = sys.version_info[0] >= 3 is_py33 = is_py3 and sys.version_info[1] >= 3 is_py34 = is_py3 and sys.version_info[1] >= 4 is_py35 = is_py3 and sys.version_info[1] >= 5 -is_py26 = not is_py3 and sys.version_info[1] < 7 py_version = int(str(sys.version_info[0]) + str(sys.version_info[1])) @@ -35,28 +35,24 @@ def close(self): del self.loader -def find_module_py34(string, path=None, fullname=None): - implicit_namespace_pkg = False +def find_module_py34(string, path=None, full_name=None): spec = None loader = None spec = importlib.machinery.PathFinder.find_spec(string, path) - if hasattr(spec, 'origin'): - origin = spec.origin - implicit_namespace_pkg = origin == 'namespace' - - # We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs - if implicit_namespace_pkg: - fullname = string if not path else fullname - implicit_ns_info = ImplicitNSInfo(fullname, spec.submodule_search_locations._path) - return None, implicit_ns_info, False - - # we have found the tail end of the dotted path - if hasattr(spec, 'loader'): + if spec is not None: + # We try to disambiguate implicit namespace pkgs with non implicit namespace pkgs + if not spec.has_location: + full_name = string if not path else full_name + implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path) + return None, implicit_ns_info, False + + # we have found the tail end of the dotted path loader = spec.loader return find_module_py33(string, path, loader) -def find_module_py33(string, path=None, loader=None, fullname=None): + +def find_module_py33(string, path=None, loader=None, full_name=None): loader = loader or importlib.machinery.PathFinder.find_module(string, path) if loader is None and path is None: # Fallback to find builtins @@ -74,7 +70,7 @@ def find_module_py33(string, path=None, loader=None, fullname=None): raise ImportError("Originally " + repr(e)) if loader is None: - raise ImportError("Couldn't find a loader for {0}".format(string)) + raise ImportError("Couldn't find a loader for {}".format(string)) try: is_package = loader.is_package(string) @@ -109,7 +105,10 @@ def find_module_py33(string, path=None, loader=None, fullname=None): return module_file, module_path, is_package -def find_module_pre_py33(string, path=None, fullname=None): +def find_module_pre_py33(string, path=None, full_name=None): + # This import is here, because in other places it will raise a + # DeprecationWarning. 
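The rewritten find_module_py34 above now flags implicit namespace packages by checking spec.has_location instead of comparing spec.origin against the literal string 'namespace'. A minimal, stdlib-only sketch of that distinction (the temporary directory and the 'nspkg' name are illustrative and not part of the patch):

    import importlib.machinery
    import os
    import tempfile

    # A directory on the search path with no __init__.py is an implicit
    # namespace package (PEP 420).
    base = tempfile.mkdtemp()
    os.makedirs(os.path.join(base, 'nspkg'))

    ns_spec = importlib.machinery.PathFinder.find_spec('nspkg', [base])
    print(ns_spec.has_location)                      # False -> namespace package
    print(list(ns_spec.submodule_search_locations))  # contributing directories

    regular_spec = importlib.machinery.PathFinder.find_spec('json')
    print(regular_spec.has_location)                 # True -> backed by a file
    print(regular_spec.origin)                       # .../json/__init__.py

Relying on has_location also sidesteps the fact that spec.origin for namespace packages changed across Python versions.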
+ import imp try: module_file, module_path, description = imp.find_module(string, path) module_type = description[2] @@ -127,14 +126,7 @@ def find_module_pre_py33(string, path=None, fullname=None): if loader: is_package = loader.is_package(string) is_archive = hasattr(loader, 'archive') - try: - module_path = loader.get_filename(string) - except AttributeError: - # fallback for py26 - try: - module_path = loader._get_filename(string) - except AttributeError: - continue + module_path = loader.get_filename(string) if is_package: module_path = os.path.dirname(module_path) if is_archive: @@ -142,14 +134,14 @@ def find_module_pre_py33(string, path=None, fullname=None): file = None if not is_package or is_archive: file = DummyFile(loader, string) - return (file, module_path, is_package) + return file, module_path, is_package except ImportError: pass - raise ImportError("No module named {0}".format(string)) + raise ImportError("No module named {}".format(string)) find_module = find_module_py33 if is_py33 else find_module_pre_py33 -find_module = find_module_py34 if is_py34 else find_module +find_module = find_module_py34 if is_py34 else find_module find_module.__doc__ = """ Provides information about a module. @@ -161,12 +153,80 @@ def find_module_pre_py33(string, path=None, fullname=None): """ +def _iter_modules(paths, prefix=''): + # Copy of pkgutil.iter_modules adapted to work with namespaces + + for path in paths: + importer = pkgutil.get_importer(path) + + if not isinstance(importer, importlib.machinery.FileFinder): + # We're only modifying the case for FileFinder. All the other cases + # still need to be checked (like zip-importing). Do this by just + # calling the pkgutil version. + for mod_info in pkgutil.iter_modules([path], prefix): + yield mod_info + continue + + # START COPY OF pkutils._iter_file_finder_modules. + if importer.path is None or not os.path.isdir(importer.path): + return + + yielded = {} + + try: + filenames = os.listdir(importer.path) + except OSError: + # ignore unreadable directories like import does + filenames = [] + filenames.sort() # handle packages before same-named modules + + for fn in filenames: + modname = inspect.getmodulename(fn) + if modname == '__init__' or modname in yielded: + continue + + # jedi addition: Avoid traversing special directories + if fn.startswith('.') or fn == '__pycache__': + continue + + path = os.path.join(importer.path, fn) + ispkg = False + + if not modname and os.path.isdir(path) and '.' not in fn: + modname = fn + # A few jedi modifications: Don't check if there's an + # __init__.py + try: + os.listdir(path) + except OSError: + # ignore unreadable directories like import does + continue + ispkg = True + + if modname and '.' not in modname: + yielded[modname] = 1 + yield importer, prefix + modname, ispkg + # END COPY + +iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules + + class ImplicitNSInfo(object): """Stores information returned from an implicit namespace spec""" def __init__(self, name, paths): self.name = name self.paths = paths + +if is_py3: + all_suffixes = importlib.machinery.all_suffixes +else: + def all_suffixes(): + # Is deprecated and raises a warning in Python 3.6. + import imp + return [suffix for suffix, _, _ in imp.get_suffixes()] + + # unicode function try: unicode = unicode @@ -208,7 +268,7 @@ def use_metaclass(meta, *bases): """ Create a class with a metaclass. 
""" if not bases: bases = (object,) - return meta("HackClass", bases, {}) + return meta("Py2CompatibilityMetaClass", bases, {}) try: @@ -219,19 +279,37 @@ def use_metaclass(meta, *bases): encoding = 'ascii' -def u(string): +def u(string, errors='strict'): """Cast to unicode DAMMIT! Written because Python2 repr always implicitly casts to a string, so we have to cast back to a unicode (and we now that we always deal with valid unicode, because we check that in the beginning). """ - if is_py3: - return str(string) - - if not isinstance(string, unicode): - return unicode(str(string), 'UTF-8') + if isinstance(string, bytes): + return unicode(string, encoding='UTF-8', errors=errors) return string + +def cast_path(obj): + """ + Take a bytes or str path and cast it to unicode. + + Apparently it is perfectly fine to pass both byte and unicode objects into + the sys.path. This probably means that byte paths are normal at other + places as well. + + Since this just really complicates everything and Python 2.7 will be EOL + soon anyway, just go with always strings. + """ + return u(obj, errors='replace') + + +def force_unicode(obj): + # Intentionally don't mix those two up, because those two code paths might + # be different in the future (maybe windows?). + return cast_path(obj) + + try: import builtins # module name in python 3 except ImportError: @@ -242,11 +320,6 @@ def u(string): def literal_eval(string): - # py3.0, py3.1 and py32 don't support unicode literals. Support those, I - # don't want to write two versions of the tokenizer. - if is_py3 and sys.version_info.minor < 3: - if re.match('[uU][\'"]', string): - string = string[1:] return ast.literal_eval(string) @@ -260,6 +333,11 @@ def literal_eval(string): except NameError: FileNotFoundError = IOError +try: + NotADirectoryError = NotADirectoryError +except NameError: + NotADirectoryError = IOError + def no_unicode_pprint(dct): """ @@ -273,6 +351,13 @@ def no_unicode_pprint(dct): print(re.sub("u'", "'", s)) +def print_to_stderr(*args): + if is_py3: + eval("print(*args, file=sys.stderr)") + else: + print >> sys.stderr, args + + def utf8_repr(func): """ ``__repr__`` methods in Python 2 don't allow unicode objects to be @@ -289,3 +374,142 @@ def wrapper(self): return func else: return wrapper + + +if is_py3: + import queue +else: + import Queue as queue + + +import pickle +if sys.version_info[:2] == (3, 3): + """ + Monkeypatch the unpickler in Python 3.3. This is needed, because the + argument `encoding='bytes'` is not supported in 3.3, but badly needed to + communicate with Python 2. + """ + + class NewUnpickler(pickle._Unpickler): + dispatch = dict(pickle._Unpickler.dispatch) + + def _decode_string(self, value): + # Used to allow strings from Python 2 to be decoded either as + # bytes or Unicode strings. This should be used only with the + # STRING, BINSTRING and SHORT_BINSTRING opcodes. 
+ if self.encoding == "bytes": + return value + else: + return value.decode(self.encoding, self.errors) + + def load_string(self): + data = self.readline()[:-1] + # Strip outermost quotes + if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'': + data = data[1:-1] + else: + raise pickle.UnpicklingError("the STRING opcode argument must be quoted") + self.append(self._decode_string(pickle.codecs.escape_decode(data)[0])) + dispatch[pickle.STRING[0]] = load_string + + def load_binstring(self): + # Deprecated BINSTRING uses signed 32-bit length + len, = pickle.struct.unpack('>> defs[0].type + >>> defs = [str(d.type) for d in defs] # It's unicode and in Py2 has u before it. + >>> defs[0] 'module' - >>> defs[1].type + >>> defs[1] 'class' - >>> defs[2].type + >>> defs[2] 'instance' - >>> defs[3].type + >>> defs[3] 'function' """ @@ -159,7 +157,7 @@ def to_reverse(): except IndexError: pass - if name.api_type == 'module': + if name.api_type in 'module': module_contexts = name.infer() if module_contexts: module_context, = module_contexts @@ -259,7 +257,7 @@ def docstring(self, raw=False, fast=True): @property def description(self): """A textual description of the object.""" - return u(self._name.string_name) + return self._name.string_name @property def full_name(self): @@ -324,9 +322,9 @@ def get_param_names(context): param_names = param_names[1:] elif isinstance(context, (instance.AbstractInstanceContext, ClassContext)): if isinstance(context, ClassContext): - search = '__init__' + search = u'__init__' else: - search = '__call__' + search = u'__call__' names = context.get_function_slot_names(search) if not names: return [] @@ -377,8 +375,7 @@ def get_line_code(self, before=0, after=0): if self.in_builtin_module(): return '' - path = self._name.get_root_context().py__file__() - lines = parser_cache[self._evaluator.grammar._hashed][path].lines + lines = self._name.get_root_context().code_lines index = self._name.start_pos[0] - 1 start_index = max(index - before, 0) @@ -406,7 +403,7 @@ def _complete(self, like_name): and self.type == 'Function': append = '(' - if isinstance(self._name, ParamName) and self._stack is not None: + if self._name.api_type == 'param' and self._stack is not None: node_names = list(self._stack.get_node_names(self._evaluator.grammar._pgen_grammar)) if 'trailer' in node_names and 'argument' not in node_names: append += '=' @@ -525,7 +522,7 @@ def description(self): if typ == 'function': # For the description we want a short and a pythonic way. typ = 'def' - return typ + ' ' + u(self._name.string_name) + return typ + ' ' + self._name.string_name elif typ == 'param': code = search_ancestor(tree_name, 'param').get_code( include_prefix=False, @@ -533,7 +530,6 @@ def description(self): ) return typ + ' ' + code - definition = tree_name.get_definition() or tree_name # Remove the prefix, because that's not what we want for get_code # here. @@ -555,7 +551,7 @@ def desc_with_module(self): .. todo:: Add full path. This function is should return a `module.class.function` path. 
""" - position = '' if self.in_builtin_module else '@%s' % (self.line) + position = '' if self.in_builtin_module else '@%s' % self.line return "%s:%s%s" % (self.module_name, self.description, position) @memoize_method diff --git a/pythonFiles/jedi/api/completion.py b/pythonFiles/jedi/api/completion.py index 559a4d3f8320..c88a031e4679 100644 --- a/pythonFiles/jedi/api/completion.py +++ b/pythonFiles/jedi/api/completion.py @@ -2,6 +2,7 @@ from parso.python import tree from parso.tree import search_ancestor, Leaf +from jedi._compatibility import Parameter from jedi import debug from jedi import settings from jedi.api import classes @@ -18,24 +19,21 @@ def get_call_signature_param_names(call_signatures): for call_sig in call_signatures: for p in call_sig.params: # Allow protected access, because it's a public API. - tree_name = p._name.tree_name - # Compiled modules typically don't allow keyword arguments. - if tree_name is not None: - # Allow access on _definition here, because it's a - # public API and we don't want to make the internal - # Name object public. - tree_param = tree.search_ancestor(tree_name, 'param') - if tree_param.star_count == 0: # no *args/**kwargs - yield p._name + if p._name.get_kind() in (Parameter.POSITIONAL_OR_KEYWORD, + Parameter.KEYWORD_ONLY): + yield p._name def filter_names(evaluator, completion_names, stack, like_name): comp_dct = {} + if settings.case_insensitive_completion: + like_name = like_name.lower() for name in completion_names: - if settings.case_insensitive_completion \ - and name.string_name.lower().startswith(like_name.lower()) \ - or name.string_name.startswith(like_name): + string = name.string_name + if settings.case_insensitive_completion: + string = string.lower() + if string.startswith(like_name): new = classes.Completion( evaluator, name, @@ -208,7 +206,7 @@ def _get_context_completions(self): def _get_keyword_completion_names(self, keywords_): for k in keywords_: - yield keywords.keyword(self._evaluator, k).name + yield keywords.KeywordName(self._evaluator, k) def _global_completions(self): context = get_user_scope(self._module_context, self._position) diff --git a/pythonFiles/jedi/api/environment.py b/pythonFiles/jedi/api/environment.py new file mode 100644 index 000000000000..51b390f36ab4 --- /dev/null +++ b/pythonFiles/jedi/api/environment.py @@ -0,0 +1,393 @@ +""" +Environments are a way to activate different Python versions or Virtualenvs for +static analysis. The Python binary in that environment is going to be executed. +""" +import os +import re +import sys +import hashlib +import filecmp +from subprocess import PIPE +from collections import namedtuple +# When dropping Python 2.7 support we should consider switching to +# `shutil.which`. +from distutils.spawn import find_executable + +from jedi._compatibility import GeneralizedPopen +from jedi.cache import memoize_method, time_cache +from jedi.evaluate.compiled.subprocess import get_subprocess, \ + EvaluatorSameProcess, EvaluatorSubprocess + +import parso + +_VersionInfo = namedtuple('VersionInfo', 'major minor micro') + +_SUPPORTED_PYTHONS = ['3.6', '3.5', '3.4', '3.3', '2.7'] +_SAFE_PATHS = ['/usr/bin', '/usr/local/bin'] +_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor) + + +class InvalidPythonEnvironment(Exception): + """ + If you see this exception, the Python executable or Virtualenv you have + been trying to use is probably not a correct Python version. 
+ """ + + +class _BaseEnvironment(object): + @memoize_method + def get_grammar(self): + version_string = '%s.%s' % (self.version_info.major, self.version_info.minor) + return parso.load_grammar(version=version_string) + + @property + def _sha256(self): + try: + return self._hash + except AttributeError: + self._hash = _calculate_sha256_for_file(self.executable) + return self._hash + + +class Environment(_BaseEnvironment): + """ + This class is supposed to be created by internal Jedi architecture. You + should not create it directly. Please use create_environment or the other + functions instead. It is then returned by that function. + """ + def __init__(self, path, executable): + self.path = os.path.abspath(path) + """ + The path to an environment, matches ``sys.prefix``. + """ + self.executable = os.path.abspath(executable) + """ + The Python executable, matches ``sys.executable``. + """ + self.version_info = self._get_version() + """ + + Like ``sys.version_info``. A tuple to show the current Environment's + Python version. + """ + + def _get_version(self): + try: + process = GeneralizedPopen([self.executable, '--version'], stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate() + retcode = process.poll() + if retcode: + raise InvalidPythonEnvironment() + except OSError: + raise InvalidPythonEnvironment() + + # Until Python 3.4 wthe version string is part of stderr, after that + # stdout. + output = stdout + stderr + match = re.match(br'Python (\d+)\.(\d+)\.(\d+)', output) + if match is None: + raise InvalidPythonEnvironment("--version not working") + + return _VersionInfo(*[int(m) for m in match.groups()]) + + def __repr__(self): + version = '.'.join(str(i) for i in self.version_info) + return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path) + + def get_evaluator_subprocess(self, evaluator): + return EvaluatorSubprocess(evaluator, self._get_subprocess()) + + def _get_subprocess(self): + return get_subprocess(self.executable) + + @memoize_method + def get_sys_path(self): + """ + The sys path for this environment. Does not include potential + modifications like ``sys.path.append``. + + :returns: list of str + """ + # It's pretty much impossible to generate the sys path without actually + # executing Python. The sys path (when starting with -S) itself depends + # on how the Python version was compiled (ENV variables). + # If you omit -S when starting Python (normal case), additionally + # site.py gets executed. + return self._get_subprocess().get_sys_path() + + +class SameEnvironment(Environment): + def __init__(self): + super(SameEnvironment, self).__init__(sys.prefix, sys.executable) + + def _get_version(self): + return _VersionInfo(*sys.version_info[:3]) + + +class InterpreterEnvironment(_BaseEnvironment): + def __init__(self): + self.version_info = _VersionInfo(*sys.version_info[:3]) + + def get_evaluator_subprocess(self, evaluator): + return EvaluatorSameProcess(evaluator) + + def get_sys_path(self): + return sys.path + + +def _get_virtual_env_from_var(): + var = os.environ.get('VIRTUAL_ENV') + if var is not None: + if var == sys.prefix: + return SameEnvironment() + + try: + return create_environment(var) + except InvalidPythonEnvironment: + pass + + +def _calculate_sha256_for_file(path): + sha256 = hashlib.sha256() + with open(path, 'rb') as f: + for block in iter(lambda: f.read(filecmp.BUFSIZE), b''): + sha256.update(block) + return sha256.hexdigest() + + +def get_default_environment(): + """ + Tries to return an active Virtualenv. 
If there is no VIRTUAL_ENV variable + set it will return the latest Python version installed on the system. This + makes it possible to use as many new Python features as possible when using + autocompletion and other functionality. + + :returns: :class:`Environment` + """ + virtual_env = _get_virtual_env_from_var() + if virtual_env is not None: + return virtual_env + + for environment in find_system_environments(): + return environment + + # If no Python Environment is found, use the environment we're already + # using. + return SameEnvironment() + + +@time_cache(seconds=10 * 60) # 10 Minutes +def get_cached_default_environment(): + return get_default_environment() + + +def find_virtualenvs(paths=None, **kwargs): + """ + :param paths: A list of paths in your file system to be scanned for + Virtualenvs. It will search in these paths and potentially execute the + Python binaries. Also the VIRTUAL_ENV variable will be checked if it + contains a valid Virtualenv. + :param safe: Default True. In case this is False, it will allow this + function to execute potential `python` environments. An attacker might + be able to drop an executable in a path this function is searching by + default. If the executable has not been installed by root, it will not + be executed. + + :yields: :class:`Environment` + """ + def py27_comp(paths=None, safe=True): + if paths is None: + paths = [] + + _used_paths = set() + + # Using this variable should be safe, because attackers might be able + # to drop files (via git) but not environment variables. + virtual_env = _get_virtual_env_from_var() + if virtual_env is not None: + yield virtual_env + _used_paths.add(virtual_env.path) + + for directory in paths: + if not os.path.isdir(directory): + continue + + directory = os.path.abspath(directory) + for path in os.listdir(directory): + path = os.path.join(directory, path) + if path in _used_paths: + # A path shouldn't be evaluated twice. + continue + _used_paths.add(path) + + try: + executable = _get_executable_path(path, safe=safe) + yield Environment(path, executable) + except InvalidPythonEnvironment: + pass + + return py27_comp(paths, **kwargs) + + +def find_system_environments(): + """ + Ignores virtualenvs and returns the Python versions that were installed on + your system. This might return nothing, if you're running Python e.g. from + a portable version. + + The environments are sorted from latest to oldest Python version. + + :yields: :class:`Environment` + """ + for version_string in _SUPPORTED_PYTHONS: + try: + yield get_system_environment(version_string) + except InvalidPythonEnvironment: + pass + + +# TODO: the logic to find the Python prefix is much more complicated than that. +# See Modules/getpath.c for UNIX and PC/getpathp.c for Windows in CPython's +# source code. A solution would be to deduce it by running the Python +# interpreter and printing the value of sys.prefix. +def _get_python_prefix(executable): + if os.name != 'nt': + return os.path.dirname(os.path.dirname(executable)) + landmark = os.path.join('Lib', 'os.py') + prefix = os.path.dirname(executable) + while prefix: + if os.path.join(prefix, landmark): + return prefix + prefix = os.path.dirname(prefix) + raise InvalidPythonEnvironment( + "Cannot find prefix of executable %s." % executable) + + +# TODO: this function should probably return a list of environments since +# multiple Python installations can be found on a system for the same version. 
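For reference, the environment layer being vendored here surfaces through jedi's public API roughly as follows. This is a hedged sketch that assumes jedi 0.12 is installed as a regular package (rather than run from this vendored copy); the output depends on which Pythons and virtualenvs exist on the machine:

    import jedi

    # Active virtualenv (via VIRTUAL_ENV) if any, otherwise the newest
    # interpreter found on the system, otherwise the one Jedi runs on.
    env = jedi.get_default_environment()
    print(env.executable, tuple(env.version_info))

    # System interpreters, newest supported version first.
    for e in jedi.find_system_environments():
        print(e)

    # Analysis is then performed against that environment's sys.path.
    script = jedi.Script('import collections\ncollections.Ord',
                         line=2, column=15, environment=env)
    print([c.name for c in script.completions()])

Because the chosen interpreter is executed in a subprocess, completions reflect the packages installed in that environment rather than in the one the extension itself runs on.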
+def get_system_environment(version): + """ + Return the first Python environment found for a string of the form 'X.Y' + where X and Y are the major and minor versions of Python. + + :raises: :exc:`.InvalidPythonEnvironment` + :returns: :class:`Environment` + """ + exe = find_executable('python' + version) + if exe: + if exe == sys.executable: + return SameEnvironment() + return Environment(_get_python_prefix(exe), exe) + + if os.name == 'nt': + for prefix, exe in _get_executables_from_windows_registry(version): + return Environment(prefix, exe) + raise InvalidPythonEnvironment("Cannot find executable python%s." % version) + + +def create_environment(path, safe=True): + """ + Make it possible to create an environment by hand. + + :raises: :exc:`.InvalidPythonEnvironment` + :returns: :class:`Environment` + """ + return Environment(path, _get_executable_path(path, safe=safe)) + + +def _get_executable_path(path, safe=True): + """ + Returns None if it's not actually a virtual env. + """ + + if os.name == 'nt': + python = os.path.join(path, 'Scripts', 'python.exe') + else: + python = os.path.join(path, 'bin', 'python') + if not os.path.exists(python): + raise InvalidPythonEnvironment("%s seems to be missing." % python) + + if safe and not _is_safe(python): + raise InvalidPythonEnvironment("The python binary is potentially unsafe.") + return python + + +def _get_executables_from_windows_registry(version): + # The winreg module is named _winreg on Python 2. + try: + import winreg + except ImportError: + import _winreg as winreg + + # TODO: support Python Anaconda. + sub_keys = [ + r'SOFTWARE\Python\PythonCore\{version}\InstallPath', + r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath', + r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath', + r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath' + ] + for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]: + for sub_key in sub_keys: + sub_key = sub_key.format(version=version) + try: + with winreg.OpenKey(root_key, sub_key) as key: + prefix = winreg.QueryValueEx(key, '')[0] + exe = os.path.join(prefix, 'python.exe') + if os.path.isfile(exe): + yield prefix, exe + except WindowsError: + pass + + +def _is_safe(executable_path): + # Resolve sym links. A venv typically is a symlink to a known Python + # binary. Only virtualenvs copy symlinks around. + real_path = os.path.realpath(executable_path) + + if _is_unix_safe_simple(real_path): + return True + + # Just check the list of known Python versions. If it's not in there, + # it's likely an attacker or some Python that was not properly + # installed in the system. + for environment in find_system_environments(): + if environment.executable == real_path: + return True + + # If the versions don't match, just compare the binary files. If we + # don't do that, only venvs will be working and not virtualenvs. + # venvs are symlinks while virtualenvs are actual copies of the + # Python files. + # This still means that if the system Python is updated and the + # virtualenv's Python is not (which is probably never going to get + # upgraded), it will not work with Jedi. IMO that's fine, because + # people should just be using venv. ~ dave + if environment._sha256 == _calculate_sha256_for_file(real_path): + return True + return False + + +def _is_unix_safe_simple(real_path): + if _is_unix_admin(): + # In case we are root, just be conservative and + # only execute known paths. 
+ return any(real_path.startswith(p) for p in _SAFE_PATHS) + + uid = os.stat(real_path).st_uid + # The interpreter needs to be owned by root. This means that it wasn't + # written by a user and therefore attacking Jedi is not as simple. + # The attack could look like the following: + # 1. A user clones a repository. + # 2. The repository has an innocent looking folder called foobar. jedi + # searches for the folder and executes foobar/bin/python --version if + # there's also a foobar/bin/activate. + # 3. The bin/python is obviously not a python script but a bash script or + # whatever the attacker wants. + return uid == 0 + + +def _is_unix_admin(): + try: + return os.getuid() == 0 + except AttributeError: + return False # Windows diff --git a/pythonFiles/jedi/api/exceptions.py b/pythonFiles/jedi/api/exceptions.py new file mode 100644 index 000000000000..99cebdb7ddb5 --- /dev/null +++ b/pythonFiles/jedi/api/exceptions.py @@ -0,0 +1,10 @@ +class _JediError(Exception): + pass + + +class InternalError(_JediError): + pass + + +class WrongVersion(_JediError): + pass diff --git a/pythonFiles/jedi/api/helpers.py b/pythonFiles/jedi/api/helpers.py index 2c4d8e0d10fc..221fc4dfe0d4 100644 --- a/pythonFiles/jedi/api/helpers.py +++ b/pythonFiles/jedi/api/helpers.py @@ -7,12 +7,13 @@ from parso.python.parser import Parser from parso.python import tree -from parso import split_lines from jedi._compatibility import u from jedi.evaluate.syntax_tree import eval_atom from jedi.evaluate.helpers import evaluate_call_of_leaf -from jedi.cache import time_cache +from jedi.evaluate.compiled import get_string_context_set +from jedi.evaluate.base_context import ContextSet +from jedi.cache import call_signature_time_cache CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) @@ -44,7 +45,7 @@ def _get_code(code_lines, start_pos, end_pos): lines[-1] = lines[-1][:end_pos[1]] # Remove first line indentation. lines[0] = lines[0][start_pos[1]:] - return '\n'.join(lines) + return ''.join(lines) class OnErrorLeaf(Exception): @@ -53,28 +54,11 @@ def error_leaf(self): return self.args[0] -def _is_on_comment(leaf, position): - comment_lines = split_lines(leaf.prefix) - difference = leaf.start_pos[0] - position[0] - prefix_start_pos = leaf.get_start_pos_of_prefix() - if difference == 0: - indent = leaf.start_pos[1] - elif position[0] == prefix_start_pos[0]: - indent = prefix_start_pos[1] - else: - indent = 0 - line = comment_lines[-difference - 1][:position[1] - indent] - return '#' in line - - def _get_code_for_stack(code_lines, module_node, position): leaf = module_node.get_leaf_for_position(position, include_prefixes=True) # It might happen that we're on whitespace or on a comment. This means # that we would not get the right leaf. if leaf.start_pos >= position: - if _is_on_comment(leaf, position): - return u('') - # If we're not on a comment simply get the previous leaf and proceed. leaf = leaf.get_previous_leaf() if leaf is None: @@ -125,6 +109,9 @@ def tokenize_without_endmarker(code): for token_ in tokens: if token_.string == safeword: raise EndMarkerReached() + elif token_.prefix.endswith(safeword): + # This happens with comments. + raise EndMarkerReached() else: yield token_ @@ -134,7 +121,7 @@ def tokenize_without_endmarker(code): # completion. # Use Z as a prefix because it's not part of a number suffix. 
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' - code = code + safeword + code = code + ' ' + safeword p = Parser(grammar._pgen_grammar, error_recovery=True) try: @@ -208,6 +195,8 @@ def evaluate_goto_definition(evaluator, context, leaf): return evaluate_call_of_leaf(context, leaf) elif isinstance(leaf, tree.Literal): return eval_atom(context, leaf) + elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'): + return get_string_context_set(evaluator) return [] @@ -294,14 +283,14 @@ def get_call_signature_details(module, position): return None -@time_cache("call_signatures_validity") +@call_signature_time_cache("call_signatures_validity") def cache_call_signatures(evaluator, context, bracket_leaf, code_lines, user_pos): """This function calculates the cache key.""" - index = user_pos[0] - 1 + line_index = user_pos[0] - 1 - before_cursor = code_lines[index][:user_pos[1]] - other_lines = code_lines[bracket_leaf.start_pos[0]:index] - whole = '\n'.join(other_lines + [before_cursor]) + before_cursor = code_lines[line_index][:user_pos[1]] + other_lines = code_lines[bracket_leaf.start_pos[0]:line_index] + whole = ''.join(other_lines + [before_cursor]) before_bracket = re.match(r'.*\(', whole, re.DOTALL) module_path = context.get_root_context().py__file__() diff --git a/pythonFiles/jedi/api/interpreter.py b/pythonFiles/jedi/api/interpreter.py index 202f345e94b9..c9b7bd69bbe0 100644 --- a/pythonFiles/jedi/api/interpreter.py +++ b/pythonFiles/jedi/api/interpreter.py @@ -5,24 +5,34 @@ from jedi.evaluate.context import ModuleContext from jedi.evaluate import compiled from jedi.evaluate.compiled import mixed +from jedi.evaluate.compiled.access import create_access_path from jedi.evaluate.base_context import Context +def _create(evaluator, obj): + return compiled.create_from_access_path( + evaluator, create_access_path(evaluator, obj) + ) + + class NamespaceObject(object): def __init__(self, dct): self.__dict__ = dct class MixedModuleContext(Context): - resets_positions = True type = 'mixed_module' - def __init__(self, evaluator, tree_module, namespaces, path): + def __init__(self, evaluator, tree_module, namespaces, path, code_lines): self.evaluator = evaluator self._namespaces = namespaces self._namespace_objects = [NamespaceObject(n) for n in namespaces] - self._module_context = ModuleContext(evaluator, tree_module, path=path) + self._module_context = ModuleContext( + evaluator, tree_module, + path=path, + code_lines=code_lines + ) self.tree_node = tree_module def get_node(self): @@ -33,7 +43,7 @@ def get_filters(self, *args, **kwargs): yield filter for namespace_obj in self._namespace_objects: - compiled_object = compiled.create(self.evaluator, namespace_obj) + compiled_object = _create(self.evaluator, namespace_obj) mixed_object = mixed.MixedObject( self.evaluator, parent_context=self, @@ -43,5 +53,9 @@ def get_filters(self, *args, **kwargs): for filter in mixed_object.get_filters(*args, **kwargs): yield filter + @property + def code_lines(self): + return self._module_context.code_lines + def __getattr__(self, name): return getattr(self._module_context, name) diff --git a/pythonFiles/jedi/api/keywords.py b/pythonFiles/jedi/api/keywords.py index a1bc4e7f8556..2991a0f81a56 100644 --- a/pythonFiles/jedi/api/keywords.py +++ b/pythonFiles/jedi/api/keywords.py @@ -1,10 +1,7 @@ import pydoc -import keyword -from jedi._compatibility import is_py3, is_py35 from jedi.evaluate.utils import ignored from jedi.evaluate.filters import AbstractNameDefinition -from parso.python.tree import Leaf 
try: from pydoc_data import topics as pydoc_topics @@ -17,87 +14,30 @@ # pydoc_data module in its file python3x.zip. pydoc_topics = None -if is_py3: - if is_py35: - # in python 3.5 async and await are not proper keywords, but for - # completion pursposes should as as though they are - keys = keyword.kwlist + ["async", "await"] - else: - keys = keyword.kwlist -else: - keys = keyword.kwlist + ['None', 'False', 'True'] - - -def has_inappropriate_leaf_keyword(pos, module): - relevant_errors = filter( - lambda error: error.first_pos[0] == pos[0], - module.error_statement_stacks) - - for error in relevant_errors: - if error.next_token in keys: - return True - - return False - - -def completion_names(evaluator, stmt, pos, module): - keyword_list = all_keywords(evaluator) - - if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module): - keyword_list = filter( - lambda keyword: not keyword.only_valid_as_leaf, - keyword_list - ) - return [keyword.name for keyword in keyword_list] - - -def all_keywords(evaluator, pos=(0, 0)): - return set([Keyword(evaluator, k, pos) for k in keys]) - - -def keyword(evaluator, string, pos=(0, 0)): - if string in keys: - return Keyword(evaluator, string, pos) - else: - return None - def get_operator(evaluator, string, pos): return Keyword(evaluator, string, pos) -keywords_only_valid_as_leaf = ( - 'continue', - 'break', -) - - class KeywordName(AbstractNameDefinition): - api_type = 'keyword' + api_type = u'keyword' def __init__(self, evaluator, name): self.evaluator = evaluator self.string_name = name - self.parent_context = evaluator.BUILTINS - - def eval(self): - return set() + self.parent_context = evaluator.builtins_module def infer(self): return [Keyword(self.evaluator, self.string_name, (0, 0))] class Keyword(object): - api_type = 'keyword' + api_type = u'keyword' def __init__(self, evaluator, name, pos): self.name = KeywordName(evaluator, name) self.start_pos = pos - self.parent = evaluator.BUILTINS - - @property - def only_valid_as_leaf(self): - return self.name.value in keywords_only_valid_as_leaf + self.parent = evaluator.builtins_module @property def names(self): diff --git a/pythonFiles/jedi/api/project.py b/pythonFiles/jedi/api/project.py new file mode 100644 index 000000000000..ca6992b5db7f --- /dev/null +++ b/pythonFiles/jedi/api/project.py @@ -0,0 +1,200 @@ +import os +import json + +from jedi._compatibility import FileNotFoundError, NotADirectoryError +from jedi.api.environment import SameEnvironment, \ + get_cached_default_environment +from jedi.api.exceptions import WrongVersion +from jedi._compatibility import force_unicode +from jedi.evaluate.sys_path import discover_buildout_paths +from jedi.evaluate.cache import evaluator_as_method_param_cache +from jedi.common.utils import traverse_parents + +_CONFIG_FOLDER = '.jedi' +_CONTAINS_POTENTIAL_PROJECT = 'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in' + +_SERIALIZER_VERSION = 1 + + +def _remove_duplicates_from_path(path): + used = set() + for p in path: + if p in used: + continue + used.add(p) + yield p + + +def _force_unicode_list(lst): + return list(map(force_unicode, lst)) + + +class Project(object): + # TODO serialize environment + _serializer_ignore_attributes = ('_environment',) + _environment = None + + @staticmethod + def _get_json_path(base_path): + return os.path.join(base_path, _CONFIG_FOLDER, 'project.json') + + @classmethod + def load(cls, path): + """ + :param path: The path of the directory you want to use as a project. 
+ """ + with open(cls._get_json_path(path)) as f: + version, data = json.load(f) + + if version == 1: + self = cls.__new__() + self.__dict__.update(data) + return self + else: + raise WrongVersion( + "The Jedi version of this project seems newer than what we can handle." + ) + + def __init__(self, path, **kwargs): + """ + :param path: The base path for this project. + :param sys_path: list of str. You can override the sys path if you + want. By default the ``sys.path.`` is generated from the + environment (virtualenvs, etc). + :param smart_sys_path: If this is enabled (default), adds paths from + local directories. Otherwise you will have to rely on your packages + being properly configured on the ``sys.path``. + """ + def py2_comp(path, environment=None, sys_path=None, + smart_sys_path=True, _django=False): + self._path = path + if isinstance(environment, SameEnvironment): + self._environment = environment + + self._sys_path = sys_path + self._smart_sys_path = smart_sys_path + self._django = _django + + py2_comp(path, **kwargs) + + def _get_base_sys_path(self, environment=None): + if self._sys_path is not None: + return self._sys_path + + # The sys path has not been set explicitly. + if environment is None: + environment = self.get_environment() + + sys_path = environment.get_sys_path() + try: + sys_path.remove('') + except ValueError: + pass + return sys_path + + @evaluator_as_method_param_cache() + def _get_sys_path(self, evaluator, environment=None): + """ + Keep this method private for all users of jedi. However internally this + one is used like a public method. + """ + suffixed = [] + prefixed = [] + + sys_path = list(self._get_base_sys_path(environment)) + if self._smart_sys_path: + prefixed.append(self._path) + + if evaluator.script_path is not None: + suffixed += discover_buildout_paths(evaluator, evaluator.script_path) + + traversed = [] + for parent in traverse_parents(evaluator.script_path): + traversed.append(parent) + if parent == self._path: + # Don't go futher than the project path. + break + + # AFAIK some libraries have imports like `foo.foo.bar`, which + # leads to the conclusion to by default prefer longer paths + # rather than shorter ones by default. 
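For orientation, the sys path assembled by this method (once the reversed parent directories are appended just below) comes out as: project directory first, the environment's sys.path in the middle, discovered parents last. A toy illustration with made-up paths, not taken from the patch:

prefixed = ['/repo']                                   # smart_sys_path: the project dir
base_sys_path = ['/usr/lib/python3.6', '/usr/lib/python3.6/site-packages']
traversed = ['/repo/pkg/sub', '/repo/pkg', '/repo']    # script dir up to the project path
suffixed = list(reversed(traversed))
path = prefixed + base_sys_path + suffixed
# -> ['/repo', '/usr/lib/python3.6', '/usr/lib/python3.6/site-packages',
#     '/repo', '/repo/pkg', '/repo/pkg/sub']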
+ suffixed += reversed(traversed) + + if self._django: + prefixed.append(self._path) + + path = prefixed + sys_path + suffixed + return list(_force_unicode_list(_remove_duplicates_from_path(path))) + + def save(self): + data = dict(self.__dict__) + for attribute in self._serializer_ignore_attributes: + data.pop(attribute, None) + + with open(self._get_json_path(self._path), 'wb') as f: + return json.dump((_SERIALIZER_VERSION, data), f) + + def get_environment(self): + if self._environment is None: + return get_cached_default_environment() + + return self._environment + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._path) + + +def _is_potential_project(path): + for name in _CONTAINS_POTENTIAL_PROJECT: + if os.path.exists(os.path.join(path, name)): + return True + return False + + +def _is_django_path(directory): + """ Detects the path of the very well known Django library (if used) """ + try: + with open(os.path.join(directory, 'manage.py'), 'rb') as f: + return b"DJANGO_SETTINGS_MODULE" in f.read() + except (FileNotFoundError, NotADirectoryError): + return False + + return False + + +def get_default_project(path=None): + if path is None: + path = os.getcwd() + + check = os.path.realpath(path) + probable_path = None + first_no_init_file = None + for dir in traverse_parents(check, include_current=True): + try: + return Project.load(dir) + except (FileNotFoundError, NotADirectoryError): + pass + + if first_no_init_file is None: + if os.path.exists(os.path.join(dir, '__init__.py')): + # In the case that a __init__.py exists, it's in 99% just a + # Python package and the project sits at least one level above. + continue + else: + first_no_init_file = dir + + if _is_django_path(dir): + return Project(dir, _django=True) + + if probable_path is None and _is_potential_project(dir): + probable_path = dir + + if probable_path is not None: + # TODO search for setup.py etc + return Project(probable_path) + + if first_no_init_file is not None: + return Project(first_no_init_file) + + curdir = path if os.path.isdir(path) else os.path.dirname(path) + return Project(curdir) diff --git a/pythonFiles/jedi/api/replstartup.py b/pythonFiles/jedi/api/replstartup.py index 5bfcc8ce889e..4c44a626b775 100644 --- a/pythonFiles/jedi/api/replstartup.py +++ b/pythonFiles/jedi/api/replstartup.py @@ -11,8 +11,8 @@ [GCC 4.6.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import os - >>> os.path.join().split().in # doctest: +SKIP - os.path.join().split().index os.path.join().split().insert + >>> os.path.join('a', 'b').split().in # doctest: +SKIP + ..dex ..sert """ import jedi.utils diff --git a/pythonFiles/jedi/cache.py b/pythonFiles/jedi/cache.py index 01138e75a99b..6c0c2a830942 100644 --- a/pythonFiles/jedi/cache.py +++ b/pythonFiles/jedi/cache.py @@ -12,6 +12,7 @@ these variables are being cleaned after every API usage. """ import time +from functools import wraps from jedi import settings from parso.cache import parser_cache @@ -74,7 +75,7 @@ def clear_time_caches(delete_all=False): del tc[key] -def time_cache(time_add_setting): +def call_signature_time_cache(time_add_setting): """ This decorator works as follows: Call it with a setting and after that use the function with a callable that returns the key. 
@@ -106,8 +107,31 @@ def wrapper(*args, **kwargs): return _temp +def time_cache(seconds): + def decorator(func): + cache = {} + + @wraps(func) + def wrapper(*args, **kwargs): + key = (args, frozenset(kwargs.items())) + try: + created, result = cache[key] + if time.time() < created + seconds: + return result + except KeyError: + pass + result = func(*args, **kwargs) + cache[key] = time.time(), result + return result + + wrapper.clear_cache = lambda: cache.clear() + return wrapper + return decorator + + def memoize_method(method): """A normal memoize function.""" + @wraps(method) def wrapper(self, *args, **kwargs): cache_dict = self.__dict__.setdefault('_memoize_method_dct', {}) dct = cache_dict.setdefault(method, {}) diff --git a/pythonFiles/jedi/common/utils.py b/pythonFiles/jedi/common/utils.py new file mode 100644 index 000000000000..72726a4696e6 --- /dev/null +++ b/pythonFiles/jedi/common/utils.py @@ -0,0 +1,12 @@ +import os + + +def traverse_parents(path, include_current=False): + if not include_current: + path = os.path.dirname(path) + + previous = None + while previous != path: + yield path + previous = path + path = os.path.dirname(path) diff --git a/pythonFiles/jedi/debug.py b/pythonFiles/jedi/debug.py index 8caf1accb17f..a4fd86465bac 100644 --- a/pythonFiles/jedi/debug.py +++ b/pythonFiles/jedi/debug.py @@ -35,7 +35,7 @@ def _lazy_colorama_init(): # need this. initialise.atexit_done = True try: - init() + init(strip=False) except Exception: # Colorama fails with initializing under vim and is buggy in # version 0.3.6. diff --git a/pythonFiles/jedi/evaluate/__init__.py b/pythonFiles/jedi/evaluate/__init__.py index 20461071abdb..3ba52b89f408 100644 --- a/pythonFiles/jedi/evaluate/__init__.py +++ b/pythonFiles/jedi/evaluate/__init__.py @@ -17,7 +17,8 @@ ``eval_expr_stmt``. There's separate logic for autocompletion in the API, the evaluator is all about evaluating an expression. -TODO this paragraph is not what jedi does anymore. +TODO this paragraph is not what jedi does anymore, it's similar, but not the +same. Now you need to understand what follows after ``eval_expr_stmt``. Let's make an example:: @@ -62,10 +63,9 @@ that are not used are just being ignored. """ -import sys - from parso.python import tree import parso +from parso import python_bytes_to_unicode from jedi import debug from jedi import parser_utils @@ -86,31 +86,42 @@ class Evaluator(object): - def __init__(self, grammar, project): - self.grammar = grammar + def __init__(self, project, environment=None, script_path=None): + if environment is None: + environment = project.get_environment() + self.environment = environment + self.script_path = script_path + self.compiled_subprocess = environment.get_evaluator_subprocess(self) + self.grammar = environment.get_grammar() + self.latest_grammar = parso.load_grammar(version='3.6') self.memoize_cache = {} # for memoize decorators - # To memorize modules -> equals `sys.modules`. - self.modules = {} # like `sys.modules`. + self.module_cache = imports.ModuleCache() # does the job of `sys.modules`. 
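A possible usage of the seconds-based time_cache decorator added above; illustrative only, and it assumes the decorator is importable exactly as defined in that hunk:

import time
from jedi.cache import time_cache   # the TTL decorator introduced above

@time_cache(seconds=0.5)
def slow_lookup(x):
    return (x, time.time())

first = slow_lookup(1)
assert slow_lookup(1) == first      # served from the cache while the entry is fresh
time.sleep(0.6)
assert slow_lookup(1) != first      # entry older than 0.5s, recomputed
slow_lookup.clear_cache()           # the wrapper also exposes a manual reset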
self.compiled_cache = {} # see `evaluate.compiled.create()` self.inferred_element_counts = {} self.mixed_cache = {} # see `evaluate.compiled.mixed._create()` self.analysis = [] self.dynamic_params_depth = 0 self.is_analysis = False - self.python_version = sys.version_info[:2] self.project = project - project.add_evaluator(self) + self.access_cache = {} self.reset_recursion_limitations() + self.allow_different_encoding = True - # Constants - self.BUILTINS = compiled.get_special_object(self, 'BUILTINS') + @property + @evaluator_function_cache() + def builtins_module(self): + return compiled.get_special_object(self, u'BUILTINS') def reset_recursion_limitations(self): self.recursion_detector = recursion.RecursionDetector() self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) + def get_sys_path(self): + """Convenience function""" + return self.project._get_sys_path(self, environment=self.environment) + def eval_element(self, context, element): if isinstance(context, CompForContext): return eval_node(context, element) @@ -124,7 +135,11 @@ def eval_element(self, context, element): if_stmt = None break predefined_if_name_dict = context.predefined_names.get(if_stmt) - if predefined_if_name_dict is None and if_stmt and if_stmt.type == 'if_stmt': + # TODO there's a lot of issues with this one. We actually should do + # this in a different way. Caching should only be active in certain + # cases and this all sucks. + if predefined_if_name_dict is None and if_stmt \ + and if_stmt.type == 'if_stmt' and self.is_analysis: if_stmt_test = if_stmt.children[1] name_dicts = [{}] # If we already did a check, we don't want to do it again -> If @@ -357,3 +372,15 @@ def from_scope_node(scope_node, child_is_funcdef=None, is_nested=True, node_is_o node = node.parent scope_node = parent_scope(node) return from_scope_node(scope_node, is_nested=True, node_is_object=node_is_object) + + def parse_and_get_code(self, code=None, path=None, **kwargs): + if self.allow_different_encoding: + if code is None: + with open(path, 'rb') as f: + code = f.read() + code = python_bytes_to_unicode(code, errors='replace') + + return self.grammar.parse(code=code, path=path, **kwargs), code + + def parse(self, *args, **kwargs): + return self.parse_and_get_code(*args, **kwargs)[0] diff --git a/pythonFiles/jedi/evaluate/analysis.py b/pythonFiles/jedi/evaluate/analysis.py index c825e5fef9e9..ded4e9f20880 100644 --- a/pythonFiles/jedi/evaluate/analysis.py +++ b/pythonFiles/jedi/evaluate/analysis.py @@ -1,9 +1,12 @@ """ Module for statical analysis. """ -from jedi import debug from parso.python import tree + +from jedi._compatibility import force_unicode +from jedi import debug from jedi.evaluate.compiled import CompiledObject +from jedi.evaluate.helpers import is_string CODES = { @@ -114,9 +117,10 @@ def add_attribute_error(name_context, lookup_context, name): # instead of an error, if that happens. typ = Error if isinstance(lookup_context, AbstractInstanceContext): - slot_names = lookup_context.get_function_slot_names('__getattr__') + \ - lookup_context.get_function_slot_names('__getattribute__') + slot_names = lookup_context.get_function_slot_names(u'__getattr__') + \ + lookup_context.get_function_slot_names(u'__getattribute__') for n in slot_names: + # TODO do we even get here? 
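parse_and_get_code above reads sources as bytes and decodes them leniently so a single bad byte cannot abort parsing. A minimal stand-in for that decoding step (this sketch ignores source-encoding declarations); illustrative only:

def bytes_to_code(raw, encoding='utf-8'):
    # Undecodable bytes become U+FFFD instead of raising UnicodeDecodeError.
    return raw.decode(encoding, errors='replace')

assert bytes_to_code(b'x = 1  # caf\xe9') == u'x = 1  # caf\ufffd'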
if isinstance(name, CompiledInstanceName) and \ n.parent_context.obj == object: typ = Warning @@ -139,7 +143,7 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None) """ def check_match(cls, exception): try: - return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj) + return isinstance(cls, CompiledObject) and cls.is_super_class(exception) except TypeError: return False @@ -160,7 +164,7 @@ def check_try_for_except(obj, exception): except_classes = node_context.eval_node(node) for cls in except_classes: from jedi.evaluate.context import iterable - if isinstance(cls, iterable.AbstractIterable) and \ + if isinstance(cls, iterable.Sequence) and \ cls.array_type == 'tuple': # multiple exceptions for lazy_context in cls.py__iter__(): @@ -189,8 +193,8 @@ def check_hasattr(node, suite): # Check name key, lazy_context = args[1] names = list(lazy_context.infer()) - assert len(names) == 1 and isinstance(names[0], CompiledObject) - assert names[0].obj == payload[1].value + assert len(names) == 1 and is_string(names[0]) + assert force_unicode(names[0].get_safe_value()) == payload[1].value # Check objects key, lazy_context = args[0] diff --git a/pythonFiles/jedi/evaluate/arguments.py b/pythonFiles/jedi/evaluate/arguments.py index 32b9238c6f4d..beab4c8c9541 100644 --- a/pythonFiles/jedi/evaluate/arguments.py +++ b/pythonFiles/jedi/evaluate/arguments.py @@ -10,6 +10,7 @@ from jedi.evaluate.context import iterable from jedi.evaluate.param import get_params, ExecutedParam + def try_iter_content(types, depth=0): """Helper method for static analysis.""" if depth > 10: @@ -29,6 +30,8 @@ def try_iter_content(types, depth=0): class AbstractArguments(object): context = None + argument_node = None + trailer = None def eval_argument_clinic(self, parameters): """Uses a list with argument clinic information (see PEP 436).""" @@ -95,29 +98,30 @@ def __init__(self, evaluator, context, argument_node, trailer=None): self.trailer = trailer # Can be None, e.g. in a class definition. def _split(self): - if isinstance(self.argument_node, (tuple, list)): - for el in self.argument_node: - yield 0, el - else: - if not (self.argument_node.type == 'arglist' or ( - # in python 3.5 **arg is an argument, not arglist - (self.argument_node.type == 'argument') and - self.argument_node.children[0] in ('*', '**'))): - yield 0, self.argument_node - return - - iterator = iter(self.argument_node.children) - for child in iterator: - if child == ',': - continue - elif child in ('*', '**'): - yield len(child.value), next(iterator) - elif child.type == 'argument' and \ - child.children[0] in ('*', '**'): - assert len(child.children) == 2 - yield len(child.children[0].value), child.children[1] - else: - yield 0, child + if self.argument_node is None: + return + + # Allow testlist here as well for Python2's class inheritance + # definitions. 
+ if not (self.argument_node.type in ('arglist', 'testlist') or ( + # in python 3.5 **arg is an argument, not arglist + (self.argument_node.type == 'argument') and + self.argument_node.children[0] in ('*', '**'))): + yield 0, self.argument_node + return + + iterator = iter(self.argument_node.children) + for child in iterator: + if child == ',': + continue + elif child in ('*', '**'): + yield len(child.value), next(iterator) + elif child.type == 'argument' and \ + child.children[0] in ('*', '**'): + assert len(child.children) == 2 + yield len(child.children[0].value), child.children[1] + else: + yield 0, child def unpack(self, funcdef=None): named_args = [] @@ -126,7 +130,6 @@ def unpack(self, funcdef=None): arrays = self.context.eval_node(el) iterators = [_iterate_star_args(self.context, a, el, funcdef) for a in arrays] - iterators = list(iterators) for values in list(zip_longest(*iterators)): # TODO zip_longest yields None, that means this would raise # an exception? @@ -134,7 +137,7 @@ def unpack(self, funcdef=None): [v for v in values if v is not None] ) elif star_count == 2: - arrays = self._evaluator.eval_element(self.context, el) + arrays = self.context.eval_node(el) for dct in arrays: for key, values in _star_star_dict(self.context, dct, el, funcdef): yield key, values @@ -197,7 +200,11 @@ def get_calling_nodes(self): arguments = param.var_args break - return [arguments.argument_node or arguments.trailer] + if arguments.argument_node is not None: + return [arguments.argument_node] + if arguments.trailer is not None: + return [arguments.trailer] + return [] class ValuesArguments(AbstractArguments): @@ -235,7 +242,7 @@ def _star_star_dict(context, array, input_node, funcdef): # For now ignore this case. In the future add proper iterators and just # make one call without crazy isinstance checks. return {} - elif isinstance(array, iterable.AbstractIterable) and array.array_type == 'dict': + elif isinstance(array, iterable.Sequence) and array.array_type == 'dict': return array.exact_key_items() else: if funcdef is not None: diff --git a/pythonFiles/jedi/evaluate/base_context.py b/pythonFiles/jedi/evaluate/base_context.py index 693a99aae7aa..2c6fe6cd2c88 100644 --- a/pythonFiles/jedi/evaluate/base_context.py +++ b/pythonFiles/jedi/evaluate/base_context.py @@ -1,3 +1,11 @@ +""" +Contexts are the "values" that Python would return. However Contexts are at the +same time also the "contexts" that a user is currently sitting in. + +A ContextSet is typically used to specify the return of a function or any other +static analysis operation. In jedi there are always multiple returns and not +just one. 
+""" from parso.python.tree import ExprStmt, CompFor from jedi import debug @@ -63,10 +71,13 @@ def execute_evaluated(self, *value_list): arguments = ValuesArguments([ContextSet(value) for value in value_list]) return self.execute(arguments) - def iterate(self, contextualized_node=None): - debug.dbg('iterate') + def iterate(self, contextualized_node=None, is_async=False): + debug.dbg('iterate %s', self) try: - iter_method = self.py__iter__ + if is_async: + iter_method = self.py__aiter__ + else: + iter_method = self.py__iter__ except AttributeError: if contextualized_node is not None: from jedi.evaluate import analysis @@ -81,17 +92,22 @@ def iterate(self, contextualized_node=None): def get_item(self, index_contexts, contextualized_node): from jedi.evaluate.compiled import CompiledObject - from jedi.evaluate.context.iterable import Slice, AbstractIterable + from jedi.evaluate.context.iterable import Slice, Sequence result = ContextSet() for index in index_contexts: - if isinstance(index, (CompiledObject, Slice)): + if isinstance(index, Slice): index = index.obj + if isinstance(index, CompiledObject): + try: + index = index.get_safe_value() + except ValueError: + pass - if type(index) not in (float, int, str, unicode, slice, type(Ellipsis)): + if type(index) not in (float, int, str, unicode, slice, bytes): # If the index is not clearly defined, we have to get all the # possiblities. - if isinstance(self, AbstractIterable) and self.array_type == 'dict': + if isinstance(self, Sequence) and self.array_type == 'dict': result |= self.dict_values() else: result |= iterate_contexts(ContextSet(self)) @@ -139,10 +155,6 @@ def py__getattribute__(self, name_or_str, name_context=None, position=None, return f.filter_name(filters) return f.find(filters, attribute_lookup=not search_global) - return self.evaluator.find_types( - self, name_or_str, name_context, position, search_global, is_goto, - analysis_errors) - def create_context(self, node, node_is_context=False, node_is_object=False): return self.evaluator.create_context(self, node, node_is_context, node_is_object) @@ -169,14 +181,14 @@ def py__doc__(self, include_call_signature=False): return None -def iterate_contexts(contexts, contextualized_node=None): +def iterate_contexts(contexts, contextualized_node=None, is_async=False): """ Calls `iterate`, on all contexts but ignores the ordering and just returns all contexts that the iterate functions yield. 
""" return ContextSet.from_sets( lazy_context.infer() - for lazy_context in contexts.iterate(contextualized_node) + for lazy_context in contexts.iterate(contextualized_node, is_async=is_async) ) @@ -241,9 +253,9 @@ class ContextSet(BaseContextSet): def py__class__(self): return ContextSet.from_iterable(c.py__class__() for c in self._set) - def iterate(self, contextualized_node=None): + def iterate(self, contextualized_node=None, is_async=False): from jedi.evaluate.lazy_context import get_merged_lazy_context - type_iters = [c.iterate(contextualized_node) for c in self._set] + type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] for lazy_contexts in zip_longest(*type_iters): yield get_merged_lazy_context( [l for l in lazy_contexts if l is not None] diff --git a/pythonFiles/jedi/evaluate/cache.py b/pythonFiles/jedi/evaluate/cache.py index b7c7cd7e979f..c619e698a3c8 100644 --- a/pythonFiles/jedi/evaluate/cache.py +++ b/pythonFiles/jedi/evaluate/cache.py @@ -59,7 +59,7 @@ def decorator(func): return decorator -def _memoize_meta_class(): +def evaluator_as_method_param_cache(): def decorator(call): return _memoize_default(second_arg_is_evaluator=True)(call) @@ -72,6 +72,6 @@ class CachedMetaClass(type): class initializations. Either you do it this way or with decorators, but with decorators you lose class access (isinstance, etc). """ - @_memoize_meta_class() + @evaluator_as_method_param_cache() def __call__(self, *args, **kwargs): return super(CachedMetaClass, self).__call__(*args, **kwargs) diff --git a/pythonFiles/jedi/evaluate/compiled/__init__.py b/pythonFiles/jedi/evaluate/compiled/__init__.py index f9f2e0781e13..357d26cc87fc 100644 --- a/pythonFiles/jedi/evaluate/compiled/__init__.py +++ b/pythonFiles/jedi/evaluate/compiled/__init__.py @@ -1,638 +1,39 @@ -""" -Imitate the parser representation. -""" -import inspect -import re -import sys -import os -import types -from functools import partial +from jedi._compatibility import unicode +from jedi.evaluate.compiled.context import CompiledObject, CompiledName, \ + CompiledObjectFilter, CompiledContextName, create_from_access_path, \ + create_from_name -from jedi._compatibility import builtins as _builtins, unicode, py_version -from jedi import debug -from jedi.cache import underscore_memoization, memoize_method -from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \ - ContextNameMixin -from jedi.evaluate.base_context import Context, ContextSet -from jedi.evaluate.lazy_context import LazyKnownContext -from jedi.evaluate.compiled.getattr_static import getattr_static -from . import fake - -_sep = os.path.sep -if os.path.altsep is not None: - _sep += os.path.altsep -_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) -del _sep - -# Those types don't exist in typing. -MethodDescriptorType = type(str.replace) -WrapperDescriptorType = type(set.__iter__) -# `object.__subclasshook__` is an already executed descriptor. -object_class_dict = type.__dict__["__dict__"].__get__(object) -ClassMethodDescriptorType = type(object_class_dict['__subclasshook__']) - -ALLOWED_DESCRIPTOR_ACCESS = ( - types.FunctionType, - types.GetSetDescriptorType, - types.MemberDescriptorType, - MethodDescriptorType, - WrapperDescriptorType, - ClassMethodDescriptorType, - staticmethod, - classmethod, -) - -class CheckAttribute(object): - """Raises an AttributeError if the attribute X isn't available.""" - def __init__(self, func): - self.func = func - # Remove the py in front of e.g. py__call__. 
- self.check_name = func.__name__[2:] - - def __get__(self, instance, owner): - # This might raise an AttributeError. That's wanted. - if self.check_name == '__iter__': - # Python iterators are a bit strange, because there's no need for - # the __iter__ function as long as __getitem__ is defined (it will - # just start with __getitem__(0). This is especially true for - # Python 2 strings, where `str.__iter__` is not even defined. - try: - iter(instance.obj) - except TypeError: - raise AttributeError - else: - getattr(instance.obj, self.check_name) - return partial(self.func, instance) - - -class CompiledObject(Context): - path = None # modules have this attribute - set it to None. - used_names = lambda self: {} # To be consistent with modules. - - def __init__(self, evaluator, obj, parent_context=None, faked_class=None): - super(CompiledObject, self).__init__(evaluator, parent_context) - self.obj = obj - # This attribute will not be set for most classes, except for fakes. - self.tree_node = faked_class - - def get_root_node(self): - # To make things a bit easier with filters we add this method here. - return self.get_root_context() - - @CheckAttribute - def py__call__(self, params): - if inspect.isclass(self.obj): - from jedi.evaluate.context import CompiledInstance - return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params)) - else: - return ContextSet.from_iterable(self._execute_function(params)) - - @CheckAttribute - def py__class__(self): - return create(self.evaluator, self.obj.__class__) - - @CheckAttribute - def py__mro__(self): - return (self,) + tuple(create(self.evaluator, cls) for cls in self.obj.__mro__[1:]) - - @CheckAttribute - def py__bases__(self): - return tuple(create(self.evaluator, cls) for cls in self.obj.__bases__) - - def py__bool__(self): - return bool(self.obj) - - def py__file__(self): - try: - return self.obj.__file__ - except AttributeError: - return None - - def is_class(self): - return inspect.isclass(self.obj) - - def py__doc__(self, include_call_signature=False): - return inspect.getdoc(self.obj) or '' - - def get_param_names(self): - obj = self.obj - try: - if py_version < 33: - raise ValueError("inspect.signature was introduced in 3.3") - if py_version == 34: - # In 3.4 inspect.signature are wrong for str and int. This has - # been fixed in 3.5. The signature of object is returned, - # because no signature was found for str. Here we imitate 3.5 - # logic and just ignore the signature if the magic methods - # don't match object. - # 3.3 doesn't even have the logic and returns nothing for str - # and classes that inherit from object. 
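The CheckAttribute descriptor being removed here (and reintroduced, adapted, in the new compiled/context.py further down in this patch) only exposes a py__X__ method when the wrapped object really provides __X__. A self-contained sketch of that descriptor pattern, illustrative only:

from functools import partial


class RequiresAttribute(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__[2:]        # 'py__iter__' -> '__iter__'

    def __get__(self, instance, owner):
        if instance is None:
            return self
        getattr(instance.obj, self.name)     # may raise AttributeError; that is the point
        return partial(self.func, instance)


class Wrapper(object):
    def __init__(self, obj):
        self.obj = obj

    @RequiresAttribute
    def py__iter__(self):
        return iter(self.obj)


list(Wrapper([1, 2]).py__iter__())           # fine: lists define __iter__
# Wrapper(42).py__iter__ raises AttributeError, because ints do not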
- user_def = inspect._signature_get_user_defined_method - if (inspect.isclass(obj) - and not user_def(type(obj), '__init__') - and not user_def(type(obj), '__new__') - and (obj.__init__ != object.__init__ - or obj.__new__ != object.__new__)): - raise ValueError - - signature = inspect.signature(obj) - except ValueError: # Has no signature - params_str, ret = self._parse_function_doc() - tokens = params_str.split(',') - if inspect.ismethoddescriptor(obj): - tokens.insert(0, 'self') - for p in tokens: - parts = p.strip().split('=') - yield UnresolvableParamName(self, parts[0]) - else: - for signature_param in signature.parameters.values(): - yield SignatureParamName(self, signature_param) - - def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, repr(self.obj)) - - @underscore_memoization - def _parse_function_doc(self): - doc = self.py__doc__() - if doc is None: - return '', '' - - return _parse_function_doc(doc) - - @property - def api_type(self): - obj = self.obj - if inspect.isclass(obj): - return 'class' - elif inspect.ismodule(obj): - return 'module' - elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \ - or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj): - return 'function' - # Everything else... - return 'instance' - - @property - def type(self): - """Imitate the tree.Node.type values.""" - cls = self._get_class() - if inspect.isclass(cls): - return 'classdef' - elif inspect.ismodule(cls): - return 'file_input' - elif inspect.isbuiltin(cls) or inspect.ismethod(cls) or \ - inspect.ismethoddescriptor(cls): - return 'funcdef' - - @underscore_memoization - def _cls(self): - """ - We used to limit the lookups for instantiated objects like list(), but - this is not the case anymore. Python itself - """ - # Ensures that a CompiledObject is returned that is not an instance (like list) - return self - - def _get_class(self): - if not fake.is_class_instance(self.obj) or \ - inspect.ismethoddescriptor(self.obj): # slots - return self.obj - - try: - return self.obj.__class__ - except AttributeError: - # happens with numpy.core.umath._UFUNC_API (you get it - # automatically by doing `import numpy`. - return type - - def get_filters(self, search_global=False, is_instance=False, - until_position=None, origin_scope=None): - yield self._ensure_one_filter(is_instance) - - @memoize_method - def _ensure_one_filter(self, is_instance): - """ - search_global shouldn't change the fact that there's one dict, this way - there's only one `object`. - """ - return CompiledObjectFilter(self.evaluator, self, is_instance) - - @CheckAttribute - def py__getitem__(self, index): - if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): - # Get rid of side effects, we won't call custom `__getitem__`s. - return ContextSet() - - return ContextSet(create(self.evaluator, self.obj[index])) - - @CheckAttribute - def py__iter__(self): - if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): - # Get rid of side effects, we won't call custom `__getitem__`s. 
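The parameter lookup above tries inspect.signature first and falls back to scraping the "func(a, b=None)" line of the docstring when no signature exists (common for C builtins). A rough standalone version of that fallback chain, not jedi's actual helper:

import inspect
import re


def param_names(obj):
    try:
        return list(inspect.signature(obj).parameters)
    except (ValueError, TypeError):
        # No introspectable signature: scrape the first parenthesised group.
        doc = inspect.getdoc(obj) or ''
        match = re.search(r'\((.*?)\)', doc)
        if match is None:
            return []
        return [p.strip().split('=')[0] for p in match.group(1).split(',')
                if p.strip() and p.strip() not in ('/', '*')]


assert param_names(param_names) == ['obj']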
- return - - for i, part in enumerate(self.obj): - if i > 20: - # Should not go crazy with large iterators - break - yield LazyKnownContext(create(self.evaluator, part)) - - def py__name__(self): - try: - return self._get_class().__name__ - except AttributeError: - return None - - @property - def name(self): - try: - name = self._get_class().__name__ - except AttributeError: - name = repr(self.obj) - return CompiledContextName(self, name) - - def _execute_function(self, params): - from jedi.evaluate import docstrings - if self.type != 'funcdef': - return - for name in self._parse_function_doc()[1].split(): - try: - bltn_obj = getattr(_builtins, name) - except AttributeError: - continue - else: - if bltn_obj is None: - # We want to evaluate everything except None. - # TODO do we? - continue - bltn_obj = create(self.evaluator, bltn_obj) - for result in bltn_obj.execute(params): - yield result - for type_ in docstrings.infer_return_types(self): - yield type_ - - def get_self_attributes(self): - return [] # Instance compatibility - - def get_imports(self): - return [] # Builtins don't have imports - - def dict_values(self): - return ContextSet.from_iterable( - create(self.evaluator, v) for v in self.obj.values() - ) - - -class CompiledName(AbstractNameDefinition): - def __init__(self, evaluator, parent_context, name): - self._evaluator = evaluator - self.parent_context = parent_context - self.string_name = name - - def __repr__(self): - try: - name = self.parent_context.name # __name__ is not defined all the time - except AttributeError: - name = None - return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name) - - @property - def api_type(self): - return next(iter(self.infer())).api_type - - @underscore_memoization - def infer(self): - module = self.parent_context.get_root_context() - return ContextSet(_create_from_name( - self._evaluator, module, self.parent_context, self.string_name - )) - - -class SignatureParamName(AbstractNameDefinition): - api_type = 'param' - - def __init__(self, compiled_obj, signature_param): - self.parent_context = compiled_obj.parent_context - self._signature_param = signature_param - - @property - def string_name(self): - return self._signature_param.name - - def infer(self): - p = self._signature_param - evaluator = self.parent_context.evaluator - contexts = ContextSet() - if p.default is not p.empty: - contexts = ContextSet(create(evaluator, p.default)) - if p.annotation is not p.empty: - annotation = create(evaluator, p.annotation) - contexts |= annotation.execute_evaluated() - return contexts - - -class UnresolvableParamName(AbstractNameDefinition): - api_type = 'param' - - def __init__(self, compiled_obj, name): - self.parent_context = compiled_obj.parent_context - self.string_name = name - - def infer(self): - return ContextSet() - - -class CompiledContextName(ContextNameMixin, AbstractNameDefinition): - def __init__(self, context, name): - self.string_name = name - self._context = context - self.parent_context = context.parent_context - - -class EmptyCompiledName(AbstractNameDefinition): - """ - Accessing some names will raise an exception. To avoid not having any - completions, just give Jedi the option to return this object. It infers to - nothing. 
- """ - def __init__(self, evaluator, name): - self.parent_context = evaluator.BUILTINS - self.string_name = name - - def infer(self): - return ContextSet() - - -class CompiledObjectFilter(AbstractFilter): - name_class = CompiledName - - def __init__(self, evaluator, compiled_object, is_instance=False): - self._evaluator = evaluator - self._compiled_object = compiled_object - self._is_instance = is_instance - - @memoize_method - def get(self, name): - name = str(name) - obj = self._compiled_object.obj - try: - attr, is_get_descriptor = getattr_static(obj, name) - except AttributeError: - return [] - else: - if is_get_descriptor \ - and not type(attr) in ALLOWED_DESCRIPTOR_ACCESS: - # In case of descriptors that have get methods we cannot return - # it's value, because that would mean code execution. - return [EmptyCompiledName(self._evaluator, name)] - if self._is_instance and name not in dir(obj): - return [] - return [self._create_name(name)] - - def values(self): - obj = self._compiled_object.obj - - names = [] - for name in dir(obj): - names += self.get(name) - - is_instance = self._is_instance or fake.is_class_instance(obj) - # ``dir`` doesn't include the type names. - if not inspect.ismodule(obj) and (obj is not type) and not is_instance: - for filter in create(self._evaluator, type).get_filters(): - names += filter.values() - return names - - def _create_name(self, name): - return self.name_class(self._evaluator, self._compiled_object, name) - - -def dotted_from_fs_path(fs_path, sys_path): - """ - Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e. - compares the path with sys.path and then returns the dotted_path. If the - path is not in the sys.path, just returns None. - """ - if os.path.basename(fs_path).startswith('__init__.'): - # We are calculating the path. __init__ files are not interesting. - fs_path = os.path.dirname(fs_path) - - # prefer - # - UNIX - # /path/to/pythonX.Y/lib-dynload - # /path/to/pythonX.Y/site-packages - # - Windows - # C:\path\to\DLLs - # C:\path\to\Lib\site-packages - # over - # - UNIX - # /path/to/pythonX.Y - # - Windows - # C:\path\to\Lib - path = '' - for s in sys_path: - if (fs_path.startswith(s) and len(path) < len(s)): - path = s - - # - Window - # X:\path\to\lib-dynload/datetime.pyd => datetime - module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/') - # - Window - # Replace like X:\path\to\something/foo/bar.py - return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.') - - -def load_module(evaluator, path=None, name=None): - sys_path = list(evaluator.project.sys_path) - if path is not None: - dotted_path = dotted_from_fs_path(path, sys_path=sys_path) - else: - dotted_path = name - - temp, sys.path = sys.path, sys_path - try: - __import__(dotted_path) - except RuntimeError: - if 'PySide' in dotted_path or 'PyQt' in dotted_path: - # RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap - # the QObject class. - # See https://github.com/davidhalter/jedi/pull/483 - return None - raise - except ImportError: - # If a module is "corrupt" or not really a Python module or whatever. - debug.warning('Module %s not importable in path %s.', dotted_path, path) - return None - finally: - sys.path = temp - - # Just access the cache after import, because of #59 as well as the very - # complicated import structure of Python. 
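dotted_from_fs_path above (which the new access layer further down imports from jedi.evaluate.utils instead) maps a file path back to a dotted module name by stripping the longest matching sys.path entry. A simplified, POSIX-only sketch, not the vendored implementation:

import os


def to_dotted(fs_path, sys_path):
    if os.path.basename(fs_path).startswith('__init__.'):
        fs_path = os.path.dirname(fs_path)
    # Prefer the longest matching prefix, mirroring the loop above.
    best = max((p for p in sys_path if fs_path.startswith(p)), key=len, default='')
    rest = fs_path[len(best):].lstrip(os.sep)
    return os.path.splitext(rest)[0].replace(os.sep, '.')


assert to_dotted('/usr/lib/python3.4/email/utils.py',
                 ['/usr/lib/python3.4', '/usr/lib/python3.4/site-packages']) == 'email.utils'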
- module = sys.modules[dotted_path] - - return create(evaluator, module) - - -docstr_defaults = { - 'floating point number': 'float', - 'character': 'str', - 'integer': 'int', - 'dictionary': 'dict', - 'string': 'str', -} +def builtin_from_name(evaluator, string): + builtins = evaluator.builtins_module + return create_from_name(evaluator, builtins, string) -def _parse_function_doc(doc): +def create_simple_object(evaluator, obj): """ - Takes a function and returns the params and return value as a tuple. - This is nothing more than a docstring parser. - - TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None - TODO docstrings like 'tuple of integers' + Only allows creations of objects that are easily picklable across Python + versions. """ - # parse round parentheses: def func(a, (b,c)) - try: - count = 0 - start = doc.index('(') - for i, s in enumerate(doc[start:]): - if s == '(': - count += 1 - elif s == ')': - count -= 1 - if count == 0: - end = start + i - break - param_str = doc[start + 1:end] - except (ValueError, UnboundLocalError): - # ValueError for doc.index - # UnboundLocalError for undefined end in last line - debug.dbg('no brackets found - no param') - end = 0 - param_str = '' - else: - # remove square brackets, that show an optional param ( = None) - def change_options(m): - args = m.group(1).split(',') - for i, a in enumerate(args): - if a and '=' not in a: - args[i] += '=None' - return ','.join(args) - - while True: - param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', - change_options, param_str) - if changes == 0: - break - param_str = param_str.replace('-', '_') # see: isinstance.__doc__ - - # parse return value - r = re.search('-[>-]* ', doc[end:end + 7]) - if r is None: - ret = '' - else: - index = end + r.end() - # get result type, which can contain newlines - pattern = re.compile(r'(,\n|[^\n-])+') - ret_str = pattern.match(doc, index).group(0).strip() - # New object -> object() - ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str) - - ret = docstr_defaults.get(ret_str, ret_str) - - return param_str, ret - - -def _create_from_name(evaluator, module, compiled_object, name): - obj = compiled_object.obj - faked = None - try: - faked = fake.get_faked(evaluator, module, obj, parent_context=compiled_object, name=name) - if faked.type == 'funcdef': - from jedi.evaluate.context.function import FunctionContext - return FunctionContext(evaluator, compiled_object, faked) - except fake.FakeDoesNotExist: - pass - - try: - obj = getattr(obj, name) - except AttributeError: - # Happens e.g. 
in properties of - # PyQt4.QtGui.QStyleOptionComboBox.currentText - # -> just set it to None - obj = None - return create(evaluator, obj, parent_context=compiled_object, faked=faked) - - -def builtin_from_name(evaluator, string): - bltn_obj = getattr(_builtins, string) - return create(evaluator, bltn_obj) - - -def _a_generator(foo): - """Used to have an object to return for generators.""" - yield 42 - yield foo - - -_SPECIAL_OBJECTS = { - 'FUNCTION_CLASS': type(load_module), - 'METHOD_CLASS': type(CompiledObject.is_class), - 'MODULE_CLASS': type(os), - 'GENERATOR_OBJECT': _a_generator(1.0), - 'BUILTINS': _builtins, -} + assert isinstance(obj, (int, float, str, bytes, unicode, slice, complex)) + return create_from_access_path( + evaluator, + evaluator.compiled_subprocess.create_simple_object(obj) + ) def get_special_object(evaluator, identifier): - obj = _SPECIAL_OBJECTS[identifier] - return create(evaluator, obj, parent_context=create(evaluator, _builtins)) + return create_from_access_path( + evaluator, + evaluator.compiled_subprocess.get_special_object(identifier) + ) -def compiled_objects_cache(attribute_name): - def decorator(func): - """ - This decorator caches just the ids, oopposed to caching the object itself. - Caching the id has the advantage that an object doesn't need to be - hashable. - """ - def wrapper(evaluator, obj, parent_context=None, module=None, faked=None): - cache = getattr(evaluator, attribute_name) - # Do a very cheap form of caching here. - key = id(obj), id(parent_context) - try: - return cache[key][0] - except KeyError: - # TODO this whole decorator is way too ugly - result = func(evaluator, obj, parent_context, module, faked) - # Need to cache all of them, otherwise the id could be overwritten. - cache[key] = result, obj, parent_context, module, faked - return result - return wrapper +def get_string_context_set(evaluator): + return builtin_from_name(evaluator, u'str').execute_evaluated() - return decorator - -@compiled_objects_cache('compiled_cache') -def create(evaluator, obj, parent_context=None, module=None, faked=None): - """ - A very weird interface class to this module. The more options provided the - more acurate loading compiled objects is. - """ - if inspect.ismodule(obj): - if parent_context is not None: - # Modules don't have parents, be careful with caching: recurse. 
- return create(evaluator, obj) - else: - if parent_context is None and obj is not _builtins: - return create(evaluator, obj, create(evaluator, _builtins)) - - try: - faked = fake.get_faked(evaluator, module, obj, parent_context=parent_context) - if faked.type == 'funcdef': - from jedi.evaluate.context.function import FunctionContext - return FunctionContext(evaluator, parent_context, faked) - except fake.FakeDoesNotExist: - pass - - return CompiledObject(evaluator, obj, parent_context, faked) +def load_module(evaluator, **kwargs): + access_path = evaluator.compiled_subprocess.load_module(**kwargs) + if access_path is None: + return None + return create_from_access_path(evaluator, access_path) diff --git a/pythonFiles/jedi/evaluate/compiled/access.py b/pythonFiles/jedi/evaluate/compiled/access.py new file mode 100644 index 000000000000..97a8dd0b5c3d --- /dev/null +++ b/pythonFiles/jedi/evaluate/compiled/access.py @@ -0,0 +1,490 @@ +import inspect +import types +import sys +from textwrap import dedent +import operator as op +from collections import namedtuple + +from jedi import debug +from jedi._compatibility import unicode, is_py3, is_py34, builtins, \ + py_version, force_unicode, print_to_stderr +from jedi.evaluate.compiled.getattr_static import getattr_static +from jedi.evaluate.utils import dotted_from_fs_path + + +MethodDescriptorType = type(str.replace) +# These are not considered classes and access is granted even though they have +# a __class__ attribute. +NOT_CLASS_TYPES = ( + types.BuiltinFunctionType, + types.CodeType, + types.FrameType, + types.FunctionType, + types.GeneratorType, + types.GetSetDescriptorType, + types.LambdaType, + types.MemberDescriptorType, + types.MethodType, + types.ModuleType, + types.TracebackType, + MethodDescriptorType +) + +if is_py3: + NOT_CLASS_TYPES += ( + types.MappingProxyType, + types.SimpleNamespace + ) + if is_py34: + NOT_CLASS_TYPES += (types.DynamicClassAttribute,) + + +# Those types don't exist in typing. +MethodDescriptorType = type(str.replace) +WrapperDescriptorType = type(set.__iter__) +# `object.__subclasshook__` is an already executed descriptor. +object_class_dict = type.__dict__["__dict__"].__get__(object) +ClassMethodDescriptorType = type(object_class_dict['__subclasshook__']) + +def _a_generator(foo): + """Used to have an object to return for generators.""" + yield 42 + yield foo + + +_sentinel = object() + +# Maps Python syntax to the operator module. +COMPARISON_OPERATORS = { + '==': op.eq, + '!=': op.ne, + 'is': op.is_, + 'is not': op.is_not, + '<': op.lt, + '<=': op.le, + '>': op.gt, + '>=': op.ge, +} + +_OPERATORS = { + '+': op.add, + '-': op.sub, +} +_OPERATORS.update(COMPARISON_OPERATORS) + +ALLOWED_DESCRIPTOR_ACCESS = ( + types.FunctionType, + types.GetSetDescriptorType, + types.MemberDescriptorType, + MethodDescriptorType, + WrapperDescriptorType, + ClassMethodDescriptorType, + staticmethod, + classmethod, +) + + +def safe_getattr(obj, name, default=_sentinel): + try: + attr, is_get_descriptor = getattr_static(obj, name) + except AttributeError: + if default is _sentinel: + raise + return default + else: + if type(attr) in ALLOWED_DESCRIPTOR_ACCESS: + # In case of descriptors that have get methods we cannot return + # it's value, because that would mean code execution. 
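The comment above is the heart of the access layer's safety rule: resolving a property or any other get-descriptor would run user code, so only the descriptor object itself may be handed back. The standard library's inspect.getattr_static shows the same "look, don't run" behaviour that the vendored getattr_static is used for here; illustrative only:

import inspect


class Trap(object):
    @property
    def boom(self):
        raise RuntimeError('side effect!')


t = Trap()
static = inspect.getattr_static(t, 'boom')   # returns the property object, nothing runs
assert isinstance(static, property)
# A plain getattr(t, 'boom') would raise RuntimeError here.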
+ return getattr(obj, name) + return attr + + +SignatureParam = namedtuple( + 'SignatureParam', + 'name has_default default has_annotation annotation kind_name' +) + + +def compiled_objects_cache(attribute_name): + def decorator(func): + """ + This decorator caches just the ids, oopposed to caching the object itself. + Caching the id has the advantage that an object doesn't need to be + hashable. + """ + def wrapper(evaluator, obj, parent_context=None): + cache = getattr(evaluator, attribute_name) + # Do a very cheap form of caching here. + key = id(obj) + try: + cache[key] + return cache[key][0] + except KeyError: + # TODO wuaaaarrghhhhhhhh + if attribute_name == 'mixed_cache': + result = func(evaluator, obj, parent_context) + else: + result = func(evaluator, obj) + # Need to cache all of them, otherwise the id could be overwritten. + cache[key] = result, obj, parent_context + return result + return wrapper + + return decorator + + +def create_access(evaluator, obj): + return evaluator.compiled_subprocess.get_or_create_access_handle(obj) + + +def load_module(evaluator, path=None, name=None, sys_path=None): + if sys_path is None: + sys_path = list(evaluator.get_sys_path()) + if path is not None: + dotted_path = dotted_from_fs_path(path, sys_path=sys_path) + else: + dotted_path = name + + temp, sys.path = sys.path, sys_path + try: + __import__(dotted_path) + except ImportError: + # If a module is "corrupt" or not really a Python module or whatever. + debug.warning('Module %s not importable in path %s.', dotted_path, path) + return None + except Exception: + # Since __import__ pretty much makes code execution possible, just + # catch any error here and print it. + import traceback + print_to_stderr("Cannot import:\n%s" % traceback.format_exc()) + return None + finally: + sys.path = temp + + # Just access the cache after import, because of #59 as well as the very + # complicated import structure of Python. + module = sys.modules[dotted_path] + return create_access_path(evaluator, module) + + +class AccessPath(object): + def __init__(self, accesses): + self.accesses = accesses + + # Writing both of these methods here looks a bit ridiculous. However with + # the differences of Python 2/3 it's actually necessary, because we will + # otherwise have a accesses attribute that is bytes instead of unicode. 
+ def __getstate__(self): + return self.accesses + + def __setstate__(self, value): + self.accesses = value + + +def create_access_path(evaluator, obj): + access = create_access(evaluator, obj) + return AccessPath(access.get_access_path_tuples()) + + +def _force_unicode_decorator(func): + return lambda *args, **kwargs: force_unicode(func(*args, **kwargs)) + + +class DirectObjectAccess(object): + def __init__(self, evaluator, obj): + self._evaluator = evaluator + self._obj = obj + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.get_repr()) + + def _create_access(self, obj): + return create_access(self._evaluator, obj) + + def _create_access_path(self, obj): + return create_access_path(self._evaluator, obj) + + def py__bool__(self): + return bool(self._obj) + + def py__file__(self): + try: + return self._obj.__file__ + except AttributeError: + return None + + def py__doc__(self, include_call_signature=False): + return force_unicode(inspect.getdoc(self._obj)) or u'' + + def py__name__(self): + if not _is_class_instance(self._obj) or \ + inspect.ismethoddescriptor(self._obj): # slots + cls = self._obj + else: + try: + cls = self._obj.__class__ + except AttributeError: + # happens with numpy.core.umath._UFUNC_API (you get it + # automatically by doing `import numpy`. + return None + + try: + return force_unicode(cls.__name__) + except AttributeError: + return None + + def py__mro__accesses(self): + return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:]) + + def py__getitem__(self, index): + if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): + # Get rid of side effects, we won't call custom `__getitem__`s. + return None + + return self._create_access_path(self._obj[index]) + + def py__iter__list(self): + if type(self._obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): + # Get rid of side effects, we won't call custom `__getitem__`s. + return [] + + lst = [] + for i, part in enumerate(self._obj): + if i > 20: + # Should not go crazy with large iterators + break + lst.append(self._create_access_path(part)) + return lst + + def py__class__(self): + return self._create_access_path(self._obj.__class__) + + def py__bases__(self): + return [self._create_access_path(base) for base in self._obj.__bases__] + + @_force_unicode_decorator + def get_repr(self): + builtins = 'builtins', '__builtin__' + + if inspect.ismodule(self._obj): + return repr(self._obj) + # Try to avoid execution of the property. + if safe_getattr(self._obj, '__module__', default='') in builtins: + return repr(self._obj) + + type_ = type(self._obj) + if type_ == type: + return type.__repr__(self._obj) + + if safe_getattr(type_, '__module__', default='') in builtins: + # Allow direct execution of repr for builtins. + return repr(self._obj) + return object.__repr__(self._obj) + + def is_class(self): + return inspect.isclass(self._obj) + + def ismethoddescriptor(self): + return inspect.ismethoddescriptor(self._obj) + + def dir(self): + return list(map(force_unicode, dir(self._obj))) + + def has_iter(self): + try: + iter(self._obj) + return True + except TypeError: + return False + + def is_allowed_getattr(self, name): + # TODO this API is ugly. 
+ try: + attr, is_get_descriptor = getattr_static(self._obj, name) + except AttributeError: + return False, False + else: + if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS: + # In case of descriptors that have get methods we cannot return + # it's value, because that would mean code execution. + return True, True + return True, False + + def getattr(self, name, default=_sentinel): + try: + return self._create_access(getattr(self._obj, name)) + except AttributeError: + # Happens e.g. in properties of + # PyQt4.QtGui.QStyleOptionComboBox.currentText + # -> just set it to None + if default is _sentinel: + raise + return self._create_access(default) + + def get_safe_value(self): + if type(self._obj) in (bool, bytes, float, int, str, unicode, slice): + return self._obj + raise ValueError("Object is type %s and not simple" % type(self._obj)) + + def get_api_type(self): + obj = self._obj + if self.is_class(): + return u'class' + elif inspect.ismodule(obj): + return u'module' + elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \ + or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj): + return u'function' + # Everything else... + return u'instance' + + def get_access_path_tuples(self): + accesses = [create_access(self._evaluator, o) for o in self._get_objects_path()] + return [(access.py__name__(), access) for access in accesses] + + def _get_objects_path(self): + def get(): + obj = self._obj + yield obj + try: + obj = obj.__objclass__ + except AttributeError: + pass + else: + yield obj + + try: + # Returns a dotted string path. + imp_plz = obj.__module__ + except AttributeError: + # Unfortunately in some cases like `int` there's no __module__ + if not inspect.ismodule(obj): + yield builtins + else: + if imp_plz is None: + # Happens for example in `(_ for _ in []).send.__module__`. + yield builtins + else: + try: + # TODO use sys.modules, __module__ can be faked. + yield sys.modules[imp_plz] + except KeyError: + # __module__ can be something arbitrary that doesn't exist. + yield builtins + + return list(reversed(list(get()))) + + def execute_operation(self, other_access_handle, operator): + other_access = other_access_handle.access + op = _OPERATORS[operator] + return self._create_access_path(op(self._obj, other_access._obj)) + + def needs_type_completions(self): + return inspect.isclass(self._obj) and self._obj != type + + def get_signature_params(self): + obj = self._obj + if py_version < 33: + raise ValueError("inspect.signature was introduced in 3.3") + if py_version == 34: + # In 3.4 inspect.signature are wrong for str and int. This has + # been fixed in 3.5. The signature of object is returned, + # because no signature was found for str. Here we imitate 3.5 + # logic and just ignore the signature if the magic methods + # don't match object. + # 3.3 doesn't even have the logic and returns nothing for str + # and classes that inherit from object. + user_def = inspect._signature_get_user_defined_method + if (inspect.isclass(obj) + and not user_def(type(obj), '__init__') + and not user_def(type(obj), '__new__') + and (obj.__init__ != object.__init__ + or obj.__new__ != object.__new__)): + raise ValueError + + try: + signature = inspect.signature(obj) + except (RuntimeError, TypeError): + # Reading the code of the function in Python 3.6 implies there are + # at least these errors that might occur if something is wrong with + # the signature. In that case we just want a simple escape for now. 
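execute_operation above dispatches textual operators through the operator module instead of eval'ing anything, using the table defined earlier in this file. The core of that mapping in isolation; illustrative only:

import operator as op

OPERATORS = {'+': op.add, '-': op.sub, '==': op.eq, '!=': op.ne, '<': op.lt}


def execute_operation(left, right, symbol):
    return OPERATORS[symbol](left, right)


assert execute_operation(40, 2, '+') == 42
assert execute_operation(1, 2, '<') is True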
+ raise ValueError + return [ + SignatureParam( + name=p.name, + has_default=p.default is not p.empty, + default=self._create_access_path(p.default), + has_annotation=p.annotation is not p.empty, + annotation=self._create_access_path(p.annotation), + kind_name=str(p.kind) + ) for p in signature.parameters.values() + ] + + def negate(self): + return self._create_access_path(-self._obj) + + def dict_values(self): + return [self._create_access_path(v) for v in self._obj.values()] + + def is_super_class(self, exception): + return issubclass(exception, self._obj) + + def get_dir_infos(self): + """ + Used to return a couple of infos that are needed when accessing the sub + objects of an objects + """ + # TODO is_allowed_getattr might raise an AttributeError + tuples = dict( + (force_unicode(name), self.is_allowed_getattr(name)) + for name in self.dir() + ) + return self.needs_type_completions(), tuples + + +def _is_class_instance(obj): + """Like inspect.* methods.""" + try: + cls = obj.__class__ + except AttributeError: + return False + else: + return cls != type and not issubclass(cls, NOT_CLASS_TYPES) + + +if py_version >= 35: + exec(compile(dedent(""" + async def _coroutine(): pass + _coroutine = _coroutine() + CoroutineType = type(_coroutine) + _coroutine.close() # Prevent ResourceWarning + """), 'blub', 'exec')) + _coroutine_wrapper = _coroutine.__await__() +else: + _coroutine = None + _coroutine_wrapper = None + +if py_version >= 36: + exec(compile(dedent(""" + async def _async_generator(): + yield + _async_generator = _async_generator() + AsyncGeneratorType = type(_async_generator) + """), 'blub', 'exec')) +else: + _async_generator = None + +class _SPECIAL_OBJECTS(object): + FUNCTION_CLASS = types.FunctionType + METHOD_CLASS = type(DirectObjectAccess.py__bool__) + MODULE_CLASS = types.ModuleType + GENERATOR_OBJECT = _a_generator(1.0) + BUILTINS = builtins + COROUTINE = _coroutine + COROUTINE_WRAPPER = _coroutine_wrapper + ASYNC_GENERATOR = _async_generator + + +def get_special_object(evaluator, identifier): + obj = getattr(_SPECIAL_OBJECTS, identifier) + return create_access_path(evaluator, obj) diff --git a/pythonFiles/jedi/evaluate/compiled/context.py b/pythonFiles/jedi/evaluate/compiled/context.py new file mode 100644 index 000000000000..f81509d73f72 --- /dev/null +++ b/pythonFiles/jedi/evaluate/compiled/context.py @@ -0,0 +1,474 @@ +""" +Imitate the parser representation. +""" +import re +from functools import partial + +from jedi import debug +from jedi._compatibility import force_unicode, Parameter +from jedi.cache import underscore_memoization, memoize_method +from jedi.evaluate.filters import AbstractFilter, AbstractNameDefinition, \ + ContextNameMixin +from jedi.evaluate.base_context import Context, ContextSet +from jedi.evaluate.lazy_context import LazyKnownContext +from jedi.evaluate.compiled.access import _sentinel +from jedi.evaluate.cache import evaluator_function_cache +from . import fake + + +class CheckAttribute(object): + """Raises an AttributeError if the attribute X isn't available.""" + def __init__(self, func): + self.func = func + # Remove the py in front of e.g. py__call__. + self.check_name = force_unicode(func.__name__[2:]) + + def __get__(self, instance, owner): + if instance is None: + return self + + # This might raise an AttributeError. That's wanted. + if self.check_name == '__iter__': + # Python iterators are a bit strange, because there's no need for + # the __iter__ function as long as __getitem__ is defined (it will + # just start with __getitem__(0). 
This is especially true for + # Python 2 strings, where `str.__iter__` is not even defined. + if not instance.access_handle.has_iter(): + raise AttributeError + else: + instance.access_handle.getattr(self.check_name) + return partial(self.func, instance) + + +class CompiledObject(Context): + def __init__(self, evaluator, access_handle, parent_context=None, faked_class=None): + super(CompiledObject, self).__init__(evaluator, parent_context) + self.access_handle = access_handle + # This attribute will not be set for most classes, except for fakes. + self.tree_node = faked_class + + @CheckAttribute + def py__call__(self, params): + if self.tree_node is not None and self.tree_node.type == 'funcdef': + from jedi.evaluate.context.function import FunctionContext + return FunctionContext( + self.evaluator, + parent_context=self.parent_context, + funcdef=self.tree_node + ).py__call__(params) + if self.access_handle.is_class(): + from jedi.evaluate.context import CompiledInstance + return ContextSet(CompiledInstance(self.evaluator, self.parent_context, self, params)) + else: + return ContextSet.from_iterable(self._execute_function(params)) + + @CheckAttribute + def py__class__(self): + return create_from_access_path(self.evaluator, self.access_handle.py__class__()) + + @CheckAttribute + def py__mro__(self): + return (self,) + tuple( + create_from_access_path(self.evaluator, access) + for access in self.access_handle.py__mro__accesses() + ) + + @CheckAttribute + def py__bases__(self): + return tuple( + create_from_access_path(self.evaluator, access) + for access in self.access_handle.py__bases__() + ) + + def py__bool__(self): + return self.access_handle.py__bool__() + + def py__file__(self): + return self.access_handle.py__file__() + + def is_class(self): + return self.access_handle.is_class() + + def py__doc__(self, include_call_signature=False): + return self.access_handle.py__doc__() + + def get_param_names(self): + try: + signature_params = self.access_handle.get_signature_params() + except ValueError: # Has no signature + params_str, ret = self._parse_function_doc() + tokens = params_str.split(',') + if self.access_handle.ismethoddescriptor(): + tokens.insert(0, 'self') + for p in tokens: + parts = p.strip().split('=') + yield UnresolvableParamName(self, parts[0]) + else: + for signature_param in signature_params: + yield SignatureParamName(self, signature_param) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr()) + + @underscore_memoization + def _parse_function_doc(self): + doc = self.py__doc__() + if doc is None: + return '', '' + + return _parse_function_doc(doc) + + @property + def api_type(self): + return self.access_handle.get_api_type() + + @underscore_memoization + def _cls(self): + """ + We used to limit the lookups for instantiated objects like list(), but + this is not the case anymore. Python itself + """ + # Ensures that a CompiledObject is returned that is not an instance (like list) + return self + + def get_filters(self, search_global=False, is_instance=False, + until_position=None, origin_scope=None): + yield self._ensure_one_filter(is_instance) + + @memoize_method + def _ensure_one_filter(self, is_instance): + """ + search_global shouldn't change the fact that there's one dict, this way + there's only one `object`. 
+ """ + return CompiledObjectFilter(self.evaluator, self, is_instance) + + @CheckAttribute + def py__getitem__(self, index): + access = self.access_handle.py__getitem__(index) + if access is None: + return ContextSet() + + return ContextSet(create_from_access_path(self.evaluator, access)) + + @CheckAttribute + def py__iter__(self): + for access in self.access_handle.py__iter__list(): + yield LazyKnownContext(create_from_access_path(self.evaluator, access)) + + def py__name__(self): + return self.access_handle.py__name__() + + @property + def name(self): + name = self.py__name__() + if name is None: + name = self.access_handle.get_repr() + return CompiledContextName(self, name) + + def _execute_function(self, params): + from jedi.evaluate import docstrings + from jedi.evaluate.compiled import builtin_from_name + if self.api_type != 'function': + return + + for name in self._parse_function_doc()[1].split(): + try: + # TODO wtf is this? this is exactly the same as the thing + # below. It uses getattr as well. + self.evaluator.builtins_module.access_handle.getattr(name) + except AttributeError: + continue + else: + bltn_obj = builtin_from_name(self.evaluator, name) + for result in bltn_obj.execute(params): + yield result + for type_ in docstrings.infer_return_types(self): + yield type_ + + def dict_values(self): + return ContextSet.from_iterable( + create_from_access_path(self.evaluator, access) + for access in self.access_handle.dict_values() + ) + + def get_safe_value(self, default=_sentinel): + try: + return self.access_handle.get_safe_value() + except ValueError: + if default == _sentinel: + raise + return default + + def execute_operation(self, other, operator): + return create_from_access_path( + self.evaluator, + self.access_handle.execute_operation(other.access_handle, operator) + ) + + def negate(self): + return create_from_access_path(self.evaluator, self.access_handle.negate()) + + def is_super_class(self, exception): + return self.access_handle.is_super_class(exception) + + +class CompiledName(AbstractNameDefinition): + def __init__(self, evaluator, parent_context, name): + self._evaluator = evaluator + self.parent_context = parent_context + self.string_name = name + + def __repr__(self): + try: + name = self.parent_context.name # __name__ is not defined all the time + except AttributeError: + name = None + return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name) + + @property + def api_type(self): + return next(iter(self.infer())).api_type + + @underscore_memoization + def infer(self): + return ContextSet(create_from_name( + self._evaluator, self.parent_context, self.string_name + )) + + +class SignatureParamName(AbstractNameDefinition): + api_type = u'param' + + def __init__(self, compiled_obj, signature_param): + self.parent_context = compiled_obj.parent_context + self._signature_param = signature_param + + @property + def string_name(self): + return self._signature_param.name + + def get_kind(self): + return getattr(Parameter, self._signature_param.kind_name) + + def is_keyword_param(self): + return self._signature_param + + def infer(self): + p = self._signature_param + evaluator = self.parent_context.evaluator + contexts = ContextSet() + if p.has_default: + contexts = ContextSet(create_from_access_path(evaluator, p.default)) + if p.has_annotation: + annotation = create_from_access_path(evaluator, p.annotation) + contexts |= annotation.execute_evaluated() + return contexts + + +class UnresolvableParamName(AbstractNameDefinition): + api_type = u'param' + + def 
__init__(self, compiled_obj, name): + self.parent_context = compiled_obj.parent_context + self.string_name = name + + def get_kind(self): + return Parameter.POSITIONAL_ONLY + + def infer(self): + return ContextSet() + + +class CompiledContextName(ContextNameMixin, AbstractNameDefinition): + def __init__(self, context, name): + self.string_name = name + self._context = context + self.parent_context = context.parent_context + + +class EmptyCompiledName(AbstractNameDefinition): + """ + Accessing some names will raise an exception. To avoid not having any + completions, just give Jedi the option to return this object. It infers to + nothing. + """ + def __init__(self, evaluator, name): + self.parent_context = evaluator.builtins_module + self.string_name = name + + def infer(self): + return ContextSet() + + +class CompiledObjectFilter(AbstractFilter): + name_class = CompiledName + + def __init__(self, evaluator, compiled_object, is_instance=False): + self._evaluator = evaluator + self._compiled_object = compiled_object + self._is_instance = is_instance + + def get(self, name): + return self._get( + name, + lambda: self._compiled_object.access_handle.is_allowed_getattr(name), + lambda: self._compiled_object.access_handle.dir(), + check_has_attribute=True + ) + + def _get(self, name, allowed_getattr_callback, dir_callback, check_has_attribute=False): + """ + To remove quite a few access calls we introduced the callback here. + """ + has_attribute, is_descriptor = allowed_getattr_callback() + if check_has_attribute and not has_attribute: + return [] + + # Always use unicode objects in Python 2 from here. + name = force_unicode(name) + + if is_descriptor or not has_attribute: + return [self._get_cached_name(name, is_empty=True)] + + if self._is_instance and name not in dir_callback(): + return [] + return [self._get_cached_name(name)] + + @memoize_method + def _get_cached_name(self, name, is_empty=False): + if is_empty: + return EmptyCompiledName(self._evaluator, name) + else: + return self._create_name(name) + + def values(self): + from jedi.evaluate.compiled import builtin_from_name + names = [] + needs_type_completions, dir_infos = self._compiled_object.access_handle.get_dir_infos() + for name in dir_infos: + names += self._get( + name, + lambda: dir_infos[name], + lambda: dir_infos.keys(), + ) + + # ``dir`` doesn't include the type names. + if not self._is_instance and needs_type_completions: + for filter in builtin_from_name(self._evaluator, u'type').get_filters(): + names += filter.values() + return names + + def _create_name(self, name): + return self.name_class(self._evaluator, self._compiled_object, name) + + +docstr_defaults = { + 'floating point number': u'float', + 'character': u'str', + 'integer': u'int', + 'dictionary': u'dict', + 'string': u'str', +} + + +def _parse_function_doc(doc): + """ + Takes a function and returns the params and return value as a tuple. + This is nothing more than a docstring parser. 
+ + TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None + TODO docstrings like 'tuple of integers' + """ + doc = force_unicode(doc) + # parse round parentheses: def func(a, (b,c)) + try: + count = 0 + start = doc.index('(') + for i, s in enumerate(doc[start:]): + if s == '(': + count += 1 + elif s == ')': + count -= 1 + if count == 0: + end = start + i + break + param_str = doc[start + 1:end] + except (ValueError, UnboundLocalError): + # ValueError for doc.index + # UnboundLocalError for undefined end in last line + debug.dbg('no brackets found - no param') + end = 0 + param_str = u'' + else: + # remove square brackets, that show an optional param ( = None) + def change_options(m): + args = m.group(1).split(',') + for i, a in enumerate(args): + if a and '=' not in a: + args[i] += '=None' + return ','.join(args) + + while True: + param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', + change_options, param_str) + if changes == 0: + break + param_str = param_str.replace('-', '_') # see: isinstance.__doc__ + + # parse return value + r = re.search(u'-[>-]* ', doc[end:end + 7]) + if r is None: + ret = u'' + else: + index = end + r.end() + # get result type, which can contain newlines + pattern = re.compile(r'(,\n|[^\n-])+') + ret_str = pattern.match(doc, index).group(0).strip() + # New object -> object() + ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str) + + ret = docstr_defaults.get(ret_str, ret_str) + + return param_str, ret + + +def create_from_name(evaluator, compiled_object, name): + faked = None + try: + faked = fake.get_faked_with_parent_context(compiled_object, name) + except fake.FakeDoesNotExist: + pass + + access = compiled_object.access_handle.getattr(name, default=None) + return create_cached_compiled_object( + evaluator, access, parent_context=compiled_object, faked=faked + ) + + +def _normalize_create_args(func): + """The cache doesn't care about keyword vs. 
normal args.""" + def wrapper(evaluator, obj, parent_context=None, faked=None): + return func(evaluator, obj, parent_context, faked) + return wrapper + + +def create_from_access_path(evaluator, access_path): + parent_context = None + for name, access in access_path.accesses: + try: + if parent_context is None: + faked = fake.get_faked_module(evaluator, access_path.accesses[0][0]) + else: + faked = fake.get_faked_with_parent_context(parent_context, name) + except fake.FakeDoesNotExist: + faked = None + + parent_context = create_cached_compiled_object(evaluator, access, parent_context, faked) + return parent_context + + +@_normalize_create_args +@evaluator_function_cache() +def create_cached_compiled_object(evaluator, access_handle, parent_context, faked): + return CompiledObject(evaluator, access_handle, parent_context, faked) diff --git a/pythonFiles/jedi/evaluate/compiled/fake.py b/pythonFiles/jedi/evaluate/compiled/fake.py index 60dbefe4acac..ac43ea286b48 100644 --- a/pythonFiles/jedi/evaluate/compiled/fake.py +++ b/pythonFiles/jedi/evaluate/compiled/fake.py @@ -5,73 +5,59 @@ """ import os -import inspect -import types from itertools import chain -from parso.python import tree +from jedi._compatibility import unicode -from jedi._compatibility import is_py3, builtins, unicode, is_py34 +fake_modules = {} -modules = {} +def _get_path_dict(): + path = os.path.dirname(os.path.abspath(__file__)) + base_path = os.path.join(path, 'fake') + dct = {} + for file_name in os.listdir(base_path): + if file_name.endswith('.pym'): + dct[file_name[:-4]] = os.path.join(base_path, file_name) + return dct -MethodDescriptorType = type(str.replace) -# These are not considered classes and access is granted even though they have -# a __class__ attribute. -NOT_CLASS_TYPES = ( - types.BuiltinFunctionType, - types.CodeType, - types.FrameType, - types.FunctionType, - types.GeneratorType, - types.GetSetDescriptorType, - types.LambdaType, - types.MemberDescriptorType, - types.MethodType, - types.ModuleType, - types.TracebackType, - MethodDescriptorType -) -if is_py3: - NOT_CLASS_TYPES += ( - types.MappingProxyType, - types.SimpleNamespace - ) - if is_py34: - NOT_CLASS_TYPES += (types.DynamicClassAttribute,) +_path_dict = _get_path_dict() class FakeDoesNotExist(Exception): pass -def _load_faked_module(grammar, module): - module_name = module.__name__ - if module_name == '__builtin__' and not is_py3: - module_name = 'builtins' +def _load_faked_module(evaluator, module_name): + try: + return fake_modules[module_name] + except KeyError: + pass + + check_module_name = module_name + if module_name == '__builtin__' and evaluator.environment.version_info.major == 2: + check_module_name = 'builtins' try: - return modules[module_name] + path = _path_dict[check_module_name] except KeyError: - path = os.path.dirname(os.path.abspath(__file__)) - try: - with open(os.path.join(path, 'fake', module_name) + '.pym') as f: - source = f.read() - except IOError: - modules[module_name] = None - return - modules[module_name] = m = grammar.parse(unicode(source)) - - if module_name == 'builtins' and not is_py3: - # There are two implementations of `open` for either python 2/3. - # -> Rename the python2 version (`look at fake/builtins.pym`). 
- open_func = _search_scope(m, 'open') - open_func.children[1].value = 'open_python3' - open_func = _search_scope(m, 'open_python2') - open_func.children[1].value = 'open' - return m + fake_modules[module_name] = None + return + + with open(path) as f: + source = f.read() + + fake_modules[module_name] = m = evaluator.latest_grammar.parse(unicode(source)) + + if check_module_name != module_name: + # There are two implementations of `open` for either python 2/3. + # -> Rename the python2 version (`look at fake/builtins.pym`). + open_func = _search_scope(m, 'open') + open_func.children[1].value = 'open_python3' + open_func = _search_scope(m, 'open_python2') + open_func.children[1].value = 'open' + return m def _search_scope(scope, obj_name): @@ -80,134 +66,17 @@ def _search_scope(scope, obj_name): return s -def get_module(obj): - if inspect.ismodule(obj): - return obj - try: - obj = obj.__objclass__ - except AttributeError: - pass - - try: - imp_plz = obj.__module__ - except AttributeError: - # Unfortunately in some cases like `int` there's no __module__ - return builtins - else: - if imp_plz is None: - # Happens for example in `(_ for _ in []).send.__module__`. - return builtins - else: - try: - return __import__(imp_plz) - except ImportError: - # __module__ can be something arbitrary that doesn't exist. - return builtins - - -def _faked(grammar, module, obj, name): - # Crazy underscore actions to try to escape all the internal madness. - if module is None: - module = get_module(obj) - - faked_mod = _load_faked_module(grammar, module) - if faked_mod is None: - return None, None - - # Having the module as a `parser.python.tree.Module`, we need to scan - # for methods. - if name is None: - if inspect.isbuiltin(obj) or inspect.isclass(obj): - return _search_scope(faked_mod, obj.__name__), faked_mod - elif not inspect.isclass(obj): - # object is a method or descriptor - try: - objclass = obj.__objclass__ - except AttributeError: - return None, None - else: - cls = _search_scope(faked_mod, objclass.__name__) - if cls is None: - return None, None - return _search_scope(cls, obj.__name__), faked_mod - else: - if obj is module: - return _search_scope(faked_mod, name), faked_mod - else: - try: - cls_name = obj.__name__ - except AttributeError: - return None, None - cls = _search_scope(faked_mod, cls_name) - if cls is None: - return None, None - return _search_scope(cls, name), faked_mod - return None, None - - -def memoize_faked(obj): - """ - A typical memoize function that ignores issues with non hashable results. - """ - cache = obj.cache = {} - - def memoizer(*args, **kwargs): - key = (obj, args, frozenset(kwargs.items())) - try: - result = cache[key] - except (TypeError, ValueError): - return obj(*args, **kwargs) - except KeyError: - result = obj(*args, **kwargs) - if result is not None: - cache[key] = obj(*args, **kwargs) - return result - else: - return result - return memoizer - - -@memoize_faked -def _get_faked(grammar, module, obj, name=None): - result, fake_module = _faked(grammar, module, obj, name) - if result is None: - # We're not interested in classes. What we want is functions. - raise FakeDoesNotExist - elif result.type == 'classdef': - return result, fake_module - else: - # Set the docstr which was previously not set (faked modules don't - # contain it). - assert result.type == 'funcdef' - doc = '"""%s"""' % obj.__doc__ # TODO need escapes. 
- suite = result.children[-1] - string = tree.String(doc, (0, 0), '') - new_line = tree.Newline('\n', (0, 0)) - docstr_node = tree.PythonNode('simple_stmt', [string, new_line]) - suite.children.insert(1, docstr_node) - return result, fake_module - - -def get_faked(evaluator, module, obj, name=None, parent_context=None): - if parent_context and parent_context.tree_node is not None: +def get_faked_with_parent_context(parent_context, name): + if parent_context.tree_node is not None: # Try to search in already clearly defined stuff. found = _search_scope(parent_context.tree_node, name) if found is not None: return found - else: - raise FakeDoesNotExist + raise FakeDoesNotExist - faked, fake_module = _get_faked(evaluator.latest_grammar, module and module.obj, obj, name) - if module is not None: - module.get_used_names = fake_module.get_used_names - return faked - -def is_class_instance(obj): - """Like inspect.* methods.""" - try: - cls = obj.__class__ - except AttributeError: - return False - else: - return cls != type and not issubclass(cls, NOT_CLASS_TYPES) +def get_faked_module(evaluator, string_name): + module = _load_faked_module(evaluator, string_name) + if module is None: + raise FakeDoesNotExist + return module diff --git a/pythonFiles/jedi/evaluate/compiled/fake/builtins.pym b/pythonFiles/jedi/evaluate/compiled/fake/builtins.pym index 1225929c2cb7..46ec619fb420 100644 --- a/pythonFiles/jedi/evaluate/compiled/fake/builtins.pym +++ b/pythonFiles/jedi/evaluate/compiled/fake/builtins.pym @@ -201,10 +201,13 @@ class dict(): # has a strange docstr pass + def __getitem__(self, obj): + return self.__elements[obj] + def get(self, k, d=None): # TODO implement try: - #return self.__elements[k] + return self.__elements[k] pass except KeyError: return d diff --git a/pythonFiles/jedi/evaluate/compiled/getattr_static.py b/pythonFiles/jedi/evaluate/compiled/getattr_static.py index 9f8cd8a838cf..946ac09b6b06 100644 --- a/pythonFiles/jedi/evaluate/compiled/getattr_static.py +++ b/pythonFiles/jedi/evaluate/compiled/getattr_static.py @@ -10,6 +10,7 @@ _sentinel = object() + def _check_instance(obj, attr): instance_dict = {} try: @@ -28,6 +29,7 @@ def _check_class(klass, attr): pass return _sentinel + def _is_type(obj): try: _static_getmro(obj) @@ -87,7 +89,7 @@ class Foo(object): pass return getattr(klass, '__dict__', _sentinel) return _shadowed_dict_newstyle(klass) - class _OldStyleClass(): + class _OldStyleClass: pass _oldstyle_instance_type = type(_OldStyleClass()) @@ -122,7 +124,7 @@ def _safe_hasattr(obj, name): def _safe_is_data_descriptor(obj): - return (_safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')) + return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__') def getattr_static(obj, attr, default=_sentinel): @@ -143,8 +145,7 @@ def getattr_static(obj, attr, default=_sentinel): if not _is_type(obj): klass = _get_type(obj) dict_attr = _shadowed_dict(klass) - if (dict_attr is _sentinel or - type(dict_attr) is types.MemberDescriptorType): + if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType): instance_result = _check_instance(obj, attr) else: klass = obj diff --git a/pythonFiles/jedi/evaluate/compiled/mixed.py b/pythonFiles/jedi/evaluate/compiled/mixed.py index ac0f6dd614db..b8003502e3d7 100644 --- a/pythonFiles/jedi/evaluate/compiled/mixed.py +++ b/pythonFiles/jedi/evaluate/compiled/mixed.py @@ -5,6 +5,8 @@ import inspect import os +from jedi.parser_utils import get_cached_code_lines + from jedi import settings from jedi.evaluate import 
compiled from jedi.cache import underscore_memoization @@ -13,6 +15,8 @@ from jedi.evaluate.context import ModuleContext from jedi.evaluate.cache import evaluator_function_cache from jedi.evaluate.compiled.getattr_static import getattr_static +from jedi.evaluate.compiled.access import compiled_objects_cache +from jedi.evaluate.compiled.context import create_cached_compiled_object class MixedObject(object): @@ -37,7 +41,7 @@ def __init__(self, evaluator, parent_context, compiled_object, tree_context): self.parent_context = parent_context self.compiled_object = compiled_object self._context = tree_context - self.obj = compiled_object.obj + self.access_handle = compiled_object.access_handle # We have to overwrite everything that has to do with trailers, name # lookups and filters to make it possible to route name lookups towards @@ -49,7 +53,7 @@ def get_filters(self, *args, **kwargs): yield MixedObjectFilter(self.evaluator, self) def __repr__(self): - return '<%s: %s>' % (type(self).__name__, repr(self.obj)) + return '<%s: %s>' % (type(self).__name__, self.access_handle.get_repr()) def __getattr__(self, name): return getattr(self._context, name) @@ -64,7 +68,7 @@ def start_pos(self): contexts = list(self.infer()) if not contexts: # This means a start_pos that doesn't exist (compiled objects). - return (0, 0) + return 0, 0 return contexts[0].name.start_pos @start_pos.setter @@ -74,17 +78,11 @@ def start_pos(self, value): @underscore_memoization def infer(self): - obj = self.parent_context.obj - try: - # TODO use logic from compiled.CompiledObjectFilter - obj = getattr(obj, self.string_name) - except AttributeError: - # Happens e.g. in properties of - # PyQt4.QtGui.QStyleOptionComboBox.currentText - # -> just set it to None - obj = None + access_handle = self.parent_context.access_handle + # TODO use logic from compiled.CompiledObjectFilter + access_handle = access_handle.getattr(self.string_name, default=None) return ContextSet( - _create(self._evaluator, obj, parent_context=self.parent_context) + _create(self._evaluator, access_handle, parent_context=self.parent_context) ) @property @@ -105,17 +103,17 @@ def __init__(self, evaluator, mixed_object, is_instance=False): @evaluator_function_cache() -def _load_module(evaluator, path, python_object): - module = evaluator.grammar.parse( +def _load_module(evaluator, path): + module_node = evaluator.grammar.parse( path=path, cache=True, diff_cache=True, cache_path=settings.cache_directory ).get_root_node() - python_module = inspect.getmodule(python_object) - - evaluator.modules[python_module.__name__] = module - return module + # python_module = inspect.getmodule(python_object) + # TODO we should actually make something like this possible. + #evaluator.modules[python_module.__name__] = module_node + return module_node def _get_object_to_check(python_object): @@ -135,39 +133,43 @@ def _get_object_to_check(python_object): raise TypeError # Prevents computation of `repr` within inspect. -def find_syntax_node_name(evaluator, python_object): +def _find_syntax_node_name(evaluator, access_handle): + # TODO accessing this is bad, but it probably doesn't matter that much, + # because we're working with interpreteters only here. + python_object = access_handle.access._obj try: python_object = _get_object_to_check(python_object) path = inspect.getsourcefile(python_object) except TypeError: # The type might not be known (e.g. 
class_with_dict.__weakref__) - return None, None + return None if path is None or not os.path.exists(path): # The path might not exist or be e.g. . - return None, None + return None - module = _load_module(evaluator, path, python_object) + module_node = _load_module(evaluator, path) if inspect.ismodule(python_object): # We don't need to check names for modules, because there's not really # a way to write a module in a module in Python (and also __name__ can # be something like ``email.utils``). - return module, path + code_lines = get_cached_code_lines(evaluator.grammar, path) + return module_node, module_node, path, code_lines try: name_str = python_object.__name__ except AttributeError: # Stuff like python_function.__code__. - return None, None + return None if name_str == '': - return None, None # It's too hard to find lambdas. + return None # It's too hard to find lambdas. # Doesn't always work (e.g. os.stat_result) try: - names = module.get_used_names()[name_str] + names = module_node.get_used_names()[name_str] except KeyError: - return None, None + return None names = [n for n in names if n.is_definition()] try: @@ -184,33 +186,40 @@ def find_syntax_node_name(evaluator, python_object): # There's a chance that the object is not available anymore, because # the code has changed in the background. if line_names: - return line_names[-1].parent, path + names = line_names + code_lines = get_cached_code_lines(evaluator.grammar, path) # It's really hard to actually get the right definition, here as a last # resort we just return the last one. This chance might lead to odd # completions at some points but will lead to mostly correct type # inference, because people tend to define a public name in a module only # once. - return names[-1].parent, path + return module_node, names[-1].parent, path, code_lines -@compiled.compiled_objects_cache('mixed_cache') -def _create(evaluator, obj, parent_context=None, *args): - tree_node, path = find_syntax_node_name(evaluator, obj) +@compiled_objects_cache('mixed_cache') +def _create(evaluator, access_handle, parent_context, *args): + compiled_object = create_cached_compiled_object( + evaluator, access_handle, parent_context=parent_context.compiled_object) - compiled_object = compiled.create( - evaluator, obj, parent_context=parent_context.compiled_object) - if tree_node is None: + result = _find_syntax_node_name(evaluator, access_handle) + if result is None: return compiled_object - module_node = tree_node.get_root_node() + module_node, tree_node, path, code_lines = result + if parent_context.tree_node.get_root_node() == module_node: module_context = parent_context.get_root_context() else: - module_context = ModuleContext(evaluator, module_node, path=path) + module_context = ModuleContext( + evaluator, module_node, + path=path, + code_lines=code_lines, + ) # TODO this __name__ is probably wrong. name = compiled_object.get_root_context().py__name__() - imports.add_module(evaluator, name, module_context) + if name is not None: + imports.add_module_to_cache(evaluator, name, module_context) tree_context = module_context.create_context( tree_node, @@ -218,7 +227,7 @@ def _create(evaluator, obj, parent_context=None, *args): node_is_object=True ) if tree_node.type == 'classdef': - if not inspect.isclass(obj): + if not access_handle.is_class(): # Is an instance, not a class. 
tree_context, = tree_context.execute_evaluated() @@ -228,4 +237,3 @@ def _create(evaluator, obj, parent_context=None, *args): compiled_object, tree_context=tree_context ) - diff --git a/pythonFiles/jedi/evaluate/compiled/subprocess/__init__.py b/pythonFiles/jedi/evaluate/compiled/subprocess/__init__.py new file mode 100644 index 000000000000..9cc8704a457e --- /dev/null +++ b/pythonFiles/jedi/evaluate/compiled/subprocess/__init__.py @@ -0,0 +1,340 @@ +""" +Makes it possible to do the compiled analysis in a subprocess. This has two +goals: + +1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can + be ignored and dealt with. +2. Make it possible to handle different Python versions as well as virtualenvs. +""" + +import os +import sys +import subprocess +import socket +import errno +import weakref +import traceback +from functools import partial + +from jedi._compatibility import queue, is_py3, force_unicode, \ + pickle_dump, pickle_load, GeneralizedPopen +from jedi.cache import memoize_method +from jedi.evaluate.compiled.subprocess import functions +from jedi.evaluate.compiled.access import DirectObjectAccess, AccessPath, \ + SignatureParam +from jedi.api.exceptions import InternalError + +_subprocesses = {} + +_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py') + + +def get_subprocess(executable): + try: + return _subprocesses[executable] + except KeyError: + sub = _subprocesses[executable] = _CompiledSubprocess(executable) + return sub + + +def _get_function(name): + return getattr(functions, name) + + +class _EvaluatorProcess(object): + def __init__(self, evaluator): + self._evaluator_weakref = weakref.ref(evaluator) + self._evaluator_id = id(evaluator) + self._handles = {} + + def get_or_create_access_handle(self, obj): + id_ = id(obj) + try: + return self.get_access_handle(id_) + except KeyError: + access = DirectObjectAccess(self._evaluator_weakref(), obj) + handle = AccessHandle(self, access, id_) + self.set_access_handle(handle) + return handle + + def get_access_handle(self, id_): + return self._handles[id_] + + def set_access_handle(self, handle): + self._handles[handle.id] = handle + + +class EvaluatorSameProcess(_EvaluatorProcess): + """ + Basically just an easy access to functions.py. It has the same API + as EvaluatorSubprocess and does the same thing without using a subprocess. + This is necessary for the Interpreter process. + """ + def __getattr__(self, name): + return partial(_get_function(name), self._evaluator_weakref()) + + +class EvaluatorSubprocess(_EvaluatorProcess): + def __init__(self, evaluator, compiled_subprocess): + super(EvaluatorSubprocess, self).__init__(evaluator) + self._used = False + self._compiled_subprocess = compiled_subprocess + + def __getattr__(self, name): + func = _get_function(name) + + def wrapper(*args, **kwargs): + self._used = True + + result = self._compiled_subprocess.run( + self._evaluator_weakref(), + func, + args=args, + kwargs=kwargs, + ) + # IMO it should be possible to create a hook in pickle.load to + # mess with the loaded objects. However it's extremely complicated + # to work around this so just do it with this call. 
~ dave + return self._convert_access_handles(result) + + return wrapper + + def _convert_access_handles(self, obj): + if isinstance(obj, SignatureParam): + return SignatureParam(*self._convert_access_handles(tuple(obj))) + elif isinstance(obj, tuple): + return tuple(self._convert_access_handles(o) for o in obj) + elif isinstance(obj, list): + return [self._convert_access_handles(o) for o in obj] + elif isinstance(obj, AccessHandle): + try: + # Rewrite the access handle to one we're already having. + obj = self.get_access_handle(obj.id) + except KeyError: + obj.add_subprocess(self) + self.set_access_handle(obj) + elif isinstance(obj, AccessPath): + return AccessPath(self._convert_access_handles(obj.accesses)) + return obj + + def __del__(self): + if self._used: + self._compiled_subprocess.delete_evaluator(self._evaluator_id) + + +class _CompiledSubprocess(object): + _crashed = False + + def __init__(self, executable): + self._executable = executable + self._evaluator_deletion_queue = queue.deque() + + @property + @memoize_method + def _process(self): + parso_path = sys.modules['parso'].__file__ + args = ( + self._executable, + _MAIN_PATH, + os.path.dirname(os.path.dirname(parso_path)) + ) + return GeneralizedPopen( + args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + ) + + def run(self, evaluator, function, args=(), kwargs={}): + # Delete old evaluators. + while True: + try: + evaluator_id = self._evaluator_deletion_queue.pop() + except IndexError: + break + else: + self._send(evaluator_id, None) + + assert callable(function) + return self._send(id(evaluator), function, args, kwargs) + + def get_sys_path(self): + return self._send(None, functions.get_sys_path, (), {}) + + def kill(self): + self._crashed = True + try: + subprocess = _subprocesses[self._executable] + except KeyError: + # Fine it was already removed from the cache. + pass + else: + # In the `!=` case there is already a new subprocess in place + # and we don't need to do anything here anymore. + if subprocess == self: + del _subprocesses[self._executable] + + self._process.kill() + self._process.wait() + + def _send(self, evaluator_id, function, args=(), kwargs={}): + if self._crashed: + raise InternalError("The subprocess %s has crashed." % self._executable) + + if not is_py3: + # Python 2 compatibility + kwargs = {force_unicode(key): value for key, value in kwargs.items()} + + data = evaluator_id, function, args, kwargs + try: + pickle_dump(data, self._process.stdin) + except (socket.error, IOError) as e: + # Once Python2 will be removed we can just use `BrokenPipeError`. + # Also, somehow in windows it returns EINVAL instead of EPIPE if + # the subprocess dies. + if e.errno not in (errno.EPIPE, errno.EINVAL): + # Not a broken pipe + raise + self.kill() + raise InternalError("The subprocess %s was killed. Maybe out of memory?" + % self._executable) + + try: + is_exception, traceback, result = pickle_load(self._process.stdout) + except EOFError: + self.kill() + raise InternalError("The subprocess %s has crashed." % self._executable) + + if is_exception: + # Replace the attribute error message with a the traceback. It's + # way more informative. + result.args = (traceback,) + raise result + return result + + def delete_evaluator(self, evaluator_id): + """ + Currently we are not deleting evalutors instantly. They only get + deleted once the subprocess is used again. It would probably a better + solution to move all of this into a thread. However, the memory usage + of a single evaluator shouldn't be that high. 
+ """ + # With an argument - the evaluator gets deleted. + self._evaluator_deletion_queue.append(evaluator_id) + + +class Listener(object): + def __init__(self): + self._evaluators = {} + # TODO refactor so we don't need to process anymore just handle + # controlling. + self._process = _EvaluatorProcess(Listener) + + def _get_evaluator(self, function, evaluator_id): + from jedi.evaluate import Evaluator + + try: + evaluator = self._evaluators[evaluator_id] + except KeyError: + from jedi.api.environment import InterpreterEnvironment + evaluator = Evaluator( + # The project is not actually needed. Nothing should need to + # access it. + project=None, + environment=InterpreterEnvironment() + ) + self._evaluators[evaluator_id] = evaluator + return evaluator + + def _run(self, evaluator_id, function, args, kwargs): + if evaluator_id is None: + return function(*args, **kwargs) + elif function is None: + del self._evaluators[evaluator_id] + else: + evaluator = self._get_evaluator(function, evaluator_id) + + # Exchange all handles + args = list(args) + for i, arg in enumerate(args): + if isinstance(arg, AccessHandle): + args[i] = evaluator.compiled_subprocess.get_access_handle(arg.id) + for key, value in kwargs.items(): + if isinstance(value, AccessHandle): + kwargs[key] = evaluator.compiled_subprocess.get_access_handle(value.id) + + return function(evaluator, *args, **kwargs) + + def listen(self): + stdout = sys.stdout + # Mute stdout/stderr. Nobody should actually be able to write to those, + # because stdout is used for IPC and stderr will just be annoying if it + # leaks (on module imports). + sys.stdout = open(os.devnull, 'w') + sys.stderr = open(os.devnull, 'w') + stdin = sys.stdin + if sys.version_info[0] > 2: + stdout = stdout.buffer + stdin = stdin.buffer + + while True: + try: + payload = pickle_load(stdin) + except EOFError: + # It looks like the parent process closed. Don't make a big fuss + # here and just exit. + exit(1) + try: + result = False, None, self._run(*payload) + except Exception as e: + result = True, traceback.format_exc(), e + + pickle_dump(result, file=stdout) + + +class AccessHandle(object): + def __init__(self, subprocess, access, id_): + self.access = access + self._subprocess = subprocess + self.id = id_ + + def add_subprocess(self, subprocess): + self._subprocess = subprocess + + def __repr__(self): + try: + detail = self.access + except AttributeError: + detail = '#' + str(self.id) + return '<%s of %s>' % (self.__class__.__name__, detail) + + def __getstate__(self): + return self.id + + def __setstate__(self, state): + self.id = state + + def __getattr__(self, name): + if name in ('id', 'access') or name.startswith('_'): + raise AttributeError("Something went wrong with unpickling") + + #if not is_py3: print >> sys.stderr, name + #print('getattr', name, file=sys.stderr) + return partial(self._workaround, force_unicode(name)) + + def _workaround(self, name, *args, **kwargs): + """ + TODO Currently we're passing slice objects around. This should not + happen. They are also the only unhashable objects that we're passing + around. 
+ """ + if args and isinstance(args[0], slice): + return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs) + return self._cached_results(name, *args, **kwargs) + + @memoize_method + def _cached_results(self, name, *args, **kwargs): + #if type(self._subprocess) == EvaluatorSubprocess: + #print(name, args, kwargs, + #self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs) + #) + return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs) diff --git a/pythonFiles/jedi/evaluate/compiled/subprocess/__main__.py b/pythonFiles/jedi/evaluate/compiled/subprocess/__main__.py new file mode 100644 index 000000000000..8a0148ce5040 --- /dev/null +++ b/pythonFiles/jedi/evaluate/compiled/subprocess/__main__.py @@ -0,0 +1,49 @@ +import sys +import os + + +def _get_paths(): + # Get the path to jedi. + _d = os.path.dirname + _jedi_path = _d(_d(_d(_d(_d(__file__))))) + _parso_path = sys.argv[1] + # The paths are the directory that jedi and parso lie in. + return {'jedi': _jedi_path, 'parso': _parso_path} + + +# Remove the first entry, because it's simply a directory entry that equals +# this directory. +del sys.path[0] + +if sys.version_info > (3, 4): + from importlib.machinery import PathFinder + + class _ExactImporter(object): + def __init__(self, path_dct): + self._path_dct = path_dct + + def find_module(self, fullname, path=None): + if path is None and fullname in self._path_dct: + p = self._path_dct[fullname] + loader = PathFinder.find_module(fullname, path=[p]) + return loader + return None + + # Try to import jedi/parso. + sys.meta_path.insert(0, _ExactImporter(_get_paths())) + from jedi.evaluate.compiled import subprocess # NOQA + sys.meta_path.pop(0) +else: + import imp + + def load(name): + paths = list(_get_paths().values()) + fp, pathname, description = imp.find_module(name, paths) + return imp.load_module(name, fp, pathname, description) + + load('parso') + load('jedi') + from jedi.evaluate.compiled import subprocess # NOQA + +# And finally start the client. 
+subprocess.Listener().listen() diff --git a/pythonFiles/jedi/evaluate/compiled/subprocess/functions.py b/pythonFiles/jedi/evaluate/compiled/subprocess/functions.py new file mode 100644 index 000000000000..593ff91e8caf --- /dev/null +++ b/pythonFiles/jedi/evaluate/compiled/subprocess/functions.py @@ -0,0 +1,113 @@ +import sys +import os + +from jedi._compatibility import find_module, cast_path, force_unicode, \ + iter_modules, all_suffixes, print_to_stderr +from jedi.evaluate.compiled import access +from jedi import parser_utils + + +def get_sys_path(): + return list(map(cast_path, sys.path)) + + +def load_module(evaluator, **kwargs): + return access.load_module(evaluator, **kwargs) + + +def get_compiled_method_return(evaluator, id, attribute, *args, **kwargs): + handle = evaluator.compiled_subprocess.get_access_handle(id) + return getattr(handle.access, attribute)(*args, **kwargs) + + +def get_special_object(evaluator, identifier): + return access.get_special_object(evaluator, identifier) + + +def create_simple_object(evaluator, obj): + return access.create_access_path(evaluator, obj) + + +def get_module_info(evaluator, sys_path=None, full_name=None, **kwargs): + if sys_path is not None: + sys.path, temp = sys_path, sys.path + try: + module_file, module_path, is_pkg = find_module(full_name=full_name, **kwargs) + except ImportError: + return None, None, None + finally: + if sys_path is not None: + sys.path = temp + + code = None + if is_pkg: + # In this case, we don't have a file yet. Search for the + # __init__ file. + if module_path.endswith(('.zip', '.egg')): + code = module_file.loader.get_source(full_name) + else: + module_path = _get_init_path(module_path) + elif module_file: + if module_path.endswith(('.zip', '.egg')): + # Unfortunately we are reading unicode here already, not byes. + # It seems however hard to get bytes, because the zip importer + # logic just unpacks the zip file and returns a file descriptor + # that we cannot as easily access. Therefore we just read it as + # a string. + code = module_file.read() + else: + # Read the code with a binary file, because the binary file + # might not be proper unicode. This is handled by the parser + # wrapper. + with open(module_path, 'rb') as f: + code = f.read() + + module_file.close() + + return code, cast_path(module_path), is_pkg + + +def list_module_names(evaluator, search_path): + return [ + name + for module_loader, name, is_pkg in iter_modules(search_path) + ] + + +def get_builtin_module_names(evaluator): + return list(map(force_unicode, sys.builtin_module_names)) + + +def _test_raise_error(evaluator, exception_type): + """ + Raise an error to simulate certain problems for unit tests. + """ + raise exception_type + + +def _test_print(evaluator, stderr=None, stdout=None): + """ + Force some prints in the subprocesses. This exists for unit tests. + """ + if stderr is not None: + print_to_stderr(stderr) + sys.stderr.flush() + if stdout is not None: + print(stdout) + sys.stdout.flush() + + +def _get_init_path(directory_path): + """ + The __init__ file can be searched in a directory. If found return it, else + None. 
+ """ + for suffix in all_suffixes(): + path = os.path.join(directory_path, '__init__' + suffix) + if os.path.exists(path): + return path + return None + + +def safe_literal_eval(evaluator, value): + return parser_utils.safe_literal_eval(value) diff --git a/pythonFiles/jedi/evaluate/context/asynchronous.py b/pythonFiles/jedi/evaluate/context/asynchronous.py new file mode 100644 index 000000000000..51e59a48261e --- /dev/null +++ b/pythonFiles/jedi/evaluate/context/asynchronous.py @@ -0,0 +1,38 @@ +from jedi.evaluate.filters import publish_method, BuiltinOverwrite +from jedi.evaluate.base_context import ContextSet + + +class AsyncBase(BuiltinOverwrite): + def __init__(self, evaluator, func_execution_context): + super(AsyncBase, self).__init__(evaluator) + self.func_execution_context = func_execution_context + + @property + def name(self): + return self.get_object().name + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self.func_execution_context) + + +class Coroutine(AsyncBase): + special_object_identifier = u'COROUTINE' + + @publish_method('__await__') + def _await(self): + return ContextSet(CoroutineWrapper(self.evaluator, self.func_execution_context)) + + +class CoroutineWrapper(AsyncBase): + special_object_identifier = u'COROUTINE_WRAPPER' + + def py__stop_iteration_returns(self): + return self.func_execution_context.get_return_values() + + +class AsyncGenerator(AsyncBase): + """Handling of `yield` functions.""" + special_object_identifier = u'ASYNC_GENERATOR' + + def py__aiter__(self): + return self.func_execution_context.get_yield_lazy_contexts(is_async=True) diff --git a/pythonFiles/jedi/evaluate/context/function.py b/pythonFiles/jedi/evaluate/context/function.py index 0dba9c91d707..2bb3a9b88bc3 100644 --- a/pythonFiles/jedi/evaluate/context/function.py +++ b/pythonFiles/jedi/evaluate/context/function.py @@ -17,17 +17,20 @@ from jedi.evaluate.lazy_context import LazyKnownContexts, LazyKnownContext, \ LazyTreeContext from jedi.evaluate.context import iterable +from jedi.evaluate.context import asynchronous from jedi import parser_utils from jedi.evaluate.parser_cache import get_yield_exprs class LambdaName(AbstractNameDefinition): string_name = '' + api_type = u'function' def __init__(self, lambda_context): self._lambda_context = lambda_context self.parent_context = lambda_context.parent_context + @property def start_pos(self): return self._lambda_context.tree_node.start_pos @@ -39,7 +42,7 @@ class FunctionContext(use_metaclass(CachedMetaClass, TreeContext)): """ Needed because of decorators. Decorators are evaluated here. """ - api_type = 'function' + api_type = u'function' def __init__(self, evaluator, parent_context, funcdef): """ This should not be called directly """ @@ -63,11 +66,23 @@ def infer_function_execution(self, function_execution): """ Created to be used by inheritance. 
""" - yield_exprs = get_yield_exprs(self.evaluator, self.tree_node) - if yield_exprs: - return ContextSet(iterable.Generator(self.evaluator, function_execution)) + is_coroutine = self.tree_node.parent.type == 'async_stmt' + is_generator = bool(get_yield_exprs(self.evaluator, self.tree_node)) + + if is_coroutine: + if is_generator: + if self.evaluator.environment.version_info < (3, 6): + return NO_CONTEXTS + return ContextSet(asynchronous.AsyncGenerator(self.evaluator, function_execution)) + else: + if self.evaluator.environment.version_info < (3, 5): + return NO_CONTEXTS + return ContextSet(asynchronous.Coroutine(self.evaluator, function_execution)) else: - return function_execution.get_return_values() + if is_generator: + return ContextSet(iterable.Generator(self.evaluator, function_execution)) + else: + return function_execution.get_return_values() def get_function_execution(self, arguments=None): if arguments is None: @@ -83,9 +98,9 @@ def py__class__(self): # This differentiation is only necessary for Python2. Python3 does not # use a different method class. if isinstance(parser_utils.get_parent_scope(self.tree_node), tree.Class): - name = 'METHOD_CLASS' + name = u'METHOD_CLASS' else: - name = 'FUNCTION_CLASS' + name = u'FUNCTION_CLASS' return compiled.get_special_object(self.evaluator, name) @property @@ -122,7 +137,7 @@ def __init__(self, evaluator, parent_context, function_context, var_args): def get_return_values(self, check_yields=False): funcdef = self.tree_node if funcdef.type == 'lambdef': - return self.evaluator.eval_element(self, funcdef.children[-1]) + return self.eval_node(funcdef.children[-1]) if check_yields: context_set = NO_CONTEXTS @@ -140,13 +155,14 @@ def get_return_values(self, check_yields=False): if check_yields: context_set |= ContextSet.from_sets( lazy_context.infer() - for lazy_context in self._eval_yield(r) + for lazy_context in self._get_yield_lazy_context(r) ) else: try: children = r.children except AttributeError: - context_set |= ContextSet(compiled.create(self.evaluator, None)) + ctx = compiled.builtin_from_name(self.evaluator, u'None') + context_set |= ContextSet(ctx) else: context_set |= self.eval_node(children[1]) if check is flow_analysis.REACHABLE: @@ -154,10 +170,11 @@ def get_return_values(self, check_yields=False): break return context_set - def _eval_yield(self, yield_expr): + def _get_yield_lazy_context(self, yield_expr): if yield_expr.type == 'keyword': # `yield` just yields None. - yield LazyKnownContext(compiled.create(self.evaluator, None)) + ctx = compiled.builtin_from_name(self.evaluator, u'None') + yield LazyKnownContext(ctx) return node = yield_expr.children[1] @@ -169,7 +186,8 @@ def _eval_yield(self, yield_expr): yield LazyTreeContext(self, node) @recursion.execution_recursion_decorator(default=iter([])) - def get_yield_values(self): + def get_yield_lazy_contexts(self, is_async=False): + # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef', 'while_stmt', 'if_stmt')) for y in get_yield_exprs(self.evaluator, self.tree_node)] @@ -202,7 +220,7 @@ def get_yield_values(self): if for_stmt is None: # No for_stmt, just normal yields. 
for yield_ in yields: - for result in self._eval_yield(yield_): + for result in self._get_yield_lazy_context(yield_): yield result else: input_node = for_stmt.get_testlist() @@ -213,7 +231,7 @@ def get_yield_values(self): dct = {str(for_stmt.children[1].value): lazy_context.infer()} with helpers.predefine_names(self, for_stmt, dct): for yield_in_same_for_stmt in yields: - for result in self._eval_yield(yield_in_same_for_stmt): + for result in self._get_yield_lazy_context(yield_in_same_for_stmt): yield result def get_filters(self, search_global, until_position=None, origin_scope=None): diff --git a/pythonFiles/jedi/evaluate/context/instance.py b/pythonFiles/jedi/evaluate/context/instance.py index 2c8d796c9c6d..def5e19a2da9 100644 --- a/pythonFiles/jedi/evaluate/context/instance.py +++ b/pythonFiles/jedi/evaluate/context/instance.py @@ -1,6 +1,5 @@ from abc import abstractproperty -from jedi._compatibility import is_py3 from jedi import debug from jedi.evaluate import compiled from jedi.evaluate import filters @@ -16,30 +15,34 @@ from jedi.parser_utils import get_parent_scope +class BaseInstanceFunctionExecution(FunctionExecutionContext): + def __init__(self, instance, *args, **kwargs): + self.instance = instance + super(BaseInstanceFunctionExecution, self).__init__( + instance.evaluator, *args, **kwargs) + -class InstanceFunctionExecution(FunctionExecutionContext): +class InstanceFunctionExecution(BaseInstanceFunctionExecution): def __init__(self, instance, parent_context, function_context, var_args): - self.instance = instance var_args = InstanceVarArgs(self, var_args) super(InstanceFunctionExecution, self).__init__( - instance.evaluator, parent_context, function_context, var_args) + instance, parent_context, function_context, var_args) -class AnonymousInstanceFunctionExecution(FunctionExecutionContext): +class AnonymousInstanceFunctionExecution(BaseInstanceFunctionExecution): function_execution_filter = filters.AnonymousInstanceFunctionExecutionFilter def __init__(self, instance, parent_context, function_context, var_args): - self.instance = instance super(AnonymousInstanceFunctionExecution, self).__init__( - instance.evaluator, parent_context, function_context, var_args) + instance, parent_context, function_context, var_args) class AbstractInstanceContext(Context): """ This class is used to evaluate instances. """ - api_type = 'instance' + api_type = u'instance' function_execution_cls = InstanceFunctionExecution def __init__(self, evaluator, parent_context, class_context, var_args): @@ -54,7 +57,7 @@ def is_class(self): @property def py__call__(self): - names = self.get_function_slot_names('__call__') + names = self.get_function_slot_names(u'__call__') if not names: # Means the Instance is not callable. raise AttributeError @@ -90,12 +93,12 @@ def execute_function_slots(self, names, *evaluated_args): def py__get__(self, obj): # Arguments in __get__ descriptors are obj, class. # `method` is the new parent of the array, don't know if that's good. 
- names = self.get_function_slot_names('__get__') + names = self.get_function_slot_names(u'__get__') if names: if isinstance(obj, AbstractInstanceContext): return self.execute_function_slots(names, obj, obj.class_context) else: - none_obj = compiled.create(self.evaluator, None) + none_obj = compiled.builtin_from_name(self.evaluator, u'None') return self.execute_function_slots(names, none_obj, obj) else: return ContextSet(self) @@ -104,14 +107,12 @@ def get_filters(self, search_global=None, until_position=None, origin_scope=None, include_self_names=True): if include_self_names: for cls in self.class_context.py__mro__(): - if isinstance(cls, compiled.CompiledObject): - if cls.tree_node is not None: - # In this case we're talking about a fake object, it - # doesn't make sense for normal compiled objects to - # search for self variables. - yield SelfNameFilter(self.evaluator, self, cls, origin_scope) - else: - yield SelfNameFilter(self.evaluator, self, cls, origin_scope) + if not isinstance(cls, compiled.CompiledObject) \ + or cls.tree_node is not None: + # In this case we're excluding compiled objects that are + # not fake objects. It doesn't make sense for normal + # compiled objects to search for self variables. + yield SelfAttributeFilter(self.evaluator, self, cls, origin_scope) for cls in self.class_context.py__mro__(): if isinstance(cls, compiled.CompiledObject): @@ -121,16 +122,16 @@ def get_filters(self, search_global=None, until_position=None, def py__getitem__(self, index): try: - names = self.get_function_slot_names('__getitem__') + names = self.get_function_slot_names(u'__getitem__') except KeyError: debug.warning('No __getitem__, cannot access the array.') return NO_CONTEXTS else: - index_obj = compiled.create(self.evaluator, index) + index_obj = compiled.create_simple_object(self.evaluator, index) return self.execute_function_slots(names, index_obj) def py__iter__(self): - iter_slot_names = self.get_function_slot_names('__iter__') + iter_slot_names = self.get_function_slot_names(u'__iter__') if not iter_slot_names: debug.warning('No __iter__ on %s.' % self) return @@ -138,7 +139,10 @@ def py__iter__(self): for generator in self.execute_function_slots(iter_slot_names): if isinstance(generator, AbstractInstanceContext): # `__next__` logic. - name = '__next__' if is_py3 else 'next' + if self.evaluator.environment.version_info.major == 2: + name = u'next' + else: + name = u'__next__' iter_slot_names = generator.get_function_slot_names(name) if iter_slot_names: yield LazyKnownContexts( @@ -166,8 +170,8 @@ def _create_init_execution(self, class_context, func_node): ) def create_init_executions(self): - for name in self.get_function_slot_names('__init__'): - if isinstance(name, LazyInstanceName): + for name in self.get_function_slot_names(u'__init__'): + if isinstance(name, SelfName): yield self._create_init_execution(name.class_context, name.tree_name.parent) @evaluator_method_cache() @@ -189,7 +193,7 @@ def create_instance_context(self, class_context, node): ) return bound_method.get_function_execution() elif scope.type == 'classdef': - class_context = ClassContext(self.evaluator, scope, parent_context) + class_context = ClassContext(self.evaluator, parent_context, scope) return class_context elif scope.type == 'comp_for': # Comprehensions currently don't have a special scope in Jedi. @@ -208,8 +212,10 @@ def __init__(self, *args, **kwargs): super(CompiledInstance, self).__init__(*args, **kwargs) # I don't think that dynamic append lookups should happen here. 
That # sounds more like something that should go to py__iter__. + self._original_var_args = self.var_args + if self.class_context.name.string_name in ['list', 'set'] \ - and self.parent_context.get_root_context() == self.evaluator.BUILTINS: + and self.parent_context.get_root_context() == self.evaluator.builtins_module: # compare the module path with the builtin name. self.var_args = iterable.get_dynamic_array_instance(self) @@ -223,6 +229,13 @@ def create_instance_context(self, class_context, node): else: return super(CompiledInstance, self).create_instance_context(class_context, node) + def get_first_non_keyword_argument_contexts(self): + key, lazy_context = next(self._original_var_args.unpack(), ('', None)) + if key is not None: + return NO_CONTEXTS + + return lazy_context.infer() + class TreeInstance(AbstractInstanceContext): def __init__(self, evaluator, parent_context, class_context, var_args): @@ -255,7 +268,8 @@ def __init__(self, evaluator, instance, parent_context, name): @iterator_to_context_set def infer(self): for result_context in super(CompiledInstanceName, self).infer(): - if isinstance(result_context, FunctionContext): + is_function = result_context.api_type == 'function' + if result_context.tree_node is not None and is_function: parent_context = result_context.parent_context while parent_context.is_class(): parent_context = parent_context.parent_context @@ -265,7 +279,7 @@ def infer(self): parent_context, result_context.tree_node ) else: - if result_context.api_type == 'function': + if is_function: yield CompiledBoundMethod(result_context) else: yield result_context @@ -306,7 +320,7 @@ def get_function_execution(self, arguments=None): class CompiledBoundMethod(compiled.CompiledObject): def __init__(self, func): super(CompiledBoundMethod, self).__init__( - func.evaluator, func.obj, func.parent_context, func.tree_node) + func.evaluator, func.access_handle, func.parent_context, func.tree_node) def get_param_names(self): return list(super(CompiledBoundMethod, self).get_param_names())[1:] @@ -317,7 +331,7 @@ def infer(self): return super(InstanceNameDefinition, self).infer() -class LazyInstanceName(filters.TreeNameDefinition): +class SelfName(filters.TreeNameDefinition): """ This name calculates the parent_context lazily. """ @@ -331,7 +345,7 @@ def parent_context(self): return self._instance.create_instance_context(self.class_context, self.tree_name) -class LazyInstanceClassName(LazyInstanceName): +class LazyInstanceClassName(SelfName): @iterator_to_context_set def infer(self): for result_context in super(LazyInstanceClassName, self).infer(): @@ -384,8 +398,11 @@ def _convert_names(self, names): return [self.name_class(self.context, self._class_context, name) for name in names] -class SelfNameFilter(InstanceClassFilter): - name_class = LazyInstanceName +class SelfAttributeFilter(InstanceClassFilter): + """ + This class basically filters all the use cases where `self.*` was assigned. 
+ """ + name_class = SelfName def _filter(self, names): names = self._filter_self_names(names) diff --git a/pythonFiles/jedi/evaluate/context/iterable.py b/pythonFiles/jedi/evaluate/context/iterable.py index d0f468e43425..df012f50dd8a 100644 --- a/pythonFiles/jedi/evaluate/context/iterable.py +++ b/pythonFiles/jedi/evaluate/context/iterable.py @@ -22,74 +22,56 @@ """ from jedi import debug from jedi import settings +from jedi._compatibility import force_unicode, is_py3 +from jedi.cache import memoize_method from jedi.evaluate import compiled from jedi.evaluate import analysis from jedi.evaluate import recursion from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \ LazyTreeContext -from jedi.evaluate.helpers import is_string, predefine_names, evaluate_call_of_leaf +from jedi.evaluate.helpers import get_int_or_none, is_string, \ + predefine_names, evaluate_call_of_leaf from jedi.evaluate.utils import safe_property from jedi.evaluate.utils import to_list from jedi.evaluate.cache import evaluator_method_cache -from jedi.evaluate.filters import ParserTreeFilter, has_builtin_methods, \ - register_builtin_method, SpecialMethodFilter +from jedi.evaluate.filters import ParserTreeFilter, BuiltinOverwrite, \ + publish_method from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, Context, \ TreeContext, ContextualizedNode from jedi.parser_utils import get_comp_fors -class AbstractIterable(Context): - builtin_methods = {} - api_type = 'instance' +class IterableMixin(object): + def py__stop_iteration_returns(self): + return ContextSet(compiled.builtin_from_name(self.evaluator, u'None')) - def __init__(self, evaluator): - super(AbstractIterable, self).__init__(evaluator, evaluator.BUILTINS) - def get_filters(self, search_global, until_position=None, origin_scope=None): - raise NotImplementedError - - @property - def name(self): - return compiled.CompiledContextName(self, self.array_type) - - -@has_builtin_methods -class GeneratorMixin(object): +class GeneratorBase(BuiltinOverwrite, IterableMixin): array_type = None + special_object_identifier = u'GENERATOR_OBJECT' - @register_builtin_method('send') - @register_builtin_method('next', python_version_match=2) - @register_builtin_method('__next__', python_version_match=3) + @publish_method('send') + @publish_method('next', python_version_match=2) + @publish_method('__next__', python_version_match=3) def py__next__(self): - # TODO add TypeError if params are given. 
return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__()) - def get_filters(self, search_global, until_position=None, origin_scope=None): - gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT') - yield SpecialMethodFilter(self, self.builtin_methods, gen_obj) - for filter in gen_obj.get_filters(search_global): - yield filter - - def py__bool__(self): - return True - - def py__class__(self): - gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT') - return gen_obj.py__class__() - @property def name(self): return compiled.CompiledContextName(self, 'generator') -class Generator(GeneratorMixin, Context): +class Generator(GeneratorBase): """Handling of `yield` functions.""" def __init__(self, evaluator, func_execution_context): - super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS) + super(Generator, self).__init__(evaluator) self._func_execution_context = func_execution_context def py__iter__(self): - return self._func_execution_context.get_yield_values() + return self._func_execution_context.get_yield_lazy_contexts() + + def py__stop_iteration_returns(self): + return self._func_execution_context.get_return_values() def __repr__(self): return "<%s of %s>" % (type(self).__name__, self._func_execution_context) @@ -111,32 +93,33 @@ def get_filters(self, search_global, until_position=None, origin_scope=None): yield ParserTreeFilter(self.evaluator, self) -class Comprehension(AbstractIterable): - @staticmethod - def from_atom(evaluator, context, atom): - bracket = atom.children[0] - if bracket == '{': - if atom.children[1].children[1] == ':': - cls = DictComprehension - else: - cls = SetComprehension - elif bracket == '(': - cls = GeneratorComprehension - elif bracket == '[': - cls = ListComprehension - return cls(evaluator, context, atom) +def comprehension_from_atom(evaluator, context, atom): + bracket = atom.children[0] + if bracket == '{': + if atom.children[1].children[1] == ':': + cls = DictComprehension + else: + cls = SetComprehension + elif bracket == '(': + cls = GeneratorComprehension + elif bracket == '[': + cls = ListComprehension + return cls(evaluator, context, atom) + +class ComprehensionMixin(object): def __init__(self, evaluator, defining_context, atom): - super(Comprehension, self).__init__(evaluator) + super(ComprehensionMixin, self).__init__(evaluator) self._defining_context = defining_context self._atom = atom def _get_comprehension(self): + "return 'a for a in b'" # The atom contains a testlist_comp return self._atom.children[1] def _get_comp_for(self): - # The atom contains a testlist_comp + "return CompFor('for a in b')" return self._get_comprehension().children[1] def _eval_node(self, index=0): @@ -154,13 +137,17 @@ def _get_comp_for_context(self, parent_context, comp_for): def _nested(self, comp_fors, parent_context=None): comp_for = comp_fors[0] - input_node = comp_for.children[3] + + is_async = 'async' == comp_for.children[comp_for.children.index('for') - 1] + + input_node = comp_for.children[comp_for.children.index('in') + 1] parent_context = parent_context or self._defining_context input_types = parent_context.eval_node(input_node) + # TODO: simulate await if self.is_async cn = ContextualizedNode(parent_context, input_node) - iterated = input_types.iterate(cn) - exprlist = comp_for.children[1] + iterated = input_types.iterate(cn, is_async=is_async) + exprlist = comp_for.children[comp_for.children.index('for') + 1] for i, lazy_context in enumerate(iterated): types = 
lazy_context.infer() dct = unpack_tuple_to_dict(parent_context, types, exprlist) @@ -194,14 +181,18 @@ def __repr__(self): return "<%s of %s>" % (type(self).__name__, self._atom) -class ArrayMixin(object): - def get_filters(self, search_global, until_position=None, origin_scope=None): - # `array.type` is a string with the type, e.g. 'list'. +class Sequence(BuiltinOverwrite, IterableMixin): + api_type = u'instance' + + @property + def name(self): + return compiled.CompiledContextName(self, self.array_type) + + @memoize_method + def get_object(self): compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type) - yield SpecialMethodFilter(self, self.builtin_methods, compiled_obj) - for typ in compiled_obj.execute_evaluated(self): - for filter in typ.get_filters(): - yield filter + only_obj, = compiled_obj.execute_evaluated(self) + return only_obj def py__bool__(self): return None # We don't know the length, because of appends. @@ -211,7 +202,7 @@ def py__class__(self): @safe_property def parent(self): - return self.evaluator.BUILTINS + return self.evaluator.builtins_module def dict_values(self): return ContextSet.from_sets( @@ -220,8 +211,8 @@ def dict_values(self): ) -class ListComprehension(ArrayMixin, Comprehension): - array_type = 'list' +class ListComprehension(ComprehensionMixin, Sequence): + array_type = u'list' def py__getitem__(self, index): if isinstance(index, slice): @@ -231,13 +222,12 @@ def py__getitem__(self, index): return all_types[index].infer() -class SetComprehension(ArrayMixin, Comprehension): - array_type = 'set' +class SetComprehension(ComprehensionMixin, Sequence): + array_type = u'set' -@has_builtin_methods -class DictComprehension(ArrayMixin, Comprehension): - array_type = 'dict' +class DictComprehension(ComprehensionMixin, Sequence): + array_type = u'dict' def _get_comp_for(self): return self._get_comprehension().children[3] @@ -250,38 +240,38 @@ def py__getitem__(self, index): for keys, values in self._iterate(): for k in keys: if isinstance(k, compiled.CompiledObject): - if k.obj == index: + if k.get_safe_value(default=object()) == index: return values return self.dict_values() def dict_values(self): return ContextSet.from_sets(values for keys, values in self._iterate()) - @register_builtin_method('values') + @publish_method('values') def _imitate_values(self): lazy_context = LazyKnownContexts(self.dict_values()) - return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context])) + return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context])) - @register_builtin_method('items') + @publish_method('items') def _imitate_items(self): items = ContextSet.from_iterable( FakeSequence( - self.evaluator, 'tuple' + self.evaluator, u'tuple' (LazyKnownContexts(keys), LazyKnownContexts(values)) ) for keys, values in self._iterate() ) - return create_evaluated_sequence_set(self.evaluator, items, sequence_type='list') + return create_evaluated_sequence_set(self.evaluator, items, sequence_type=u'list') -class GeneratorComprehension(GeneratorMixin, Comprehension): +class GeneratorComprehension(ComprehensionMixin, GeneratorBase): pass -class SequenceLiteralContext(ArrayMixin, AbstractIterable): - mapping = {'(': 'tuple', - '[': 'list', - '{': 'set'} +class SequenceLiteralContext(Sequence): + mapping = {'(': u'tuple', + '[': u'list', + '{': u'set'} def __init__(self, evaluator, defining_context, atom): super(SequenceLiteralContext, self).__init__(evaluator) @@ -289,18 +279,19 @@ def __init__(self, evaluator, defining_context, atom): 
self._defining_context = defining_context if self.atom.type in ('testlist_star_expr', 'testlist'): - self.array_type = 'tuple' + self.array_type = u'tuple' else: self.array_type = SequenceLiteralContext.mapping[atom.children[0]] """The builtin name of the array (list, set, tuple or dict).""" def py__getitem__(self, index): """Here the index is an int/str. Raises IndexError/KeyError.""" - if self.array_type == 'dict': + if self.array_type == u'dict': + compiled_obj_index = compiled.create_simple_object(self.evaluator, index) for key, value in self._items(): for k in self._defining_context.eval_node(key): if isinstance(k, compiled.CompiledObject) \ - and index == k.obj: + and k.execute_operation(compiled_obj_index, u'==').get_safe_value(): return self._defining_context.eval_node(value) raise KeyError('No key found in dictionary %s.' % self) @@ -315,7 +306,7 @@ def py__iter__(self): While values returns the possible values for any array field, this function returns the value for a certain index. """ - if self.array_type == 'dict': + if self.array_type == u'dict': # Get keys. types = ContextSet() for k, _ in self._items(): @@ -333,7 +324,7 @@ def py__iter__(self): def _values(self): """Returns a list of a list of node.""" - if self.array_type == 'dict': + if self.array_type == u'dict': return ContextSet.from_sets(v for k, v in self._items()) else: return self._items() @@ -373,37 +364,36 @@ def exact_key_items(self): for key_node, value in self._items(): for key in self._defining_context.eval_node(key_node): if is_string(key): - yield key.obj, LazyTreeContext(self._defining_context, value) + yield key.get_safe_value(), LazyTreeContext(self._defining_context, value) def __repr__(self): return "<%s of %s>" % (self.__class__.__name__, self.atom) -@has_builtin_methods class DictLiteralContext(SequenceLiteralContext): - array_type = 'dict' + array_type = u'dict' def __init__(self, evaluator, defining_context, atom): super(SequenceLiteralContext, self).__init__(evaluator) self._defining_context = defining_context self.atom = atom - @register_builtin_method('values') + @publish_method('values') def _imitate_values(self): lazy_context = LazyKnownContexts(self.dict_values()) - return ContextSet(FakeSequence(self.evaluator, 'list', [lazy_context])) + return ContextSet(FakeSequence(self.evaluator, u'list', [lazy_context])) - @register_builtin_method('items') + @publish_method('items') def _imitate_items(self): lazy_contexts = [ LazyKnownContext(FakeSequence( - self.evaluator, 'tuple', + self.evaluator, u'tuple', (LazyTreeContext(self._defining_context, key_node), LazyTreeContext(self._defining_context, value_node)) )) for key_node, value_node in self._items() ] - return ContextSet(FakeSequence(self.evaluator, 'list', lazy_contexts)) + return ContextSet(FakeSequence(self.evaluator, u'list', lazy_contexts)) class _FakeArray(SequenceLiteralContext): @@ -437,16 +427,38 @@ def __repr__(self): class FakeDict(_FakeArray): def __init__(self, evaluator, dct): - super(FakeDict, self).__init__(evaluator, dct, 'dict') + super(FakeDict, self).__init__(evaluator, dct, u'dict') self._dct = dct def py__iter__(self): for key in self._dct: - yield LazyKnownContext(compiled.create(self.evaluator, key)) + yield LazyKnownContext(compiled.create_simple_object(self.evaluator, key)) def py__getitem__(self, index): + if is_py3 and self.evaluator.environment.version_info.major == 2: + # In Python 2 bytes and unicode compare. 
+ if isinstance(index, bytes): + index_unicode = force_unicode(index) + try: + return self._dct[index_unicode].infer() + except KeyError: + pass + elif isinstance(index, str): + index_bytes = index.encode('utf-8') + try: + return self._dct[index_bytes].infer() + except KeyError: + pass + return self._dct[index].infer() + @publish_method('values') + def _values(self): + return ContextSet(FakeSequence( + self.evaluator, u'tuple', + [LazyKnownContexts(self.dict_values())] + )) + def dict_values(self): return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values()) @@ -649,7 +661,7 @@ def py__iter__(self): for addition in additions: yield addition - def iterate(self, contextualized_node=None): + def iterate(self, contextualized_node=None, is_async=False): return self.py__iter__() @@ -657,7 +669,7 @@ class Slice(Context): def __init__(self, context, start, stop, step): super(Slice, self).__init__( context.evaluator, - parent_context=context.evaluator.BUILTINS + parent_context=context.evaluator.builtins_module ) self._context = context # all of them are either a Precedence or None. @@ -680,10 +692,9 @@ def get(element): # For simplicity, we want slices to be clear defined with just # one type. Otherwise we will return an empty slice object. raise IndexError - try: - return list(result)[0].obj - except AttributeError: - return None + + context, = result + return get_int_or_none(context) try: return slice(get(self._start), get(self._stop), get(self._step)) diff --git a/pythonFiles/jedi/evaluate/context/klass.py b/pythonFiles/jedi/evaluate/context/klass.py index b7d61d3e16bf..3157250161e2 100644 --- a/pythonFiles/jedi/evaluate/context/klass.py +++ b/pythonFiles/jedi/evaluate/context/klass.py @@ -89,7 +89,7 @@ class ClassContext(use_metaclass(CachedMetaClass, TreeContext)): This class is not only important to extend `tree.Class`, it is also a important for descriptors (if the descriptor methods are evaluated or not). 
""" - api_type = 'class' + api_type = u'class' def __init__(self, evaluator, parent_context, classdef): super(ClassContext, self).__init__(evaluator, parent_context=parent_context) @@ -136,17 +136,17 @@ def py__bases__(self): arglist = self.tree_node.get_super_arglist() if arglist: from jedi.evaluate import arguments - args = arguments.TreeArguments(self.evaluator, self, arglist) + args = arguments.TreeArguments(self.evaluator, self.parent_context, arglist) return [value for key, value in args.unpack() if key is None] else: - return [LazyKnownContext(compiled.create(self.evaluator, object))] + return [LazyKnownContext(compiled.builtin_from_name(self.evaluator, u'object'))] def py__call__(self, params): from jedi.evaluate.context import TreeInstance return ContextSet(TreeInstance(self.evaluator, self.parent_context, self, params)) def py__class__(self): - return compiled.create(self.evaluator, type) + return compiled.builtin_from_name(self.evaluator, u'type') def get_params(self): from jedi.evaluate.context import AnonymousInstance @@ -182,7 +182,7 @@ def get_function_slot_names(self, name): return [] def get_param_names(self): - for name in self.get_function_slot_names('__init__'): + for name in self.get_function_slot_names(u'__init__'): for context_ in name.infer(): try: method = context_.get_param_names diff --git a/pythonFiles/jedi/evaluate/context/module.py b/pythonFiles/jedi/evaluate/context/module.py index 5ba92cdb1c3e..8d4da11bc91a 100644 --- a/pythonFiles/jedi/evaluate/context/module.py +++ b/pythonFiles/jedi/evaluate/context/module.py @@ -1,14 +1,12 @@ -import pkgutil -import imp import re import os from parso import python_bytes_to_unicode -from jedi._compatibility import use_metaclass -from jedi.evaluate.cache import CachedMetaClass, evaluator_method_cache +from jedi.evaluate.cache import evaluator_method_cache +from jedi._compatibility import iter_modules, all_suffixes from jedi.evaluate.filters import GlobalNameFilter, ContextNameMixin, \ - AbstractNameDefinition, ParserTreeFilter, DictFilter + AbstractNameDefinition, ParserTreeFilter, DictFilter, MergedFilter from jedi.evaluate import compiled from jedi.evaluate.base_context import TreeContext from jedi.evaluate.imports import SubModuleName, infer_import @@ -18,14 +16,14 @@ class _ModuleAttributeName(AbstractNameDefinition): """ For module attributes like __file__, __str__ and so on. 
""" - api_type = 'instance' + api_type = u'instance' def __init__(self, parent_module, string_name): self.parent_context = parent_module self.string_name = string_name def infer(self): - return compiled.create(self.parent_context.evaluator, str).execute_evaluated() + return compiled.get_string_context_set(self.parent_context.evaluator) class ModuleName(ContextNameMixin, AbstractNameDefinition): @@ -40,23 +38,26 @@ def string_name(self): return self._name -class ModuleContext(use_metaclass(CachedMetaClass, TreeContext)): - api_type = 'module' +class ModuleContext(TreeContext): + api_type = u'module' parent_context = None - def __init__(self, evaluator, module_node, path): + def __init__(self, evaluator, module_node, path, code_lines): super(ModuleContext, self).__init__(evaluator, parent_context=None) self.tree_node = module_node self._path = path + self.code_lines = code_lines def get_filters(self, search_global, until_position=None, origin_scope=None): - yield ParserTreeFilter( - self.evaluator, - context=self, - until_position=until_position, - origin_scope=origin_scope + yield MergedFilter( + ParserTreeFilter( + self.evaluator, + context=self, + until_position=until_position, + origin_scope=origin_scope + ), + GlobalNameFilter(self, self.tree_node), ) - yield GlobalNameFilter(self, self.tree_node) yield DictFilter(self._sub_modules_dict()) yield DictFilter(self._module_attributes_dict()) for star_module in self.star_imports(): @@ -64,7 +65,7 @@ def get_filters(self, search_global, until_position=None, origin_scope=None): # I'm not sure if the star import cache is really that effective anymore # with all the other really fast import caches. Recheck. Also we would need - # to push the star imports into Evaluator.modules, if we reenable this. + # to push the star imports into Evaluator.module_cache, if we reenable this. @evaluator_method_cache([]) def star_imports(self): modules = [] @@ -93,7 +94,7 @@ def _string_name(self): sep = (re.escape(os.path.sep),) * 2 r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self._path) # Remove PEP 3149 names - return re.sub('\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) + return re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1)) @property @evaluator_method_cache() @@ -105,7 +106,7 @@ def _get_init_directory(self): :return: The path to the directory of a package. None in case it's not a package. 
""" - for suffix, _, _ in imp.get_suffixes(): + for suffix in all_suffixes(): ending = '__init__' + suffix py__file__ = self.py__file__() if py__file__ is not None and py__file__.endswith(ending): @@ -114,7 +115,7 @@ def _get_init_directory(self): return None def py__name__(self): - for name, module in self.evaluator.modules.items(): + for name, module in self.evaluator.module_cache.iterate_modules_with_names(): if module == self and name != '': return name @@ -131,12 +132,12 @@ def py__file__(self): def py__package__(self): if self._get_init_directory() is None: - return re.sub(r'\.?[^\.]+$', '', self.py__name__()) + return re.sub(r'\.?[^.]+$', '', self.py__name__()) else: return self.py__name__() def _py__path__(self): - search_path = self.evaluator.project.sys_path + search_path = self.evaluator.get_sys_path() init_path = self.py__file__() if os.path.basename(init_path) == '__init__.py': with open(init_path, 'rb') as f: @@ -188,7 +189,7 @@ def _sub_modules_dict(self): path = self._path names = {} if path is not None and path.endswith(os.path.sep + '__init__.py'): - mods = pkgutil.iter_modules([os.path.dirname(path)]) + mods = iter_modules([os.path.dirname(path)]) for module_loader, name, is_pkg in mods: # It's obviously a relative import to the current module. names[name] = SubModuleName(self, name) @@ -203,11 +204,9 @@ def _sub_modules_dict(self): return names def py__class__(self): - return compiled.get_special_object(self.evaluator, 'MODULE_CLASS') + return compiled.get_special_object(self.evaluator, u'MODULE_CLASS') def __repr__(self): return "<%s: %s@%s-%s>" % ( self.__class__.__name__, self._string_name, self.tree_node.start_pos[0], self.tree_node.end_pos[0]) - - diff --git a/pythonFiles/jedi/evaluate/context/namespace.py b/pythonFiles/jedi/evaluate/context/namespace.py index e40c23a5e8ca..380d7c2270b9 100644 --- a/pythonFiles/jedi/evaluate/context/namespace.py +++ b/pythonFiles/jedi/evaluate/context/namespace.py @@ -1,11 +1,10 @@ import os from itertools import chain -from jedi._compatibility import use_metaclass -from jedi.evaluate.cache import evaluator_method_cache, CachedMetaClass +from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate import imports from jedi.evaluate.filters import DictFilter, AbstractNameDefinition -from jedi.evaluate.base_context import NO_CONTEXTS, TreeContext +from jedi.evaluate.base_context import TreeContext, ContextSet class ImplicitNSName(AbstractNameDefinition): @@ -14,27 +13,31 @@ class ImplicitNSName(AbstractNameDefinition): This object will prevent Jedi from raising exceptions """ def __init__(self, implicit_ns_context, string_name): - self.implicit_ns_context = implicit_ns_context + self.parent_context = implicit_ns_context self.string_name = string_name def infer(self): - return NO_CONTEXTS + return ContextSet(self.parent_context) def get_root_context(self): - return self.implicit_ns_context + return self.parent_context -class ImplicitNamespaceContext(use_metaclass(CachedMetaClass, TreeContext)): +class ImplicitNamespaceContext(TreeContext): """ Provides support for implicit namespace packages """ - api_type = 'module' + # Is a module like every other module, because if you import an empty + # folder foobar it will be available as an object: + # . 
+ api_type = u'module' parent_context = None - def __init__(self, evaluator, fullname): + def __init__(self, evaluator, fullname, paths): super(ImplicitNamespaceContext, self).__init__(evaluator, parent_context=None) self.evaluator = evaluator - self.fullname = fullname + self._fullname = fullname + self.paths = paths def get_filters(self, search_global, until_position=None, origin_scope=None): yield DictFilter(self._sub_modules_dict()) @@ -51,7 +54,7 @@ def py__file__(self): def py__package__(self): """Return the fullname """ - return self.fullname + return self._fullname @property def py__path__(self): @@ -61,8 +64,7 @@ def py__path__(self): def _sub_modules_dict(self): names = {} - paths = self.paths - file_names = chain.from_iterable(os.listdir(path) for path in paths) + file_names = chain.from_iterable(os.listdir(path) for path in self.paths) mods = [ file_name.rpartition('.')[0] if '.' in file_name else file_name for file_name in file_names diff --git a/pythonFiles/jedi/evaluate/docstrings.py b/pythonFiles/jedi/evaluate/docstrings.py index f9c1141226e9..a927abd09028 100644 --- a/pythonFiles/jedi/evaluate/docstrings.py +++ b/pythonFiles/jedi/evaluate/docstrings.py @@ -18,7 +18,7 @@ import re from textwrap import dedent -from parso import parse +from parso import parse, ParserSyntaxError from jedi._compatibility import u from jedi.evaluate.utils import indent_block @@ -42,49 +42,59 @@ REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') -try: - from numpydoc.docscrape import NumpyDocString -except ImportError: - def _search_param_in_numpydocstr(docstr, param_str): - return [] +_numpy_doc_string_cache = None - def _search_return_in_numpydocstr(docstr): - return [] -else: - def _search_param_in_numpydocstr(docstr, param_str): - """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" - try: - # This is a non-public API. If it ever changes we should be - # prepared and return gracefully. - params = NumpyDocString(docstr)._parsed_data['Parameters'] - except (KeyError, AttributeError): - return [] - for p_name, p_type, p_descr in params: - if p_name == param_str: - m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) - if m: - p_type = m.group(1) - return list(_expand_typestr(p_type)) + +def _get_numpy_doc_string_cls(): + global _numpy_doc_string_cache + try: + from numpydoc.docscrape import NumpyDocString + _numpy_doc_string_cache = NumpyDocString + except ImportError as e: + _numpy_doc_string_cache = e + if isinstance(_numpy_doc_string_cache, ImportError): + raise _numpy_doc_string_cache + return _numpy_doc_string_cache + + +def _search_param_in_numpydocstr(docstr, param_str): + """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters'] + except (KeyError, AttributeError, ImportError): return [] + for p_name, p_type, p_descr in params: + if p_name == param_str: + m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) + if m: + p_type = m.group(1) + return list(_expand_typestr(p_type)) + return [] - def _search_return_in_numpydocstr(docstr): - """ - Search `docstr` (in numpydoc format) for type(-s) of function returns. - """ - doc = NumpyDocString(docstr) - try: - # This is a non-public API. If it ever changes we should be - # prepared and return gracefully. 
- returns = doc._parsed_data['Returns'] - returns += doc._parsed_data['Yields'] - except (KeyError, AttributeError): - raise StopIteration - for r_name, r_type, r_descr in returns: - #Return names are optional and if so the type is in the name - if not r_type: - r_type = r_name - for type_ in _expand_typestr(r_type): - yield type_ + +def _search_return_in_numpydocstr(docstr): + """ + Search `docstr` (in numpydoc format) for type(-s) of function returns. + """ + try: + doc = _get_numpy_doc_string_cls()(docstr) + except ImportError: + return + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + returns = doc._parsed_data['Returns'] + returns += doc._parsed_data['Yields'] + except (KeyError, AttributeError): + return + for r_name, r_type, r_descr in returns: + # Return names are optional and if so the type is in the name + if not r_type: + r_type = r_name + for type_ in _expand_typestr(r_type): + yield type_ def _expand_typestr(type_str): @@ -145,8 +155,7 @@ def _search_param_in_docstr(docstr, param_str): if match: return [_strip_rst_role(match.group(1))] - return (_search_param_in_numpydocstr(docstr, param_str) or - []) + return _search_param_in_numpydocstr(docstr, param_str) def _strip_rst_role(type_str): @@ -179,7 +188,7 @@ def pseudo_docstring_stuff(): Need this docstring so that if the below part is not valid Python this is still a function. ''' - {0} + {} """)) if string is None: return [] @@ -193,7 +202,10 @@ def pseudo_docstring_stuff(): # will be impossible to use `...` (Ellipsis) as a token. Docstring types # don't need to conform with the current grammar. grammar = module_context.evaluator.latest_grammar - module = grammar.parse(code.format(indent_block(string))) + try: + module = grammar.parse(code.format(indent_block(string)), error_recovery=False) + except ParserSyntaxError: + return [] try: funcdef = next(module.iter_funcdefs()) # First pick suite, then simple_stmt and then the node, @@ -243,7 +255,7 @@ def _execute_array_values(evaluator, array): for typ in lazy_context.infer() ) values.append(LazyKnownContexts(objects)) - return set([FakeSequence(evaluator, array.array_type, values)]) + return {FakeSequence(evaluator, array.array_type, values)} else: return array.execute_evaluated() diff --git a/pythonFiles/jedi/evaluate/dynamic.py b/pythonFiles/jedi/evaluate/dynamic.py index 7d05000dc9d5..9e8d57144bdb 100644 --- a/pythonFiles/jedi/evaluate/dynamic.py +++ b/pythonFiles/jedi/evaluate/dynamic.py @@ -73,24 +73,33 @@ def search_params(evaluator, execution_context, funcdef): # you will see the slowdown, especially in 3.6. return create_default_params(execution_context, funcdef) - debug.dbg('Dynamic param search in %s.', funcdef.name.value, color='MAGENTA') - - module_context = execution_context.get_root_context() - function_executions = _search_function_executions( - evaluator, - module_context, - funcdef - ) - if function_executions: - zipped_params = zip(*list( - function_execution.get_params() - for function_execution in function_executions - )) - params = [MergedExecutedParams(executed_params) for executed_params in zipped_params] - # Evaluate the ExecutedParams to types. 
+ if funcdef.type == 'lambdef': + string_name = _get_lambda_name(funcdef) + if string_name is None: + return create_default_params(execution_context, funcdef) else: - return create_default_params(execution_context, funcdef) - debug.dbg('Dynamic param result finished', color='MAGENTA') + string_name = funcdef.name.value + debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA') + + try: + module_context = execution_context.get_root_context() + function_executions = _search_function_executions( + evaluator, + module_context, + funcdef, + string_name=string_name, + ) + if function_executions: + zipped_params = zip(*list( + function_execution.get_params() + for function_execution in function_executions + )) + params = [MergedExecutedParams(executed_params) for executed_params in zipped_params] + # Evaluate the ExecutedParams to types. + else: + return create_default_params(execution_context, funcdef) + finally: + debug.dbg('Dynamic param result finished', color='MAGENTA') return params finally: evaluator.dynamic_params_depth -= 1 @@ -98,25 +107,24 @@ def search_params(evaluator, execution_context, funcdef): @evaluator_function_cache(default=None) @to_list -def _search_function_executions(evaluator, module_context, funcdef): +def _search_function_executions(evaluator, module_context, funcdef, string_name): """ Returns a list of param names. """ - func_string_name = funcdef.name.value compare_node = funcdef - if func_string_name == '__init__': + if string_name == '__init__': cls = get_parent_scope(funcdef) if isinstance(cls, tree.Class): - func_string_name = cls.name.value + string_name = cls.name.value compare_node = cls found_executions = False i = 0 for for_mod_context in imports.get_modules_containing_name( - evaluator, [module_context], func_string_name): + evaluator, [module_context], string_name): if not isinstance(module_context, ModuleContext): return - for name, trailer in _get_possible_nodes(for_mod_context, func_string_name): + for name, trailer in _get_possible_nodes(for_mod_context, string_name): i += 1 # This is a simple way to stop Jedi's dynamic param recursion @@ -137,6 +145,18 @@ def _search_function_executions(evaluator, module_context, funcdef): return +def _get_lambda_name(node): + stmt = node.parent + if stmt.type == 'expr_stmt': + first_operator = next(stmt.yield_operators(), None) + if first_operator == '=': + first = stmt.children[0] + if first.type == 'name': + return first.value + + return None + + def _get_possible_nodes(module_context, func_string_name): try: names = module_context.tree_node.get_used_names()[func_string_name] @@ -156,11 +176,9 @@ def _check_name_for_execution(evaluator, context, compare_node, name, trailer): def create_func_excs(): arglist = trailer.children[1] if arglist == ')': - arglist = () + arglist = None args = TreeArguments(evaluator, context, arglist, trailer) - if value_node.type == 'funcdef': - yield value.get_function_execution(args) - else: + if value_node.type == 'classdef': created_instance = instance.TreeInstance( evaluator, value.parent_context, @@ -169,6 +187,8 @@ def create_func_excs(): ) for execution in created_instance.create_init_executions(): yield execution + else: + yield value.get_function_execution(args) for value in evaluator.goto_definitions(context, name): value_node = value.tree_node diff --git a/pythonFiles/jedi/evaluate/filters.py b/pythonFiles/jedi/evaluate/filters.py index 35dff9dace65..4294f2a60004 100644 --- a/pythonFiles/jedi/evaluate/filters.py +++ b/pythonFiles/jedi/evaluate/filters.py @@ -6,7 
+6,8 @@ from parso.tree import search_ancestor -from jedi._compatibility import is_py3 +from jedi._compatibility import use_metaclass, Parameter +from jedi.cache import memoize_method from jedi.evaluate import flow_analysis from jedi.evaluate.base_context import ContextSet, Context from jedi.parser_utils import get_parent_scope @@ -27,7 +28,7 @@ def infer(self): def goto(self): # Typically names are already definitions and therefore a goto on that # name will always result on itself. - return set([self]) + return {self} def get_root_context(self): return self.parent_context.get_root_context() @@ -43,6 +44,9 @@ def execute(self, arguments): def execute_evaluated(self, *args, **kwargs): return self.infer().execute_evaluated(*args, **kwargs) + def is_import(self): + return False + @property def api_type(self): return self.parent_context.api_type @@ -56,6 +60,10 @@ def __init__(self, parent_context, tree_name): def goto(self): return self.parent_context.evaluator.goto(self.parent_context, self.tree_name) + def is_import(self): + imp = search_ancestor(self.tree_name, 'import_from', 'import_name') + return imp is not None + @property def string_name(self): return self.tree_name.value @@ -108,12 +116,28 @@ def api_type(self): class ParamName(AbstractTreeName): - api_type = 'param' + api_type = u'param' def __init__(self, parent_context, tree_name): self.parent_context = parent_context self.tree_name = tree_name + def get_kind(self): + tree_param = search_ancestor(self.tree_name, 'param') + if tree_param.star_count == 1: # *args + return Parameter.VAR_POSITIONAL + if tree_param.star_count == 2: # **kwargs + return Parameter.VAR_KEYWORD + + parent = tree_param.parent + for p in parent.children: + if p.type == 'param': + if p.star_count: + return Parameter.KEYWORD_ONLY + if p == tree_param: + break + return Parameter.POSITIONAL_OR_KEYWORD + def infer(self): return self.get_param().infer() @@ -163,7 +187,7 @@ def __init__(self, context, parser_scope): def get(self, name): try: - names = self._used_names[str(name)] + names = self._used_names[name] except KeyError: return [] @@ -213,7 +237,10 @@ def _is_name_reachable(self, name): def _check_flows(self, names): for name in sorted(names, key=lambda name: name.start_pos, reverse=True): check = flow_analysis.reachability_check( - self._node_context, self._parser_scope, name, self._origin_scope + context=self._node_context, + context_scope=self._parser_scope, + node=name, + origin_scope=self._origin_scope ) if check is not flow_analysis.UNREACHABLE: yield name @@ -266,22 +293,42 @@ def __init__(self, dct): def get(self, name): try: - value = self._convert(name, self._dct[str(name)]) + value = self._convert(name, self._dct[name]) except KeyError: return [] - - return list(self._filter([value])) + else: + return list(self._filter([value])) def values(self): - return self._filter(self._convert(*item) for item in self._dct.items()) + def yielder(): + for item in self._dct.items(): + try: + yield self._convert(*item) + except KeyError: + pass + return self._filter(yielder()) def _convert(self, name, value): return value +class MergedFilter(object): + def __init__(self, *filters): + self._filters = filters + + def get(self, name): + return [n for filter in self._filters for n in filter.get(name)] + + def values(self): + return [n for filter in self._filters for n in filter.values()] + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in self._filters)) + + class _BuiltinMappedMethod(Context): """``Generator.__next__`` 
``dict.values`` methods and so on.""" - api_type = 'function' + api_type = u'function' def __init__(self, builtin_context, method, builtin_func): super(_BuiltinMappedMethod, self).__init__( @@ -292,6 +339,7 @@ def __init__(self, builtin_context, method, builtin_func): self._builtin_func = builtin_func def py__call__(self, params): + # TODO add TypeError if params are given/or not correct. return self._method(self.parent_context) def __getattr__(self, name): @@ -304,21 +352,33 @@ class SpecialMethodFilter(DictFilter): classes like Generator (for __next__, etc). """ class SpecialMethodName(AbstractNameDefinition): - api_type = 'function' + api_type = u'function' + + def __init__(self, parent_context, string_name, value, builtin_context): + callable_, python_version = value + if python_version is not None and \ + python_version != parent_context.evaluator.environment.version_info.major: + raise KeyError - def __init__(self, parent_context, string_name, callable_, builtin_context): self.parent_context = parent_context self.string_name = string_name self._callable = callable_ self._builtin_context = builtin_context def infer(self): - filter = next(self._builtin_context.get_filters()) - # We can take the first index, because on builtin methods there's - # always only going to be one name. The same is true for the - # inferred values. - builtin_func = next(iter(filter.get(self.string_name)[0].infer())) - return ContextSet(_BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)) + for filter in self._builtin_context.get_filters(): + # We can take the first index, because on builtin methods there's + # always only going to be one name. The same is true for the + # inferred values. + for name in filter.get(self.string_name): + builtin_func = next(iter(name.infer())) + break + else: + continue + break + return ContextSet( + _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func) + ) def __init__(self, context, dct, builtin_context): super(SpecialMethodFilter, self).__init__(dct) @@ -335,34 +395,58 @@ def _convert(self, name, value): return self.SpecialMethodName(self.context, name, value, self._builtin_context) -def has_builtin_methods(cls): - base_dct = {} - # Need to care properly about inheritance. Builtin Methods should not get - # lost, just because they are not mentioned in a class. - for base_cls in reversed(cls.__bases__): - try: - base_dct.update(base_cls.builtin_methods) - except AttributeError: - pass +class _OverwriteMeta(type): + def __init__(cls, name, bases, dct): + super(_OverwriteMeta, cls).__init__(name, bases, dct) - cls.builtin_methods = base_dct - for func in cls.__dict__.values(): - try: - cls.builtin_methods.update(func.registered_builtin_methods) - except AttributeError: - pass - return cls - - -def register_builtin_method(method_name, python_version_match=None): - def wrapper(func): - if python_version_match and python_version_match != 2 + int(is_py3): - # Some functions do only apply to certain versions. 
- return func - dct = func.__dict__.setdefault('registered_builtin_methods', {}) - dct[method_name] = func + base_dct = {} + for base_cls in reversed(cls.__bases__): + try: + base_dct.update(base_cls.overwritten_methods) + except AttributeError: + pass + + for func in cls.__dict__.values(): + try: + base_dct.update(func.registered_overwritten_methods) + except AttributeError: + pass + cls.overwritten_methods = base_dct + + +class AbstractObjectOverwrite(use_metaclass(_OverwriteMeta, object)): + def get_object(self): + raise NotImplementedError + + def get_filters(self, search_global, *args, **kwargs): + yield SpecialMethodFilter(self, self.overwritten_methods, self.get_object()) + + for filter in self.get_object().get_filters(search_global): + yield filter + + +class BuiltinOverwrite(Context, AbstractObjectOverwrite): + special_object_identifier = None + + def __init__(self, evaluator): + super(BuiltinOverwrite, self).__init__(evaluator, evaluator.builtins_module) + + @memoize_method + def get_object(self): + from jedi.evaluate import compiled + assert self.special_object_identifier + return compiled.get_special_object(self.evaluator, self.special_object_identifier) + + def py__class__(self): + return self.get_object().py__class__() + + +def publish_method(method_name, python_version_match=None): + def decorator(func): + dct = func.__dict__.setdefault('registered_overwritten_methods', {}) + dct[method_name] = func, python_version_match return func - return wrapper + return decorator def get_global_filters(evaluator, context, until_position, origin_scope): @@ -379,40 +463,37 @@ def get_global_filters(evaluator, context, until_position, origin_scope): ... def func(): ... y = None ... ''')) - >>> module_node = script._get_module_node() + >>> module_node = script._module_node >>> scope = next(module_node.iter_funcdefs()) >>> scope >>> context = script._get_module().create_context(scope) >>> filters = list(get_global_filters(context.evaluator, context, (4, 0), None)) - First we get the names names from the function scope. + First we get the names from the function scope. - >>> no_unicode_pprint(filters[0]) - > + >>> no_unicode_pprint(filters[0]) #doctest: +ELLIPSIS + MergedFilter(, ) >>> sorted(str(n) for n in filters[0].values()) ['', ''] - >>> filters[0]._until_position + >>> filters[0]._filters[0]._until_position (4, 0) + >>> filters[0]._filters[1]._until_position Then it yields the names from one level "lower". In this example, this is - the module scope. As a side note, you can see, that the position in the - filter is now None, because typically the whole module is loaded before the - function is called. + the module scope (including globals). + As a side note, you can see, that the position in the filter is None on the + globals filter, because there the whole module is searched. - >>> filters[1].values() # global names -> there are none in our example. - [] - >>> list(filters[2].values()) # package modules -> Also empty. + >>> list(filters[1].values()) # package modules -> Also empty. [] - >>> sorted(name.string_name for name in filters[3].values()) # Module attributes + >>> sorted(name.string_name for name in filters[2].values()) # Module attributes ['__doc__', '__file__', '__name__', '__package__'] - >>> print(filters[1]._until_position) - None Finally, it yields the builtin filter, if `include_builtin` is true (default). - >>> filters[4].values() #doctest: +ELLIPSIS + >>> filters[3].values() #doctest: +ELLIPSIS [, ...] 
""" from jedi.evaluate.context.function import FunctionExecutionContext @@ -430,5 +511,5 @@ def get_global_filters(evaluator, context, until_position, origin_scope): context = context.parent_context # Add builtins to the global scope. - for filter in evaluator.BUILTINS.get_filters(search_global=True): + for filter in evaluator.builtins_module.get_filters(search_global=True): yield filter diff --git a/pythonFiles/jedi/evaluate/finder.py b/pythonFiles/jedi/evaluate/finder.py index 96032ae9b792..5e7043f79600 100644 --- a/pythonFiles/jedi/evaluate/finder.py +++ b/pythonFiles/jedi/evaluate/finder.py @@ -56,7 +56,10 @@ def find(self, filters, attribute_lookup): names = self.filter_name(filters) if self._found_predefined_types is not None and names: check = flow_analysis.reachability_check( - self._context, self._context.tree_node, self._name) + context=self._context, + context_scope=self._context.tree_node, + node=self._name, + ) if check is flow_analysis.UNREACHABLE: return ContextSet() return self._found_predefined_types @@ -92,7 +95,26 @@ def _get_origin_scope(self): def get_filters(self, search_global=False): origin_scope = self._get_origin_scope() if search_global: - return get_global_filters(self._evaluator, self._context, self._position, origin_scope) + position = self._position + + # For functions and classes the defaults don't belong to the + # function and get evaluated in the context before the function. So + # make sure to exclude the function/class name. + if origin_scope is not None: + ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef', 'lambdef') + lambdef = None + if ancestor == 'lambdef': + # For lambdas it's even more complicated since parts will + # be evaluated later. + lambdef = ancestor + ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef') + if ancestor is not None: + colon = ancestor.children[-2] + if position < colon.start_pos: + if lambdef is None or position < lambdef.children[-2].start_pos: + position = ancestor.start_pos + + return get_global_filters(self._evaluator, self._context, position, origin_scope) else: return self._context.get_filters(search_global, self._position, origin_scope=origin_scope) @@ -102,8 +124,7 @@ def filter_name(self, filters): ``filters``), until a name fits. """ names = [] - if self._context.predefined_names: - # TODO is this ok? node might not always be a tree.Name + if self._context.predefined_names and isinstance(self._name, tree.Name): node = self._name while node is not None and not is_scope(node): node = node.parent @@ -133,14 +154,14 @@ def filter_name(self, filters): continue break - debug.dbg('finder.filter_name "%s" in (%s): %s@%s', self._string_name, - self._context, names, self._position) + debug.dbg('finder.filter_name %s in (%s): %s@%s', + self._string_name, self._context, names, self._position) return list(names) def _check_getattr(self, inst): """Checks for both __getattr__ and __getattribute__ methods""" # str is important, because it shouldn't be `Name`! - name = compiled.create(self._evaluator, self._string_name) + name = compiled.create_simple_object(self._evaluator, self._string_name) # This is a little bit special. `__getattribute__` is in Python # executed before `__getattr__`. But: I know no use case, where @@ -149,8 +170,8 @@ def _check_getattr(self, inst): # We are inversing this, because a hand-crafted `__getattribute__` # could still call another hand-crafted `__getattr__`, but not the # other way around. 
- names = (inst.get_function_slot_names('__getattr__') or - inst.get_function_slot_names('__getattribute__')) + names = (inst.get_function_slot_names(u'__getattr__') or + inst.get_function_slot_names(u'__getattribute__')) return inst.execute_function_slots(names, name) def _names_to_types(self, names, attribute_lookup): @@ -248,8 +269,7 @@ def _check_isinstance_type(context, element, search_name): context_set = ContextSet() for cls_or_tup in lazy_context_cls.infer(): - if isinstance(cls_or_tup, iterable.AbstractIterable) and \ - cls_or_tup.array_type == 'tuple': + if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple': for lazy_context in cls_or_tup.py__iter__(): for context in lazy_context.infer(): context_set |= context.execute_evaluated() diff --git a/pythonFiles/jedi/evaluate/flow_analysis.py b/pythonFiles/jedi/evaluate/flow_analysis.py index 670b7a71934c..946f5a2ecaf0 100644 --- a/pythonFiles/jedi/evaluate/flow_analysis.py +++ b/pythonFiles/jedi/evaluate/flow_analysis.py @@ -1,4 +1,5 @@ from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope +from jedi.evaluate.recursion import execution_allowed class Status(object): @@ -104,9 +105,13 @@ def _break_check(context, context_scope, flow_scope, node): def _check_if(context, node): - types = context.eval_node(node) - values = set(x.py__bool__() for x in types) - if len(values) == 1: - return Status.lookup_table[values.pop()] - else: - return UNSURE + with execution_allowed(context.evaluator, node) as allowed: + if not allowed: + return UNSURE + + types = context.eval_node(node) + values = set(x.py__bool__() for x in types) + if len(values) == 1: + return Status.lookup_table[values.pop()] + else: + return UNSURE diff --git a/pythonFiles/jedi/evaluate/helpers.py b/pythonFiles/jedi/evaluate/helpers.py index 3b21e01bda9e..c6226cde2b70 100644 --- a/pythonFiles/jedi/evaluate/helpers.py +++ b/pythonFiles/jedi/evaluate/helpers.py @@ -176,8 +176,6 @@ def get_module_names(module, all_scopes): @contextmanager def predefine_names(context, flow_scope, dct): predefined = context.predefined_names - if flow_scope in predefined: - raise NotImplementedError('Why does this happen?') predefined[flow_scope] = dct try: yield @@ -190,12 +188,27 @@ def is_compiled(context): def is_string(context): - return is_compiled(context) and isinstance(context.obj, (str, unicode)) + if context.evaluator.environment.version_info.major == 2: + str_classes = (unicode, bytes) + else: + str_classes = (unicode,) + return is_compiled(context) and isinstance(context.get_safe_value(default=None), str_classes) def is_literal(context): return is_number(context) or is_string(context) +def _get_safe_value_or_none(context, accept): + if is_compiled(context): + value = context.get_safe_value(default=None) + if isinstance(value, accept): + return value + + +def get_int_or_none(context): + return _get_safe_value_or_none(context, int) + + def is_number(context): - return is_compiled(context) and isinstance(context.obj, (int, float)) + return _get_safe_value_or_none(context, (int, float)) is not None diff --git a/pythonFiles/jedi/evaluate/imports.py b/pythonFiles/jedi/evaluate/imports.py index ecf656b1a676..bcd3bdc74a4d 100644 --- a/pythonFiles/jedi/evaluate/imports.py +++ b/pythonFiles/jedi/evaluate/imports.py @@ -9,31 +9,48 @@ correct implementation is delegated to _compatibility. This module also supports import autocompletion, which means to complete -statements like ``from datetim`` (curser at the end would return ``datetime``). 
+statements like ``from datetim`` (cursor at the end would return ``datetime``). """ -import imp import os -import pkgutil -import sys from parso.python import tree from parso.tree import search_ancestor -from parso.cache import parser_cache from parso import python_bytes_to_unicode -from jedi._compatibility import find_module, unicode, ImplicitNSInfo +from jedi._compatibility import unicode, ImplicitNSInfo, force_unicode from jedi import debug from jedi import settings +from jedi.parser_utils import get_cached_code_lines from jedi.evaluate import sys_path from jedi.evaluate import helpers from jedi.evaluate import compiled from jedi.evaluate import analysis -from jedi.evaluate.utils import unite +from jedi.evaluate.utils import unite, dotted_from_fs_path from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate.filters import AbstractNameDefinition from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS +class ModuleCache(object): + def __init__(self): + self._path_cache = {} + self._name_cache = {} + + def add(self, module, name): + path = module.py__file__() + self._path_cache[path] = module + self._name_cache[name] = module + + def iterate_modules_with_names(self): + return self._name_cache.items() + + def get(self, name): + return self._name_cache[name] + + def get_from_path(self, path): + return self._path_cache[path] + + # This memoization is needed, because otherwise we will infinitely loop on # certain imports. @evaluator_method_cache(default=NO_CONTEXTS) @@ -130,20 +147,13 @@ def __repr__(self): def _add_error(context, name, message=None): # Should be a name, not a string! + if message is None: + name_str = str(name.value) if isinstance(name, tree.Name) else name + message = 'No module named ' + name_str if hasattr(name, 'parent'): analysis.add(context, 'import-error', name, message) - - -def get_init_path(directory_path): - """ - The __init__ file can be searched in a directory. If found return it, else - None. - """ - for suffix, _, _ in imp.get_suffixes(): - path = os.path.join(directory_path, '__init__' + suffix) - if os.path.exists(path): - return path - return None + else: + debug.warning('ImportError without origin: ' + message) class ImportName(AbstractNameDefinition): @@ -204,7 +214,7 @@ def __init__(self, evaluator, import_path, module_context, level=0): if level: base = module_context.py__package__().split('.') - if base == ['']: + if base == [''] or base == ['__main__']: base = [] if level > len(base): path = module_context.py__file__() @@ -226,10 +236,11 @@ def __init__(self, evaluator, import_path, module_context, level=0): else: import_path.insert(0, dir_name) else: - _add_error(module_context, import_path[-1]) + _add_error( + module_context, import_path[-1], + message='Attempted relative import beyond top-level package.' + ) import_path = [] - # TODO add import error. - debug.warning('Attempted relative import beyond top-level package.') # If no path is defined in the module we have no ideas where we # are in the file system. Therefore we cannot know what to do. 
# In this case we just let the path there and ignore that it's @@ -248,27 +259,19 @@ def str_import_path(self): """Returns the import path as pure strings instead of `Name`.""" return tuple( name.value if isinstance(name, tree.Name) else name - for name in self.import_path) + for name in self.import_path + ) def sys_path_with_modifications(self): - in_path = [] - sys_path_mod = self._evaluator.project.sys_path \ + sys_path_mod = self._evaluator.get_sys_path() \ + sys_path.check_sys_path_modifications(self.module_context) - if self.file_path is not None: - # If you edit e.g. gunicorn, there will be imports like this: - # `from gunicorn import something`. But gunicorn is not in the - # sys.path. Therefore look if gunicorn is a parent directory, #56. - if self.import_path: # TODO is this check really needed? - for path in sys_path.traverse_parents(self.file_path): - if os.path.basename(path) == self.str_import_path[0]: - in_path.append(os.path.dirname(path)) - - # Since we know nothing about the call location of the sys.path, - # it's a possibility that the current directory is the origin of - # the Python execution. - sys_path_mod.insert(0, os.path.dirname(self.file_path)) - - return in_path + sys_path_mod + + if self.import_path and self.file_path is not None \ + and self._evaluator.environment.version_info.major == 2: + # Python2 uses an old strange way of importing relative imports. + sys_path_mod.append(force_unicode(os.path.dirname(self.file_path))) + + return sys_path_mod def follow(self): if not self.import_path: @@ -280,7 +283,7 @@ def _do_import(self, import_path, sys_path): This method is very similar to importlib's `_gcd_import`. """ import_parts = [ - i.value if isinstance(i, tree.Name) else i + force_unicode(i.value if isinstance(i, tree.Name) else i) for i in import_path ] @@ -298,7 +301,7 @@ def _do_import(self, import_path, sys_path): module_name = '.'.join(import_parts) try: - return ContextSet(self._evaluator.modules[module_name]) + return ContextSet(self._evaluator.module_cache.get(module_name)) except KeyError: pass @@ -332,62 +335,43 @@ def _do_import(self, import_path, sys_path): for path in paths: # At the moment we are only using one path. So this is # not important to be correct. - try: - if not isinstance(path, list): - path = [path] - module_file, module_path, is_pkg = \ - find_module(import_parts[-1], path, fullname=module_name) + if not isinstance(path, list): + path = [path] + code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info( + string=import_parts[-1], + path=path, + full_name=module_name + ) + if module_path is not None: break - except ImportError: - module_path = None - if module_path is None: + else: _add_error(self.module_context, import_path[-1]) return NO_CONTEXTS else: - parent_module = None - try: - debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) - # Override the sys.path. It works only good that way. - # Injecting the path directly into `find_module` did not work. - sys.path, temp = sys_path, sys.path - try: - module_file, module_path, is_pkg = \ - find_module(import_parts[-1], fullname=module_name) - finally: - sys.path = temp - except ImportError: + debug.dbg('search_module %s in %s', import_parts[-1], self.file_path) + # Override the sys.path. It works only good that way. + # Injecting the path directly into `find_module` did not work. 
+ code, module_path, is_pkg = self._evaluator.compiled_subprocess.get_module_info( + string=import_parts[-1], + full_name=module_name, + sys_path=sys_path, + ) + if module_path is None: # The module is not a package. _add_error(self.module_context, import_path[-1]) return NO_CONTEXTS - code = None - if is_pkg: - # In this case, we don't have a file yet. Search for the - # __init__ file. - if module_path.endswith(('.zip', '.egg')): - code = module_file.loader.get_source(module_name) - else: - module_path = get_init_path(module_path) - elif module_file: - code = module_file.read() - module_file.close() - - if isinstance(module_path, ImplicitNSInfo): - from jedi.evaluate.context.namespace import ImplicitNamespaceContext - fullname, paths = module_path.name, module_path.paths - module = ImplicitNamespaceContext(self._evaluator, fullname=fullname) - module.paths = paths - elif module_file is None and not module_path.endswith(('.py', '.zip', '.egg')): - module = compiled.load_module(self._evaluator, module_path) - else: - module = _load_module(self._evaluator, module_path, code, sys_path, parent_module) + module = _load_module( + self._evaluator, module_path, code, sys_path, + module_name=module_name, + safe_module_name=True, + ) if module is None: # The file might raise an ImportError e.g. and therefore not be # importable. return NO_CONTEXTS - self._evaluator.modules[module_name] = module return ContextSet(module) def _generate_name(self, name, in_module=None): @@ -401,15 +385,17 @@ def _get_module_names(self, search_path=None, in_module=None): Get the names of all modules in the search_path. This means file names and not names defined in the files. """ + sub = self._evaluator.compiled_subprocess names = [] # add builtin module names if search_path is None and in_module is None: - names += [self._generate_name(name) for name in sys.builtin_module_names] + names += [self._generate_name(name) for name in sub.get_builtin_module_names()] if search_path is None: search_path = self.sys_path_with_modifications() - for module_loader, name, is_pkg in pkgutil.iter_modules(search_path): + + for name in sub.list_module_names(search_path): names.append(self._generate_name(name, in_module=in_module)) return names @@ -448,7 +434,7 @@ def completion_names(self, evaluator, only_modules=False): # implicit namespace packages elif isinstance(context, ImplicitNamespaceContext): paths = context.paths - names += self._get_module_names(paths) + names += self._get_module_names(paths, in_module=context) if only_modules: # In the case of an import like `from x.` we don't need to @@ -476,38 +462,65 @@ def completion_names(self, evaluator, only_modules=False): return names -def _load_module(evaluator, path=None, code=None, sys_path=None, parent_module=None): - if sys_path is None: - sys_path = evaluator.project.sys_path +def _load_module(evaluator, path=None, code=None, sys_path=None, + module_name=None, safe_module_name=False): + try: + return evaluator.module_cache.get(module_name) + except KeyError: + pass + try: + return evaluator.module_cache.get_from_path(path) + except KeyError: + pass - dotted_path = path and compiled.dotted_from_fs_path(path, sys_path) - if path is not None and path.endswith(('.py', '.zip', '.egg')) \ - and dotted_path not in settings.auto_import_modules: + if isinstance(path, ImplicitNSInfo): + from jedi.evaluate.context.namespace import ImplicitNamespaceContext + module = ImplicitNamespaceContext( + evaluator, + fullname=path.name, + paths=path.paths, + ) + else: + if sys_path is None: + 
sys_path = evaluator.get_sys_path() + + dotted_path = path and dotted_from_fs_path(path, sys_path) + if path is not None and path.endswith(('.py', '.zip', '.egg')) \ + and dotted_path not in settings.auto_import_modules: + + module_node = evaluator.parse( + code=code, path=path, cache=True, diff_cache=True, + cache_path=settings.cache_directory) + + from jedi.evaluate.context import ModuleContext + module = ModuleContext( + evaluator, module_node, + path=path, + code_lines=get_cached_code_lines(evaluator.grammar, path), + ) + else: + module = compiled.load_module(evaluator, path=path, sys_path=sys_path) - module_node = evaluator.grammar.parse( - code=code, path=path, cache=True, diff_cache=True, - cache_path=settings.cache_directory) + if module is not None and module_name is not None: + add_module_to_cache(evaluator, module_name, module, safe=safe_module_name) - from jedi.evaluate.context import ModuleContext - return ModuleContext(evaluator, module_node, path=path) - else: - return compiled.load_module(evaluator, path) + return module -def add_module(evaluator, module_name, module): - if '.' not in module_name: +def add_module_to_cache(evaluator, module_name, module, safe=False): + if not safe and '.' not in module_name: # We cannot add paths with dots, because that would collide with # the sepatator dots for nested packages. Therefore we return # `__main__` in ModuleWrapper.py__name__(), which is similar to # Python behavior. - evaluator.modules[module_name] = module + return + evaluator.module_cache.add(module, module_name) def get_modules_containing_name(evaluator, modules, name): """ Search a name in the directories of modules. """ - from jedi.evaluate.context import ModuleContext def check_directories(paths): for p in paths: if p is not None: @@ -519,28 +532,16 @@ def check_directories(paths): if file_name.endswith('.py'): yield path - def check_python_file(path): - try: - # TODO I don't think we should use the cache here?! - node_cache_item = parser_cache[evaluator.grammar._hashed][path] - except KeyError: - try: - return check_fs(path) - except IOError: - return None - else: - module_node = node_cache_item.node - return ModuleContext(evaluator, module_node, path=path) - def check_fs(path): with open(path, 'rb') as f: code = python_bytes_to_unicode(f.read(), errors='replace') if name in code: - module = _load_module(evaluator, path, code) - - module_name = sys_path.dotted_path_in_sys_path(evaluator.project.sys_path, path) - if module_name is not None: - add_module(evaluator, module_name, module) + e_sys_path = evaluator.get_sys_path() + module_name = sys_path.dotted_path_in_sys_path(e_sys_path, path) + module = _load_module( + evaluator, path, code, + sys_path=e_sys_path, module_name=module_name + ) return module # skip non python modules @@ -565,6 +566,6 @@ def check_fs(path): # Sort here to make issues less random. 
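
# check_directories / check_fs above only parse a file after a cheap
# substring test on its decoded source succeeds. A rough standalone sketch of
# that scan-then-load idea; the function name and decoding details are
# illustrative, not the jedi implementation:
import os


def python_files_mentioning(name, directories):
    """Yield paths of .py files whose source text contains `name`."""
    for directory in directories:
        for entry in os.listdir(directory):
            if not entry.endswith('.py'):
                continue
            path = os.path.join(directory, entry)
            with open(path, 'rb') as f:
                code = f.read().decode('utf-8', errors='replace')
            if name in code:
                yield path

# Usage sketch (the directory here is hypothetical):
# print(list(python_files_mentioning('ContextSet', ['jedi/evaluate'])))
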
for p in sorted(paths): # make testing easier, sort it - same results on every interpreter - m = check_python_file(p) + m = check_fs(p) if m is not None and not isinstance(m, compiled.CompiledObject): yield m diff --git a/pythonFiles/jedi/evaluate/param.py b/pythonFiles/jedi/evaluate/param.py index a46394ce8a81..1445ef0c8ff6 100644 --- a/pythonFiles/jedi/evaluate/param.py +++ b/pythonFiles/jedi/evaluate/param.py @@ -97,7 +97,7 @@ def get_params(execution_context, var_args): var_arg_iterator.push_back((key, argument)) break lazy_context_list.append(argument) - seq = iterable.FakeSequence(execution_context.evaluator, 'tuple', lazy_context_list) + seq = iterable.FakeSequence(execution_context.evaluator, u'tuple', lazy_context_list) result_arg = LazyKnownContext(seq) elif param.star_count == 2: # **kwargs param @@ -176,7 +176,7 @@ def _error_argument_count(funcdef, actual_count): def _create_default_param(execution_context, param): if param.star_count == 1: result_arg = LazyKnownContext( - iterable.FakeSequence(execution_context.evaluator, 'tuple', []) + iterable.FakeSequence(execution_context.evaluator, u'tuple', []) ) elif param.star_count == 2: result_arg = LazyKnownContext( @@ -192,4 +192,3 @@ def _create_default_param(execution_context, param): def create_default_params(execution_context, funcdef): return [_create_default_param(execution_context, p) for p in funcdef.get_params()] - diff --git a/pythonFiles/jedi/evaluate/pep0484.py b/pythonFiles/jedi/evaluate/pep0484.py index 820f112c54e0..f23943e1a8a5 100644 --- a/pythonFiles/jedi/evaluate/pep0484.py +++ b/pythonFiles/jedi/evaluate/pep0484.py @@ -22,16 +22,17 @@ import os import re -from parso import ParserSyntaxError +from parso import ParserSyntaxError, parse, split_lines from parso.python import tree +from jedi._compatibility import unicode, force_unicode from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate import compiled from jedi.evaluate.base_context import NO_CONTEXTS, ContextSet from jedi.evaluate.lazy_context import LazyTreeContext from jedi.evaluate.context import ModuleContext +from jedi.evaluate.helpers import is_string from jedi import debug -from jedi import _compatibility from jedi import parser_utils @@ -41,17 +42,23 @@ def _evaluate_for_annotation(context, annotation, index=None): If index is not None, the annotation is expected to be a tuple and we're interested in that index """ - if annotation is not None: - context_set = context.eval_node(_fix_forward_reference(context, annotation)) - if index is not None: - context_set = context_set.filter( - lambda context: context.array_type == 'tuple' \ - and len(list(context.py__iter__())) >= index - ).py__getitem__(index) - return context_set.execute_evaluated() - else: + context_set = context.eval_node(_fix_forward_reference(context, annotation)) + return context_set.execute_evaluated() + + +def _evaluate_annotation_string(context, string, index=None): + node = _get_forward_reference_node(context, string) + if node is None: return NO_CONTEXTS + context_set = context.eval_node(node) + if index is not None: + context_set = context_set.filter( + lambda context: context.array_type == u'tuple' + and len(list(context.py__iter__())) >= index + ).py__getitem__(index) + return context_set.execute_evaluated() + def _fix_forward_reference(context, node): evaled_nodes = context.eval_node(node) @@ -59,30 +66,111 @@ def _fix_forward_reference(context, node): debug.warning("Eval'ed typing index %s should lead to 1 object, " " not %s" % (node, evaled_nodes)) return node - 
evaled_node = list(evaled_nodes)[0] - if isinstance(evaled_node, compiled.CompiledObject) and \ - isinstance(evaled_node.obj, str): - try: - new_node = context.evaluator.grammar.parse( - _compatibility.unicode(evaled_node.obj), - start_symbol='eval_input', - error_recovery=False - ) - except ParserSyntaxError: - debug.warning('Annotation not parsed: %s' % evaled_node.obj) - return node - else: - module = node.get_root_node() - parser_utils.move(new_node, module.end_pos[0]) - new_node.parent = context.tree_node - return new_node + + evaled_context = list(evaled_nodes)[0] + if is_string(evaled_context): + result = _get_forward_reference_node(context, evaled_context.get_safe_value()) + if result is not None: + return result + + return node + + +def _get_forward_reference_node(context, string): + try: + new_node = context.evaluator.grammar.parse( + force_unicode(string), + start_symbol='eval_input', + error_recovery=False + ) + except ParserSyntaxError: + debug.warning('Annotation not parsed: %s' % string) + return None else: - return node + module = context.tree_node.get_root_node() + parser_utils.move(new_node, module.end_pos[0]) + new_node.parent = context.tree_node + return new_node + + +def _split_comment_param_declaration(decl_text): + """ + Split decl_text on commas, but group generic expressions + together. + + For example, given "foo, Bar[baz, biz]" we return + ['foo', 'Bar[baz, biz]']. + + """ + try: + node = parse(decl_text, error_recovery=False).children[0] + except ParserSyntaxError: + debug.warning('Comment annotation is not valid Python: %s' % decl_text) + return [] + + if node.type == 'name': + return [node.get_code().strip()] + + params = [] + try: + children = node.children + except AttributeError: + return [] + else: + for child in children: + if child.type in ['name', 'atom_expr', 'power']: + params.append(child.get_code().strip()) + + return params @evaluator_method_cache() def infer_param(execution_context, param): + """ + Infers the type of a function parameter, using type annotations. + """ annotation = param.annotation + if annotation is None: + # If no Python 3-style annotation, look for a Python 2-style comment + # annotation. + # Identify parameters to function in the same sequence as they would + # appear in a type comment. + all_params = [child for child in param.parent.children + if child.type == 'param'] + + node = param.parent.parent + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return NO_CONTEXTS + + match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment) + if not match: + return NO_CONTEXTS + params_comments = _split_comment_param_declaration(match.group(1)) + + # Find the specific param being investigated + index = all_params.index(param) + # If the number of parameters doesn't match length of type comment, + # ignore first parameter (assume it's self). 
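
# infer_param above falls back to Python 2 style type comments. The regex in
# the diff extracts the parenthesised parameter declarations, which
# _split_comment_param_declaration then splits so that generics such as
# "Bar[baz, biz]" stay in one piece. A quick standalone check of that regex
# (the sample comment is made up):
import re

comment = "# type: (int, Dict[str, int]) -> bool"
match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
print(match.group(1))   # 'int, Dict[str, int]'
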
+ if len(params_comments) != len(all_params): + debug.warning( + "Comments length != Params length %s %s", + params_comments, all_params + ) + from jedi.evaluate.context.instance import BaseInstanceFunctionExecution + if isinstance(execution_context, BaseInstanceFunctionExecution): + if index == 0: + # Assume it's self, which is already handled + return NO_CONTEXTS + index -= 1 + if index >= len(params_comments): + return NO_CONTEXTS + + param_comment = params_comments[index] + return _evaluate_annotation_string( + execution_context.get_root_context(), + param_comment + ) module_context = execution_context.get_root_context() return _evaluate_for_annotation(module_context, annotation) @@ -102,12 +190,33 @@ def py__annotations__(funcdef): @evaluator_method_cache() def infer_return_types(function_context): + """ + Infers the type of a function's return value, + according to type annotations. + """ annotation = py__annotations__(function_context.tree_node).get("return", None) + if annotation is None: + # If there is no Python 3-type annotation, look for a Python 2-type annotation + node = function_context.tree_node + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return NO_CONTEXTS + + match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment) + if not match: + return NO_CONTEXTS + + return _evaluate_annotation_string( + function_context.get_root_context(), + match.group(1).strip() + ) + module_context = function_context.get_root_context() return _evaluate_for_annotation(module_context, annotation) _typing_module = None +_typing_module_code_lines = None def _get_typing_replacement_module(grammar): @@ -115,14 +224,15 @@ def _get_typing_replacement_module(grammar): The idea is to return our jedi replacement for the PEP-0484 typing module as discussed at https://github.com/davidhalter/jedi/issues/663 """ - global _typing_module + global _typing_module, _typing_module_code_lines if _typing_module is None: typing_path = \ os.path.abspath(os.path.join(__file__, "../jedi_typing.py")) with open(typing_path) as f: - code = _compatibility.unicode(f.read()) + code = unicode(f.read()) _typing_module = grammar.parse(code) - return _typing_module + _typing_module_code_lines = split_lines(code, keepends=True) + return _typing_module, _typing_module_code_lines def py__getitem__(context, typ, node): @@ -152,10 +262,12 @@ def py__getitem__(context, typ, node): # check for the instance typing._Optional (Python 3.6). 
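
# infer_return_types above does the same for the return annotation: when no
# Python 3 annotation exists, it reads the trailing comment and feeds the
# part after "->" to _evaluate_annotation_string. A standalone check of the
# return-type regex (the sample comment is made up):
import re

comment = "# type: (int, str) -> List[int]"
match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
print(match.group(1).strip())   # 'List[int]'
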
return context.eval_node(nodes[0]) + module_node, code_lines = _get_typing_replacement_module(context.evaluator.latest_grammar) typing = ModuleContext( context.evaluator, - module_node=_get_typing_replacement_module(context.evaluator.latest_grammar), - path=None + module_node=module_node, + path=None, + code_lines=code_lines, ) factories = typing.py__getattribute__("factory") assert len(factories) == 1 @@ -167,12 +279,12 @@ def py__getitem__(context, typ, node): if isinstance(child, tree.Class)) if type_name not in valid_classnames: return None - compiled_classname = compiled.create(context.evaluator, type_name) + compiled_classname = compiled.create_simple_object(context.evaluator, type_name) from jedi.evaluate.context.iterable import FakeSequence args = FakeSequence( context.evaluator, - "tuple", + u'tuple', [LazyTreeContext(context, n) for n in nodes] ) @@ -213,10 +325,6 @@ def _find_type_from_comment_hint(context, node, varlist, name): if comment is None: return [] match = re.match(r"^#\s*type:\s*([^#]*)", comment) - if not match: + if match is None: return [] - annotation = tree.String( - repr(str(match.group(1).strip())), - node.start_pos) - annotation.parent = node.parent - return _evaluate_for_annotation(context, annotation, index) + return _evaluate_annotation_string(context, match.group(1).strip(), index) diff --git a/pythonFiles/jedi/evaluate/project.py b/pythonFiles/jedi/evaluate/project.py deleted file mode 100644 index b90f0f0c3be8..000000000000 --- a/pythonFiles/jedi/evaluate/project.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -import sys - -from jedi.evaluate.sys_path import get_venv_path, detect_additional_paths -from jedi.cache import underscore_memoization - - -class Project(object): - def __init__(self, sys_path=None): - if sys_path is not None: - self._sys_path = sys_path - - venv = os.getenv('VIRTUAL_ENV') - if venv: - sys_path = get_venv_path(venv) - - if sys_path is None: - sys_path = sys.path - - base_sys_path = list(sys_path) - try: - base_sys_path.remove('') - except ValueError: - pass - - self._base_sys_path = base_sys_path - - def add_script_path(self, script_path): - self._script_path = script_path - - def add_evaluator(self, evaluator): - self._evaluator = evaluator - - @property - @underscore_memoization - def sys_path(self): - if self._script_path is None: - return self._base_sys_path - - return self._base_sys_path + detect_additional_paths(self._evaluator, self._script_path) diff --git a/pythonFiles/jedi/evaluate/recursion.py b/pythonFiles/jedi/evaluate/recursion.py index e2f34a4a9a06..5be3f8be4d29 100644 --- a/pythonFiles/jedi/evaluate/recursion.py +++ b/pythonFiles/jedi/evaluate/recursion.py @@ -49,6 +49,7 @@ A function may not be executed more than this number of times recursively. """ + class RecursionDetector(object): def __init__(self): self.pushed_nodes = [] @@ -67,9 +68,11 @@ def execution_allowed(evaluator, node): node.start_pos) yield False else: - pushed_nodes.append(node) - yield True - pushed_nodes.pop() + try: + pushed_nodes.append(node) + yield True + finally: + pushed_nodes.pop() def execution_recursion_decorator(default=NO_CONTEXTS): @@ -113,7 +116,7 @@ def push_execution(self, execution): self._parent_execution_funcs.append(funcdef) module = execution.get_root_context() - if module == self._evaluator.BUILTINS: + if module == self._evaluator.builtins_module: # We have control over builtins so we know they are not recursing # like crazy. 
Therefore we just let them execute always, because # they usually just help a lot with getting good results. diff --git a/pythonFiles/jedi/evaluate/site.py b/pythonFiles/jedi/evaluate/site.py deleted file mode 100644 index bf884faefaaf..000000000000 --- a/pythonFiles/jedi/evaluate/site.py +++ /dev/null @@ -1,110 +0,0 @@ -"""An adapted copy of relevant site-packages functionality from Python stdlib. - -This file contains some functions related to handling site-packages in Python -with jedi-specific modifications: - -- the functions operate on sys_path argument rather than global sys.path - -- in .pth files "import ..." lines that allow execution of arbitrary code are - skipped to prevent code injection into jedi interpreter - -""" - -# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved - -from __future__ import print_function - -import sys -import os - - -def makepath(*paths): - dir = os.path.join(*paths) - try: - dir = os.path.abspath(dir) - except OSError: - pass - return dir, os.path.normcase(dir) - - -def _init_pathinfo(sys_path): - """Return a set containing all existing directory entries from sys_path""" - d = set() - for dir in sys_path: - try: - if os.path.isdir(dir): - dir, dircase = makepath(dir) - d.add(dircase) - except TypeError: - continue - return d - - -def addpackage(sys_path, sitedir, name, known_paths): - """Process a .pth file within the site-packages directory: - For each line in the file, either combine it with sitedir to a path - and add that to known_paths, or execute it if it starts with 'import '. - """ - if known_paths is None: - known_paths = _init_pathinfo(sys_path) - reset = 1 - else: - reset = 0 - fullname = os.path.join(sitedir, name) - try: - f = open(fullname, "r") - except OSError: - return - with f: - for n, line in enumerate(f): - if line.startswith("#"): - continue - try: - if line.startswith(("import ", "import\t")): - # Change by immerrr: don't evaluate import lines to prevent - # code injection into jedi through pth files. 
- # - # exec(line) - continue - line = line.rstrip() - dir, dircase = makepath(sitedir, line) - if not dircase in known_paths and os.path.exists(dir): - sys_path.append(dir) - known_paths.add(dircase) - except Exception: - print("Error processing line {:d} of {}:\n".format(n+1, fullname), - file=sys.stderr) - import traceback - for record in traceback.format_exception(*sys.exc_info()): - for line in record.splitlines(): - print(' '+line, file=sys.stderr) - print("\nRemainder of file ignored", file=sys.stderr) - break - if reset: - known_paths = None - return known_paths - - -def addsitedir(sys_path, sitedir, known_paths=None): - """Add 'sitedir' argument to sys_path if missing and handle .pth files in - 'sitedir'""" - if known_paths is None: - known_paths = _init_pathinfo(sys_path) - reset = 1 - else: - reset = 0 - sitedir, sitedircase = makepath(sitedir) - if not sitedircase in known_paths: - sys_path.append(sitedir) # Add path component - known_paths.add(sitedircase) - try: - names = os.listdir(sitedir) - except OSError: - return - names = [name for name in names if name.endswith(".pth")] - for name in sorted(names): - addpackage(sys_path, sitedir, name, known_paths) - if reset: - known_paths = None - return known_paths diff --git a/pythonFiles/jedi/evaluate/stdlib.py b/pythonFiles/jedi/evaluate/stdlib.py index 259d692733db..06296d938183 100644 --- a/pythonFiles/jedi/evaluate/stdlib.py +++ b/pythonFiles/jedi/evaluate/stdlib.py @@ -9,9 +9,11 @@ the standard library. The usual way to understand the standard library is the compiled module that returns the types for C-builtins. """ -import collections import re +import parso + +from jedi._compatibility import force_unicode from jedi import debug from jedi.evaluate.arguments import ValuesArguments from jedi.evaluate import analysis @@ -57,7 +59,7 @@ def execute(evaluator, obj, arguments): except AttributeError: pass else: - if obj.parent_context == evaluator.BUILTINS: + if obj.parent_context == evaluator.builtins_module: module_name = 'builtins' elif isinstance(obj.parent_context, ModuleContext): module_name = obj.parent_context.name.string_name @@ -107,10 +109,11 @@ def argument_clinic(string, want_obj=False, want_context=False, want_arguments=F def f(func): def wrapper(evaluator, obj, arguments): debug.dbg('builtin start %s' % obj, color='MAGENTA') + result = NO_CONTEXTS try: lst = list(arguments.eval_argument_clinic(clinic_args)) except ValueError: - return NO_CONTEXTS + pass else: kwargs = {} if want_context: @@ -119,9 +122,10 @@ def wrapper(evaluator, obj, arguments): kwargs['obj'] = obj if want_arguments: kwargs['arguments'] = arguments - return func(evaluator, *lst, **kwargs) + result = func(evaluator, *lst, **kwargs) finally: - debug.dbg('builtin end', color='MAGENTA') + debug.dbg('builtin end: %s', result, color='MAGENTA') + return result return wrapper return f @@ -133,7 +137,7 @@ def builtins_next(evaluator, iterators, defaults): TODO this function is currently not used. It's a stab at implementing next in a different way than fake objects. This would be a bit more flexible. 
""" - if evaluator.python_version[0] == 2: + if evaluator.environment.version_info.major == 2: name = 'next' else: name = '__next__' @@ -157,7 +161,7 @@ def builtins_getattr(evaluator, objects, names, defaults=None): for obj in objects: for name in names: if is_string(name): - return obj.py__getattribute__(name.obj) + return obj.py__getattribute__(force_unicode(name.get_safe_value())) else: debug.warning('getattr called without str') continue @@ -207,22 +211,24 @@ def builtins_reversed(evaluator, sequences, obj, arguments): # necessary, because `reversed` is a function and autocompletion # would fail in certain cases like `reversed(x).__iter__` if we # just returned the result directly. - seq = iterable.FakeSequence(evaluator, 'list', rev) + seq = iterable.FakeSequence(evaluator, u'list', rev) arguments = ValuesArguments([ContextSet(seq)]) - return ContextSet(CompiledInstance(evaluator, evaluator.BUILTINS, obj, arguments)) + return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments)) @argument_clinic('obj, type, /', want_arguments=True) def builtins_isinstance(evaluator, objects, types, arguments): bool_results = set() for o in objects: + cls = o.py__class__() try: - mro_func = o.py__class__().py__mro__ + mro_func = cls.py__mro__ except AttributeError: # This is temporary. Everything should have a class attribute in # Python?! Maybe we'll leave it here, because some numpy objects or # whatever might not. - return ContextSet(compiled.create(True), compiled.create(False)) + bool_results = set([True, False]) + break mro = mro_func() @@ -230,7 +236,7 @@ def builtins_isinstance(evaluator, objects, types, arguments): if cls_or_tup.is_class(): bool_results.add(cls_or_tup in mro) elif cls_or_tup.name.string_name == 'tuple' \ - and cls_or_tup.get_root_context() == evaluator.BUILTINS: + and cls_or_tup.get_root_context() == evaluator.builtins_module: # Check for tuples. classes = ContextSet.from_sets( lazy_context.infer() @@ -246,7 +252,10 @@ def builtins_isinstance(evaluator, objects, types, arguments): 'not %s.' % cls_or_tup analysis.add(lazy_context._context, 'type-error-isinstance', node, message) - return ContextSet.from_iterable(compiled.create(evaluator, x) for x in bool_results) + return ContextSet.from_iterable( + compiled.builtin_from_name(evaluator, force_unicode(str(b))) + for b in bool_results + ) def collections_namedtuple(evaluator, obj, arguments): @@ -256,45 +265,54 @@ def collections_namedtuple(evaluator, obj, arguments): This has to be done by processing the namedtuple class template and evaluating the result. - .. note:: |jedi| only supports namedtuples on Python >2.6. - """ - # Namedtuples are not supported on Python 2.6 - if not hasattr(collections, '_class_template'): + collections_context = obj.parent_context + _class_template_set = collections_context.py__getattribute__(u'_class_template') + if not _class_template_set: + # Namedtuples are not supported on Python 2.6, early 2.7, because the + # _class_template variable is not defined, there. return NO_CONTEXTS # Process arguments # TODO here we only use one of the types, we should use all. 
- name = list(_follow_param(evaluator, arguments, 0))[0].obj + # TODO this is buggy, doesn't need to be a string + name = list(_follow_param(evaluator, arguments, 0))[0].get_safe_value() _fields = list(_follow_param(evaluator, arguments, 1))[0] if isinstance(_fields, compiled.CompiledObject): - fields = _fields.obj.replace(',', ' ').split() - elif isinstance(_fields, iterable.AbstractIterable): + fields = _fields.get_safe_value().replace(',', ' ').split() + elif isinstance(_fields, iterable.Sequence): fields = [ - v.obj + v.get_safe_value() for lazy_context in _fields.py__iter__() - for v in lazy_context.infer() if hasattr(v, 'obj') + for v in lazy_context.infer() if is_string(v) ] else: return NO_CONTEXTS - base = collections._class_template + def get_var(name): + x, = collections_context.py__getattribute__(name) + return x.get_safe_value() + + base = next(iter(_class_template_set)).get_safe_value() base += _NAMEDTUPLE_INIT - # Build source - source = base.format( + # Build source code + code = base.format( typename=name, field_names=tuple(fields), num_fields=len(fields), - arg_list = repr(tuple(fields)).replace("'", "")[1:-1], - repr_fmt=', '.join(collections._repr_template.format(name=name) for name in fields), - field_defs='\n'.join(collections._field_template.format(index=index, name=name) + arg_list=repr(tuple(fields)).replace("u'", "").replace("'", "")[1:-1], + repr_fmt=', '.join(get_var(u'_repr_template').format(name=name) for name in fields), + field_defs='\n'.join(get_var(u'_field_template').format(index=index, name=name) for index, name in enumerate(fields)) ) - # Parse source - module = evaluator.grammar.parse(source) + # Parse source code + module = evaluator.grammar.parse(code) generated_class = next(module.iter_classdefs()) - parent_context = ModuleContext(evaluator, module, '') + parent_context = ModuleContext( + evaluator, module, None, + code_lines=parso.split_lines(code, keepends=True), + ) return ContextSet(ClassContext(evaluator, parent_context, generated_class)) diff --git a/pythonFiles/jedi/evaluate/syntax_tree.py b/pythonFiles/jedi/evaluate/syntax_tree.py index 1d847a4960de..4efe845d476d 100644 --- a/pythonFiles/jedi/evaluate/syntax_tree.py +++ b/pythonFiles/jedi/evaluate/syntax_tree.py @@ -2,10 +2,10 @@ Functions evaluating the syntax tree. 
""" import copy -import operator as op from parso.python import tree +from jedi._compatibility import force_unicode, unicode from jedi import debug from jedi import parser_utils from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS, ContextualizedNode, \ @@ -17,11 +17,13 @@ from jedi.evaluate import analysis from jedi.evaluate import imports from jedi.evaluate import arguments +from jedi.evaluate.pep0484 import _evaluate_for_annotation from jedi.evaluate.context import ClassContext, FunctionContext from jedi.evaluate.context import iterable from jedi.evaluate.context import TreeInstance, CompiledInstance from jedi.evaluate.finder import NameFinder from jedi.evaluate.helpers import is_string, is_literal, is_number, is_compiled +from jedi.evaluate.compiled.access import COMPARISON_OPERATORS def _limit_context_infers(func): @@ -48,13 +50,25 @@ def wrapper(context, *args, **kwargs): return wrapper +def _py__stop_iteration_returns(generators): + results = ContextSet() + for generator in generators: + try: + method = generator.py__stop_iteration_returns + except AttributeError: + debug.warning('%s is not actually a generator', generator) + else: + results |= method() + return results + + @debug.increase_indent @_limit_context_infers def eval_node(context, element): - debug.dbg('eval_element %s@%s', element, element.start_pos) + debug.dbg('eval_node %s@%s', element, element.start_pos) evaluator = context.evaluator typ = element.type - if typ in ('name', 'number', 'string', 'atom'): + if typ in ('name', 'number', 'string', 'atom', 'strings'): return eval_atom(context, element) elif typ == 'keyword': # For False/True/None @@ -68,22 +82,33 @@ def eval_node(context, element): return eval_expr_stmt(context, element) elif typ in ('power', 'atom_expr'): first_child = element.children[0] - if not (first_child.type == 'keyword' and first_child.value == 'await'): - context_set = eval_atom(context, first_child) - for trailer in element.children[1:]: - if trailer == '**': # has a power operation. - right = evaluator.eval_element(context, element.children[2]) - context_set = _eval_comparison( - evaluator, - context, - context_set, - trailer, - right - ) - break - context_set = eval_trailer(context, context_set, trailer) - return context_set - return NO_CONTEXTS + children = element.children[1:] + had_await = False + if first_child.type == 'keyword' and first_child.value == 'await': + had_await = True + first_child = children.pop(0) + + context_set = eval_atom(context, first_child) + for trailer in children: + if trailer == '**': # has a power operation. + right = context.eval_node(children[1]) + context_set = _eval_comparison( + evaluator, + context, + context_set, + trailer, + right + ) + break + context_set = eval_trailer(context, context_set, trailer) + + if had_await: + await_context_set = context_set.py__getattribute__(u"__await__") + if not await_context_set: + debug.warning('Tried to run py__await__ on context %s', context) + context_set = ContextSet() + return _py__stop_iteration_returns(await_context_set.execute_evaluated()) + return context_set elif typ in ('testlist_star_expr', 'testlist',): # The implicit tuple in statements. return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element)) @@ -100,8 +125,10 @@ def eval_node(context, element): # Must be an ellipsis, other operators are not evaluated. # In Python 2 ellipsis is coded as three single dot tokens, not # as one token 3 dot token. 
- assert element.value in ('.', '...') - return ContextSet(compiled.create(evaluator, Ellipsis)) + if element.value not in ('.', '...'): + origin = element.parent + raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin)) + return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis')) elif typ == 'dotted_name': context_set = eval_atom(context, element.children[0]) for next_name in element.children[2::2]: @@ -112,6 +139,15 @@ def eval_node(context, element): return eval_node(context, element.children[0]) elif typ == 'annassign': return pep0484._evaluate_for_annotation(context, element.children[1]) + elif typ == 'yield_expr': + if len(element.children) and element.children[1].type == 'yield_arg': + # Implies that it's a yield from. + element = element.children[1].children[1] + generators = context.eval_node(element) + return _py__stop_iteration_returns(generators) + + # Generator.send() is not implemented. + return NO_CONTEXTS else: return eval_or_test(context, element) @@ -119,7 +155,7 @@ def eval_node(context, element): def eval_trailer(context, base_contexts, trailer): trailer_op, node = trailer.children[:2] if node == ')': # `arglist` is optional. - node = () + node = None if trailer_op == '[': trailer_op, node, _ = trailer.children @@ -148,7 +184,7 @@ def eval_trailer(context, base_contexts, trailer): name_or_str=node ) else: - assert trailer_op == '(' + assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op args = arguments.TreeArguments(context.evaluator, context, node, trailer) return base_contexts.execute(args) @@ -173,19 +209,19 @@ def eval_atom(context, atom): ) elif isinstance(atom, tree.Literal): - string = parser_utils.safe_literal_eval(atom.value) - return ContextSet(compiled.create(context.evaluator, string)) + string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value) + return ContextSet(compiled.create_simple_object(context.evaluator, string)) + elif atom.type == 'strings': + # Will be multiple string. + context_set = eval_atom(context, atom.children[0]) + for string in atom.children[1:]: + right = eval_atom(context, string) + context_set = _eval_comparison(context.evaluator, context, context_set, u'+', right) + return context_set else: c = atom.children - if c[0].type == 'string': - # Will be one string. - context_set = eval_atom(context, c[0]) - for string in c[1:]: - right = eval_atom(context, string) - context_set = _eval_comparison(context.evaluator, context, context_set, '+', right) - return context_set # Parentheses without commas are not tuples. - elif c[0] == '(' and not len(c) == 2 \ + if c[0] == '(' and not len(c) == 2 \ and not(c[1].type == 'testlist_comp' and len(c[1].children) > 1): return context.eval_node(c[1]) @@ -203,7 +239,9 @@ def eval_atom(context, atom): pass if comp_for.type == 'comp_for': - return ContextSet(iterable.Comprehension.from_atom(context.evaluator, context, atom)) + return ContextSet(iterable.comprehension_from_atom( + context.evaluator, context, atom + )) # It's a dict/list/tuple literal. array_node = c[1] @@ -221,7 +259,21 @@ def eval_atom(context, atom): @_limit_context_infers def eval_expr_stmt(context, stmt, seek_name=None): with recursion.execution_allowed(context.evaluator, stmt) as allowed: - if allowed or context.get_root_context() == context.evaluator.BUILTINS: + # Here we allow list/set to recurse under certain conditions. To make + # it possible to resolve stuff like list(set(list(x))), this is + # necessary. 
+ if not allowed and context.get_root_context() == context.evaluator.builtins_module: + try: + instance = context.instance + except AttributeError: + pass + else: + if instance.name.string_name in ('list', 'set'): + c = instance.get_first_non_keyword_argument_contexts() + if instance not in c: + allowed = True + + if allowed: return _eval_expr_stmt(context, stmt, seek_name) return NO_CONTEXTS @@ -286,16 +338,16 @@ def eval_or_test(context, or_test): # handle lazy evaluation of and/or here. if operator in ('and', 'or'): left_bools = set(left.py__bool__() for left in types) - if left_bools == set([True]): + if left_bools == {True}: if operator == 'and': types = context.eval_node(right) - elif left_bools == set([False]): + elif left_bools == {False}: if operator != 'and': types = context.eval_node(right) # Otherwise continue, because of uncertainty. else: types = _eval_comparison(context.evaluator, context, types, operator, - context.eval_node(right)) + context.eval_node(right)) debug.dbg('eval_or_test types %s', types) return types @@ -308,29 +360,16 @@ def eval_factor(context_set, operator): for context in context_set: if operator == '-': if is_number(context): - yield compiled.create(context.evaluator, -context.obj) + yield context.negate() elif operator == 'not': value = context.py__bool__() if value is None: # Uncertainty. return - yield compiled.create(context.evaluator, not value) + yield compiled.create_simple_object(context.evaluator, not value) else: yield context -# Maps Python syntax to the operator module. -COMPARISON_OPERATORS = { - '==': op.eq, - '!=': op.ne, - 'is': op.is_, - 'is not': op.is_not, - '<': op.lt, - '<=': op.le, - '>': op.gt, - '>=': op.ge, -} - - def _literals_to_types(evaluator, result): # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), # int(), float(), etc). 
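
# eval_or_test above short-circuits `and`/`or` statically: when every
# possible left-hand value is truthy, `x and y` can only be y while `x or y`
# stays x, and the mirror image holds when every left-hand value is falsy;
# anything else is treated as uncertain. A toy version over plain Python sets
# standing in for jedi's context sets (names are illustrative):
def infer_bool_op(left_values, left_bools, operator, infer_right):
    if left_bools == {True}:
        return infer_right() if operator == 'and' else left_values
    if left_bools == {False}:
        return infer_right() if operator == 'or' else left_values
    return left_values  # uncertain: keep the left-hand inference


print(infer_bool_op({'x'}, {True}, 'and', lambda: {'y'}))   # {'y'}
print(infer_bool_op({'x'}, {True}, 'or', lambda: {'y'}))    # {'x'}
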
@@ -366,49 +405,59 @@ def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts def _is_tuple(context): - return isinstance(context, iterable.AbstractIterable) and context.array_type == 'tuple' + return isinstance(context, iterable.Sequence) and context.array_type == 'tuple' def _is_list(context): - return isinstance(context, iterable.AbstractIterable) and context.array_type == 'list' + return isinstance(context, iterable.Sequence) and context.array_type == 'list' + + +def _bool_to_context(evaluator, bool_): + return compiled.builtin_from_name(evaluator, force_unicode(str(bool_))) def _eval_comparison_part(evaluator, context, left, operator, right): l_is_num = is_number(left) r_is_num = is_number(right) - if operator == '*': + if isinstance(operator, unicode): + str_operator = operator + else: + str_operator = force_unicode(str(operator.value)) + + if str_operator == '*': # for iterables, ignore * operations - if isinstance(left, iterable.AbstractIterable) or is_string(left): + if isinstance(left, iterable.Sequence) or is_string(left): return ContextSet(left) - elif isinstance(right, iterable.AbstractIterable) or is_string(right): + elif isinstance(right, iterable.Sequence) or is_string(right): return ContextSet(right) - elif operator == '+': + elif str_operator == '+': if l_is_num and r_is_num or is_string(left) and is_string(right): - return ContextSet(compiled.create(evaluator, left.obj + right.obj)) + return ContextSet(left.execute_operation(right, str_operator)) elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): return ContextSet(iterable.MergedArray(evaluator, (left, right))) - elif operator == '-': + elif str_operator == '-': if l_is_num and r_is_num: - return ContextSet(compiled.create(evaluator, left.obj - right.obj)) - elif operator == '%': + return ContextSet(left.execute_operation(right, str_operator)) + elif str_operator == '%': # With strings and numbers the left type typically remains. Except for # `int() % float()`. return ContextSet(left) - elif operator in COMPARISON_OPERATORS: - operation = COMPARISON_OPERATORS[operator] + elif str_operator in COMPARISON_OPERATORS: if is_compiled(left) and is_compiled(right): # Possible, because the return is not an option. Just compare. - left = left.obj - right = right.obj - - try: - result = operation(left, right) - except TypeError: - # Could be True or False. - return ContextSet(compiled.create(evaluator, True), compiled.create(evaluator, False)) + try: + return ContextSet(left.execute_operation(right, str_operator)) + except TypeError: + # Could be True or False. + pass else: - return ContextSet(compiled.create(evaluator, result)) - elif operator == 'in': + if str_operator in ('is', '!=', '==', 'is not'): + operation = COMPARISON_OPERATORS[str_operator] + bool_ = operation(left, right) + return ContextSet(_bool_to_context(evaluator, bool_)) + + return ContextSet(_bool_to_context(evaluator, True), _bool_to_context(evaluator, False)) + elif str_operator == 'in': return NO_CONTEXTS def check(obj): @@ -417,7 +466,7 @@ def check(obj): obj.name.string_name in ('int', 'float') # Static analysis, one is a number, the other one is not. 
- if operator in ('+', '-') and l_is_num != r_is_num \ + if str_operator in ('+', '-') and l_is_num != r_is_num \ and not (check(left) or check(right)): message = "TypeError: unsupported operand type(s) for +: %s and %s" analysis.add(context, 'type-error-operation', operator, @@ -442,6 +491,22 @@ def _remove_statements(evaluator, context, stmt, name): def tree_name_to_contexts(evaluator, context, tree_name): + + context_set = ContextSet() + module_node = context.get_root_context().tree_node + if module_node is not None: + names = module_node.get_used_names().get(tree_name.value, []) + for name in names: + expr_stmt = name.parent + + correct_scope = parser_utils.get_parent_scope(name) == context.tree_node + + if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign" and correct_scope: + context_set |= _evaluate_for_annotation(context, expr_stmt.children[1].children[1]) + + if context_set: + return context_set + types = [] node = tree_name.get_definition(import_name_always=True) if node is None: @@ -455,7 +520,7 @@ def tree_name_to_contexts(evaluator, context, tree_name): filters = [next(filters)] return finder.find(filters, attribute_lookup=False) elif node.type not in ('import_from', 'import_name'): - raise ValueError("Should not happen.") + raise ValueError("Should not happen. type: %s", node.type) typ = node.type if typ == 'for_stmt': @@ -472,14 +537,18 @@ def tree_name_to_contexts(evaluator, context, tree_name): types = context.predefined_names[node][tree_name.value] except KeyError: cn = ContextualizedNode(context, node.children[3]) - for_types = iterate_contexts(cn.infer(), cn) + for_types = iterate_contexts( + cn.infer(), + contextualized_node=cn, + is_async=node.parent.type == 'async_stmt', + ) c_node = ContextualizedName(context, tree_name) types = check_tuple_assignments(evaluator, c_node, for_types) elif typ == 'expr_stmt': types = _remove_statements(evaluator, context, node, tree_name) elif typ == 'with_stmt': context_managers = context.eval_node(node.get_test_node_from_name(tree_name)) - enter_methods = context_managers.py__getattribute__('__enter__') + enter_methods = context_managers.py__getattribute__(u'__enter__') return enter_methods.execute_evaluated() elif typ in ('import_from', 'import_name'): types = imports.infer_import(context, tree_name) @@ -492,7 +561,7 @@ def tree_name_to_contexts(evaluator, context, tree_name): exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling()) types = exceptions.execute_evaluated() else: - raise ValueError("Should not happen.") + raise ValueError("Should not happen. 
type: %s" % typ) return types @@ -583,6 +652,8 @@ def eval_subscript_list(evaluator, context, index): result += [None] * (3 - len(result)) return ContextSet(iterable.Slice(context, *result)) + elif index.type == 'subscriptlist': + return NO_CONTEXTS # No slices return context.eval_node(index) diff --git a/pythonFiles/jedi/evaluate/sys_path.py b/pythonFiles/jedi/evaluate/sys_path.py index 82e5e9df9ceb..d765a6653c02 100644 --- a/pythonFiles/jedi/evaluate/sys_path.py +++ b/pythonFiles/jedi/evaluate/sys_path.py @@ -1,85 +1,27 @@ -import glob import os -import sys -import imp -from jedi.evaluate.site import addsitedir -from jedi._compatibility import unicode +from jedi._compatibility import unicode, force_unicode, all_suffixes from jedi.evaluate.cache import evaluator_method_cache from jedi.evaluate.base_context import ContextualizedNode from jedi.evaluate.helpers import is_string +from jedi.common.utils import traverse_parents +from jedi.parser_utils import get_cached_code_lines from jedi import settings from jedi import debug -from jedi.evaluate.utils import ignored - - -def get_venv_path(venv): - """Get sys.path for specified virtual environment.""" - sys_path = _get_venv_path_dirs(venv) - with ignored(ValueError): - sys_path.remove('') - sys_path = _get_sys_path_with_egglinks(sys_path) - # As of now, get_venv_path_dirs does not scan built-in pythonpath and - # user-local site-packages, let's approximate them using path from Jedi - # interpreter. - return sys_path + sys.path - - -def _get_sys_path_with_egglinks(sys_path): - """Find all paths including those referenced by egg-links. - - Egg-link-referenced directories are inserted into path immediately before - the directory on which their links were found. Such directories are not - taken into consideration by normal import mechanism, but they are traversed - when doing pkg_resources.require. - """ - result = [] - for p in sys_path: - # pkg_resources does not define a specific order for egg-link files - # using os.listdir to enumerate them, we're sorting them to have - # reproducible tests. - for egg_link in sorted(glob.glob(os.path.join(p, '*.egg-link'))): - with open(egg_link) as fd: - for line in fd: - line = line.strip() - if line: - result.append(os.path.join(p, line)) - # pkg_resources package only interprets the first - # non-empty line in egg-link files. - break - result.append(p) - return result - - -def _get_venv_path_dirs(venv): - """Get sys.path for venv without starting up the interpreter.""" - venv = os.path.abspath(venv) - sitedir = _get_venv_sitepackages(venv) - sys_path = [] - addsitedir(sys_path, sitedir) - return sys_path - - -def _get_venv_sitepackages(venv): - if os.name == 'nt': - p = os.path.join(venv, 'lib', 'site-packages') - else: - p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2], - 'site-packages') - return p def _abs_path(module_context, path): - module_path = module_context.py__file__() if os.path.isabs(path): return path + module_path = module_context.py__file__() if module_path is None: # In this case we have no idea where we actually are in the file # system. 
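
# _abs_path above resolves a relative sys.path entry against the directory of
# the module whose source modified sys.path. A standalone sketch of that
# resolution (the sample paths are made up and POSIX-style):
import os


def abs_path(module_file, path):
    if os.path.isabs(path):
        return path
    base_dir = os.path.dirname(module_file)
    return os.path.abspath(os.path.join(base_dir, path))


print(abs_path('/project/pkg/mod.py', '../libs'))    # '/project/libs'
print(abs_path('/project/pkg/mod.py', '/opt/libs'))  # '/opt/libs'
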
return None base_dir = os.path.dirname(module_path) + path = force_unicode(path) return os.path.abspath(os.path.join(base_dir, path)) @@ -87,7 +29,7 @@ def _paths_from_assignment(module_context, expr_stmt): """ Extracts the assigned strings from an assignment that looks as follows:: - >>> sys.path[0:0] = ['module/path', 'another/module/path'] + sys.path[0:0] = ['module/path', 'another/module/path'] This function is in general pretty tolerant (and therefore 'buggy'). However, it's not a big issue usually to add more paths to Jedi's sys_path, @@ -121,7 +63,7 @@ def _paths_from_assignment(module_context, expr_stmt): for lazy_context in cn.infer().iterate(cn): for context in lazy_context.infer(): if is_string(context): - abs_path = _abs_path(module_context, context.obj) + abs_path = _abs_path(module_context, context.get_safe_value()) if abs_path is not None: yield abs_path @@ -144,7 +86,7 @@ def _paths_from_list_modifications(module_context, trailer1, trailer2): for context in module_context.create_context(arg).eval_node(arg): if is_string(context): - abs_path = _abs_path(module_context, context.obj) + abs_path = _abs_path(module_context, context.get_safe_value()) if abs_path is not None: yield abs_path @@ -187,24 +129,19 @@ def get_sys_path_powers(names): return added -def sys_path_with_modifications(evaluator, module_context): - return evaluator.project.sys_path + check_sys_path_modifications(module_context) - - -def detect_additional_paths(evaluator, script_path): - django_paths = _detect_django_path(script_path) +def discover_buildout_paths(evaluator, script_path): buildout_script_paths = set() for buildout_script_path in _get_buildout_script_paths(script_path): for path in _get_paths_from_buildout_script(evaluator, buildout_script_path): buildout_script_paths.add(path) - return django_paths + list(buildout_script_paths) + return buildout_script_paths def _get_paths_from_buildout_script(evaluator, buildout_script_path): try: - module_node = evaluator.grammar.parse( + module_node = evaluator.parse( path=buildout_script_path, cache=True, cache_path=settings.cache_directory @@ -214,20 +151,14 @@ def _get_paths_from_buildout_script(evaluator, buildout_script_path): return from jedi.evaluate.context import ModuleContext - module = ModuleContext(evaluator, module_node, buildout_script_path) + module = ModuleContext( + evaluator, module_node, buildout_script_path, + code_lines=get_cached_code_lines(evaluator.grammar, buildout_script_path), + ) for path in check_sys_path_modifications(module): yield path -def traverse_parents(path): - while True: - new = os.path.dirname(path) - if new == path: - return - path = new - yield path - - def _get_parent_dir_with_file(path, filename): for parent in traverse_parents(path): if os.path.isfile(os.path.join(parent, filename)): @@ -235,47 +166,34 @@ def _get_parent_dir_with_file(path, filename): return None -def _detect_django_path(module_path): - """ Detects the path of the very well known Django library (if used) """ - result = [] - - for parent in traverse_parents(module_path): - with ignored(IOError): - with open(parent + os.path.sep + 'manage.py'): - debug.dbg('Found django path: %s', module_path) - result.append(parent) - return result - - -def _get_buildout_script_paths(module_path): +def _get_buildout_script_paths(search_path): """ if there is a 'buildout.cfg' file in one of the parent directories of the given module it will return a list of all files in the buildout bin directory that look like python files. 
- :param module_path: absolute path to the module. - :type module_path: str + :param search_path: absolute path to the module. + :type search_path: str """ - project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg') + project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg') if not project_root: - return [] + return bin_path = os.path.join(project_root, 'bin') if not os.path.exists(bin_path): - return [] - extra_module_paths = [] + return + for filename in os.listdir(bin_path): try: filepath = os.path.join(bin_path, filename) with open(filepath, 'r') as f: firstline = f.readline() if firstline.startswith('#!') and 'python' in firstline: - extra_module_paths.append(filepath) + yield filepath except (UnicodeDecodeError, IOError) as e: - # Probably a binary file; permission error or race cond. because file got deleted - # ignore + # Probably a binary file; permission error or race cond. because + # file got deleted. Ignore it. debug.warning(unicode(e)) continue - return extra_module_paths def dotted_path_in_sys_path(sys_path, module_path): @@ -283,7 +201,7 @@ def dotted_path_in_sys_path(sys_path, module_path): Returns the dotted path inside a sys.path. """ # First remove the suffix. - for suffix, _, _ in imp.get_suffixes(): + for suffix in all_suffixes(): if module_path.endswith(suffix): module_path = module_path[:-len(suffix)] break diff --git a/pythonFiles/jedi/evaluate/utils.py b/pythonFiles/jedi/evaluate/utils.py index 7fc1c246de0d..e00e477441de 100644 --- a/pythonFiles/jedi/evaluate/utils.py +++ b/pythonFiles/jedi/evaluate/utils.py @@ -2,10 +2,19 @@ import sys import contextlib import functools +import re +import os from jedi._compatibility import reraise +_sep = os.path.sep +if os.path.altsep is not None: + _sep += os.path.altsep +_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) +del _sep + + def to_list(func): def wrapper(*args, **kwargs): return list(func(*args, **kwargs)) @@ -108,3 +117,38 @@ def indent_block(text, indention=' '): text = text[:-1] lines = text.split('\n') return '\n'.join(map(lambda s: indention + s, lines)) + temp + + +def dotted_from_fs_path(fs_path, sys_path): + """ + Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e. + compares the path with sys.path and then returns the dotted_path. If the + path is not in the sys.path, just returns None. + """ + if os.path.basename(fs_path).startswith('__init__.'): + # We are calculating the path. __init__ files are not interesting. 
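
# dotted_from_fs_path above turns a file-system path into a dotted module
# path by stripping the longest matching sys.path prefix and the source
# suffix, as in its docstring example. A simplified standalone version that
# only handles '.py' files and POSIX separators (the real code also deals
# with __init__ files, extension suffixes and Windows paths):
def simple_dotted_from_fs_path(fs_path, sys_path):
    prefix = ''
    for p in sys_path:
        if fs_path.startswith(p) and len(p) > len(prefix):
            prefix = p
    relative = fs_path[len(prefix):].lstrip('/')
    if relative.endswith('.py'):
        relative = relative[:-len('.py')]
    return relative.replace('/', '.')


print(simple_dotted_from_fs_path('/usr/lib/python3.4/email/utils.py',
                                 ['/usr/lib/python3.4']))   # 'email.utils'
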
+ fs_path = os.path.dirname(fs_path) + + # prefer + # - UNIX + # /path/to/pythonX.Y/lib-dynload + # /path/to/pythonX.Y/site-packages + # - Windows + # C:\path\to\DLLs + # C:\path\to\Lib\site-packages + # over + # - UNIX + # /path/to/pythonX.Y + # - Windows + # C:\path\to\Lib + path = '' + for s in sys_path: + if (fs_path.startswith(s) and len(path) < len(s)): + path = s + + # - Window + # X:\path\to\lib-dynload/datetime.pyd => datetime + module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/') + # - Window + # Replace like X:\path\to\something/foo/bar.py + return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.') diff --git a/pythonFiles/jedi/parser_utils.py b/pythonFiles/jedi/parser_utils.py index 59c6408ea1c6..e630265314e4 100644 --- a/pythonFiles/jedi/parser_utils.py +++ b/pythonFiles/jedi/parser_utils.py @@ -1,14 +1,15 @@ import textwrap from inspect import cleandoc -from jedi._compatibility import literal_eval, is_py3 from parso.python import tree +from parso.cache import parser_cache -_EXECUTE_NODES = set([ - 'funcdef', 'classdef', 'import_from', 'import_name', 'test', 'or_test', - 'and_test', 'not_test', 'comparison', 'expr', 'xor_expr', 'and_expr', - 'shift_expr', 'arith_expr', 'atom_expr', 'term', 'factor', 'power', 'atom' -]) +from jedi._compatibility import literal_eval, force_unicode + +_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test', + 'or_test', 'and_test', 'not_test', 'comparison', 'expr', + 'xor_expr', 'and_expr', 'shift_expr', 'arith_expr', + 'atom_expr', 'term', 'factor', 'power', 'atom'} _FLOW_KEYWORDS = ( 'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while' @@ -112,10 +113,7 @@ def clean_scope_docstring(scope_node): cleaned = cleandoc(safe_literal_eval(node.value)) # Since we want the docstr output to be always unicode, just # force it. - if is_py3 or isinstance(cleaned, unicode): - return cleaned - else: - return unicode(cleaned, 'UTF-8', 'replace') + return force_unicode(cleaned) return '' @@ -205,6 +203,9 @@ def get_following_comment_same_line(node): whitespace = node.children[5].get_first_leaf().prefix elif node.type == 'with_stmt': whitespace = node.children[3].get_first_leaf().prefix + elif node.type == 'funcdef': + # actually on the next line + whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix else: whitespace = node.get_last_leaf().get_next_leaf().prefix except AttributeError: @@ -239,3 +240,11 @@ def get_parent_scope(node, include_flows=False): break scope = scope.parent return scope + + +def get_cached_code_lines(grammar, path): + """ + Basically access the cached code lines in parso. This is not the nicest way + to do this, but we avoid splitting all the lines again. + """ + return parser_cache[grammar._hashed][path].lines diff --git a/pythonFiles/jedi/refactoring.py b/pythonFiles/jedi/refactoring.py index ee938427fcee..6c1d74d1bdb8 100644 --- a/pythonFiles/jedi/refactoring.py +++ b/pythonFiles/jedi/refactoring.py @@ -1,11 +1,14 @@ """ +THIS is not in active development, please check +https://github.com/davidhalter/jedi/issues/667 first before editing. + Introduce some basic refactoring functions to |jedi|. This module is still in a very early development stage and needs much testing and improvement. .. warning:: I won't do too much here, but if anyone wants to step in, please do. 
Refactoring is none of my priorities -It uses the |jedi| `API `_ and supports currently the +It uses the |jedi| `API `_ and supports currently the following functions (sometimes bug-prone): - rename @@ -50,9 +53,8 @@ def diff(self): def rename(script, new_name): """ The `args` / `kwargs` params are the same as in `api.Script`. - :param operation: The refactoring operation to execute. - :type operation: str - :type source: str + :param new_name: The new name of the script. + :param script: The source Script object. :return: list of changed lines/changed files """ return Refactoring(_rename(script.usages(), new_name)) @@ -105,11 +107,12 @@ def extract(script, new_name): user_stmt = script._parser.user_stmt() - # TODO care for multiline extracts + # TODO care for multi-line extracts dct = {} if user_stmt: pos = script._pos line_index = pos[0] - 1 + # Be careful here. 'array_for_pos' does not exist in 'helpers'. arr, index = helpers.array_for_pos(user_stmt, pos) if arr is not None: start_pos = arr[index].start_pos @@ -120,7 +123,7 @@ def extract(script, new_name): start_line = new_lines[start_pos[0] - 1] text = start_line[start_pos[1]:e] for l in range(start_pos[0], end_pos[0] - 1): - text += '\n' + l + text += '\n' + str(l) if e is None: end_line = new_lines[end_pos[0] - 1] text += '\n' + end_line[:end_pos[1]] @@ -140,7 +143,7 @@ def extract(script, new_name): new_lines[start_pos[0] - 1] = start_line new_lines[start_pos[0]:end_pos[0] - 1] = [] - # add parentheses in multiline case + # add parentheses in multi-line case open_brackets = ['(', '[', '{'] close_brackets = [')', ']', '}'] if '\n' in text and not (text[0] in open_brackets and text[-1] == @@ -172,7 +175,7 @@ def inline(script): inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), reverse=True) expression_list = stmt.expression_list() - # don't allow multiline refactorings for now. + # don't allow multi-line refactorings for now. assert stmt.start_pos[0] == stmt.end_pos[0] index = stmt.start_pos[0] - 1 diff --git a/pythonFiles/jedi/utils.py b/pythonFiles/jedi/utils.py index 177524c50168..0f42e7d55858 100644 --- a/pythonFiles/jedi/utils.py +++ b/pythonFiles/jedi/utils.py @@ -89,12 +89,13 @@ def complete(self, text, state): lines = split_lines(text) position = (len(lines), len(lines[-1])) name = get_on_completion_name( - interpreter._get_module_node(), + interpreter._module_node, lines, position ) before = text[:len(text) - len(name)] completions = interpreter.completions() + logging.debug("REPL completions: %s", completions) except: logging.error("REPL Completion error:\n" + traceback.format_exc()) raise @@ -108,6 +109,11 @@ def complete(self, text, state): return None try: + # Need to import this one as well to make sure it's executed before + # this code. This didn't use to be an issue until 3.3. Starting with + # 3.4 this is different, it always overwrites the completer if it's not + # already imported here. 
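
# The completer patched here is the one installed by jedi.utils.setup_readline().
# One way to enable it from a PYTHONSTARTUP file, a sketch assuming the
# bundled jedi is importable as `jedi`, with a fallback to the stdlib
# completer otherwise:
try:
    from jedi.utils import setup_readline
    setup_readline()
except ImportError:
    # Fall back to the standard readline completion.
    import readline
    import rlcompleter  # noqa: F401 -- importing it registers the default completer
    readline.parse_and_bind("tab: complete")
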
+ import rlcompleter import readline except ImportError: print("Jedi: Module readline not available.") diff --git a/pythonFiles/parso/__init__.py b/pythonFiles/parso/__init__.py index f0a0fc4f5015..c4cce53ea690 100644 --- a/pythonFiles/parso/__init__.py +++ b/pythonFiles/parso/__init__.py @@ -43,7 +43,7 @@ from parso.utils import split_lines, python_bytes_to_unicode -__version__ = '0.1.1' +__version__ = '0.2.0' def parse(code=None, **kwargs): diff --git a/pythonFiles/parso/_compatibility.py b/pythonFiles/parso/_compatibility.py index 9ddf23dc6786..db411eebf981 100644 --- a/pythonFiles/parso/_compatibility.py +++ b/pythonFiles/parso/_compatibility.py @@ -36,7 +36,7 @@ def use_metaclass(meta, *bases): def u(string): """Cast to unicode DAMMIT! Written because Python2 repr always implicitly casts to a string, so we - have to cast back to a unicode (and we now that we always deal with valid + have to cast back to a unicode (and we know that we always deal with valid unicode, because we check that in the beginning). """ if py_version >= 30: diff --git a/pythonFiles/parso/grammar.py b/pythonFiles/parso/grammar.py index 2cf26d77fb27..c825b5554c0e 100644 --- a/pythonFiles/parso/grammar.py +++ b/pythonFiles/parso/grammar.py @@ -12,7 +12,6 @@ from parso.python.parser import Parser as PythonParser from parso.python.errors import ErrorFinderConfig from parso.python import pep8 -from parso.python import fstring _loaded_grammars = {} @@ -73,7 +72,7 @@ def parse(self, code=None, **kwargs): :py:class:`parso.python.tree.Module`. """ if 'start_pos' in kwargs: - raise TypeError("parse() got an unexpected keyworda argument.") + raise TypeError("parse() got an unexpected keyword argument.") return self._parse(code=code, **kwargs) def _parse(self, code=None, error_recovery=True, path=None, @@ -186,7 +185,6 @@ def _get_normalizer_issues(self, node, normalizer_config=None): normalizer.walk(node) return normalizer.issues - def __repr__(self): labels = self._pgen_grammar.number2symbol.values() txt = ' '.join(list(labels)[:3]) + ' ...' @@ -215,34 +213,6 @@ def _tokenize(self, code): return tokenize(code, self.version_info) -class PythonFStringGrammar(Grammar): - _token_namespace = fstring.TokenNamespace - _start_symbol = 'fstring' - - def __init__(self): - super(PythonFStringGrammar, self).__init__( - text=fstring.GRAMMAR, - tokenizer=fstring.tokenize, - parser=fstring.Parser - ) - - def parse(self, code, **kwargs): - return self._parse(code, **kwargs) - - def _parse(self, code, error_recovery=True, start_pos=(1, 0)): - tokens = self._tokenizer(code, start_pos=start_pos) - p = self._parser( - self._pgen_grammar, - error_recovery=error_recovery, - start_symbol=self._start_symbol, - ) - return p.parse(tokens=tokens) - - def parse_leaf(self, leaf, error_recovery=True): - code = leaf._get_payload() - return self.parse(code, error_recovery=True, start_pos=leaf.start_pos) - - def load_grammar(**kwargs): """ Loads a :py:class:`parso.Grammar`. The default version is the current Python @@ -273,10 +243,6 @@ def load_grammar(language='python', version=None): except FileNotFoundError: message = "Python version %s is currently not supported." % version raise NotImplementedError(message) - elif language == 'python-f-string': - if version is not None: - raise NotImplementedError("Currently different versions are not supported.") - return PythonFStringGrammar() else: raise NotImplementedError("No support for language %s." 
% language) diff --git a/pythonFiles/parso/pgen2/pgen.py b/pythonFiles/parso/pgen2/pgen.py index 10ef6ffd1532..a3e39fa5fe74 100644 --- a/pythonFiles/parso/pgen2/pgen.py +++ b/pythonFiles/parso/pgen2/pgen.py @@ -28,6 +28,7 @@ def make_grammar(self): c = grammar.Grammar(self._bnf_text) names = list(self.dfas.keys()) names.sort() + # TODO do we still need this? names.remove(self.startsymbol) names.insert(0, self.startsymbol) for name in names: @@ -316,8 +317,8 @@ def _parse_atom(self): def _expect(self, type): if self.type != type: - self._raise_error("expected %s, got %s(%s)", - type, self.type, self.value) + self._raise_error("expected %s(%s), got %s(%s)", + type, token.tok_name[type], self.type, self.value) value = self.value self._gettoken() return value diff --git a/pythonFiles/parso/python/diff.py b/pythonFiles/parso/python/diff.py index c2e44fd3cb21..96c6e5f2ca41 100644 --- a/pythonFiles/parso/python/diff.py +++ b/pythonFiles/parso/python/diff.py @@ -133,7 +133,7 @@ def update(self, old_lines, new_lines): LOG.debug('diff: line_lengths old: %s, new: %s' % (len(old_lines), line_length)) for operation, i1, i2, j1, j2 in opcodes: - LOG.debug('diff %s old[%s:%s] new[%s:%s]', + LOG.debug('diff code[%s] old[%s:%s] new[%s:%s]', operation, i1 + 1, i2, j1 + 1, j2) if j2 == line_length and new_lines[-1] == '': @@ -454,7 +454,7 @@ def _remove_endmarker(self, tree_nodes): self._last_prefix = '' if is_endmarker: try: - separation = last_leaf.prefix.rindex('\n') + separation = last_leaf.prefix.rindex('\n') + 1 except ValueError: pass else: @@ -462,7 +462,7 @@ def _remove_endmarker(self, tree_nodes): # That is not relevant if parentheses were opened. Always parse # until the end of a line. last_leaf.prefix, self._last_prefix = \ - last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:] + last_leaf.prefix[:separation], last_leaf.prefix[separation:] first_leaf = tree_nodes[0].get_first_leaf() first_leaf.prefix = self.prefix + first_leaf.prefix @@ -472,7 +472,6 @@ def _remove_endmarker(self, tree_nodes): self.prefix = last_leaf.prefix tree_nodes = tree_nodes[:-1] - return tree_nodes def copy_nodes(self, tree_nodes, until_line, line_offset): @@ -492,6 +491,13 @@ def _copy_nodes(self, tos, nodes, until_line, line_offset): new_tos = tos for node in nodes: if node.type == 'endmarker': + # We basically removed the endmarker, but we are not allowed to + # remove the newline at the end of the line, otherwise it's + # going to be missing. + try: + self.prefix = node.prefix[:node.prefix.rindex('\n') + 1] + except ValueError: + pass # Endmarkers just distort all the checks below. Remove them. break diff --git a/pythonFiles/parso/python/errors.py b/pythonFiles/parso/python/errors.py index 65296568b54c..cfb8380ea743 100644 --- a/pythonFiles/parso/python/errors.py +++ b/pythonFiles/parso/python/errors.py @@ -563,7 +563,8 @@ def is_issue(self, leaf): and self._normalizer.version == (3, 5): self.add_issue(self.get_node(leaf), message=self.message_async_yield) -@ErrorFinder.register_rule(type='atom') + +@ErrorFinder.register_rule(type='strings') class _BytesAndStringMix(SyntaxRule): # e.g. 
's' b'' message = "cannot mix bytes and nonbytes literals" @@ -744,7 +745,12 @@ def is_issue(self, node): @ErrorFinder.register_rule(type='arglist') class _ArglistRule(SyntaxRule): - message = "Generator expression must be parenthesized if not sole argument" + @property + def message(self): + if self._normalizer.version < (3, 7): + return "Generator expression must be parenthesized if not sole argument" + else: + return "Generator expression must be parenthesized" def is_issue(self, node): first_arg = node.children[0] @@ -837,101 +843,36 @@ def is_issue(self, try_stmt): self.add_issue(default_except, message=self.message) -@ErrorFinder.register_rule(type='string') +@ErrorFinder.register_rule(type='fstring') class _FStringRule(SyntaxRule): _fstring_grammar = None - message_empty = "f-string: empty expression not allowed" # f'{}' - message_single_closing = "f-string: single '}' is not allowed" # f'}' message_nested = "f-string: expressions nested too deeply" - message_backslash = "f-string expression part cannot include a backslash" # f'{"\"}' or f'{"\\"}' - message_comment = "f-string expression part cannot include '#'" # f'{#}' - message_unterminated_string = "f-string: unterminated string" # f'{"}' message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'" - message_incomplete = "f-string: expecting '}'" # f'{' - message_syntax = "invalid syntax" - @classmethod - def _load_grammar(cls): - import parso + def _check_format_spec(self, format_spec, depth): + self._check_fstring_contents(format_spec.children[1:], depth) - if cls._fstring_grammar is None: - cls._fstring_grammar = parso.load_grammar(language='python-f-string') - return cls._fstring_grammar + def _check_fstring_expr(self, fstring_expr, depth): + if depth >= 2: + self.add_issue(fstring_expr, message=self.message_nested) - def is_issue(self, fstring): - if 'f' not in fstring.string_prefix.lower(): - return + conversion = fstring_expr.children[2] + if conversion.type == 'fstring_conversion': + name = conversion.children[1] + if name.value not in ('s', 'r', 'a'): + self.add_issue(name, message=self.message_conversion) - parsed = self._load_grammar().parse_leaf(fstring) - for child in parsed.children: - if child.type == 'expression': - self._check_expression(child) - elif child.type == 'error_node': - next_ = child.get_next_leaf() - if next_.type == 'error_leaf' and next_.original_type == 'unterminated_string': - self.add_issue(next_, message=self.message_unterminated_string) - # At this point nothing more is comming except the error - # leaf that we've already checked here. - break - self.add_issue(child, message=self.message_incomplete) - elif child.type == 'error_leaf': - self.add_issue(child, message=self.message_single_closing) - - def _check_python_expr(self, python_expr): - value = python_expr.value - if '\\' in value: - self.add_issue(python_expr, message=self.message_backslash) - return - if '#' in value: - self.add_issue(python_expr, message=self.message_comment) - return - if re.match('\s*$', value) is not None: - self.add_issue(python_expr, message=self.message_empty) - return - - # This is now nested parsing. We parsed the fstring and now - # we're parsing Python again. - try: - # CPython has a bit of a special ways to parse Python code within - # f-strings. It wraps the code in brackets to make sure that - # whitespace doesn't make problems (indentation/newlines). - # Just use that algorithm as well here and adapt start positions. 
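A quick way to see the rewritten f-string rule fire is through parso's public load_grammar / iter_errors API; the message text below is the one defined above, and the printed output is the expected result, not verified here:

import parso

grammar = parso.load_grammar(version='3.6')
module = grammar.parse("f'{x!z}'\n")
for issue in grammar.iter_errors(module):
    print(issue.start_pos, issue.message)
# expected: f-string: invalid conversion character: expected 's', 'r', or 'a'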
- start_pos = python_expr.start_pos - start_pos = start_pos[0], start_pos[1] - 1 - eval_input = self._normalizer.grammar._parse( - '(%s)' % value, - start_symbol='eval_input', - start_pos=start_pos, - error_recovery=False - ) - except ParserSyntaxError as e: - self.add_issue(e.error_leaf, message=self.message_syntax) - return + format_spec = fstring_expr.children[-2] + if format_spec.type == 'fstring_format_spec': + self._check_format_spec(format_spec, depth + 1) - issues = self._normalizer.grammar.iter_errors(eval_input) - self._normalizer.issues += issues - - def _check_format_spec(self, format_spec): - for expression in format_spec.children[1:]: - nested_format_spec = expression.children[-2] - if nested_format_spec.type == 'format_spec': - if len(nested_format_spec.children) > 1: - self.add_issue( - nested_format_spec.children[1], - message=self.message_nested - ) - - self._check_expression(expression) + def is_issue(self, fstring): + self._check_fstring_contents(fstring.children[1:-1]) - def _check_expression(self, expression): - for c in expression.children: - if c.type == 'python_expr': - self._check_python_expr(c) - elif c.type == 'conversion': - if c.value not in ('s', 'r', 'a'): - self.add_issue(c, message=self.message_conversion) - elif c.type == 'format_spec': - self._check_format_spec(c) + def _check_fstring_contents(self, children, depth=0): + for fstring_content in children: + if fstring_content.type == 'fstring_expr': + self._check_fstring_expr(fstring_content, depth) class _CheckAssignmentRule(SyntaxRule): @@ -944,7 +885,7 @@ def _check_assignment(self, node, is_deletion=False): first, second = node.children[:2] error = _get_comprehension_type(node) if error is None: - if second.type in ('dictorsetmaker', 'string'): + if second.type == 'dictorsetmaker': error = 'literal' elif first in ('(', '['): if second.type == 'yield_expr': @@ -963,7 +904,7 @@ def _check_assignment(self, node, is_deletion=False): error = 'Ellipsis' elif type_ == 'comparison': error = 'comparison' - elif type_ in ('string', 'number'): + elif type_ in ('string', 'number', 'strings'): error = 'literal' elif type_ == 'yield_expr': # This one seems to be a slightly different warning in Python. diff --git a/pythonFiles/parso/python/fstring.py b/pythonFiles/parso/python/fstring.py deleted file mode 100644 index a8fe7b452df5..000000000000 --- a/pythonFiles/parso/python/fstring.py +++ /dev/null @@ -1,211 +0,0 @@ -import re - -from itertools import count -from parso.utils import PythonVersionInfo -from parso.utils import split_lines -from parso.python.tokenize import Token -from parso import parser -from parso.tree import TypedLeaf, ErrorNode, ErrorLeaf - -version36 = PythonVersionInfo(3, 6) - - -class TokenNamespace: - _c = count() - LBRACE = next(_c) - RBRACE = next(_c) - ENDMARKER = next(_c) - COLON = next(_c) - CONVERSION = next(_c) - PYTHON_EXPR = next(_c) - EXCLAMATION_MARK = next(_c) - UNTERMINATED_STRING = next(_c) - - token_map = dict((v, k) for k, v in locals().items() if not k.startswith('_')) - - @classmethod - def generate_token_id(cls, string): - if string == '{': - return cls.LBRACE - elif string == '}': - return cls.RBRACE - elif string == '!': - return cls.EXCLAMATION_MARK - elif string == ':': - return cls.COLON - return getattr(cls, string) - - -GRAMMAR = """ -fstring: expression* ENDMARKER -format_spec: ':' expression* -expression: '{' PYTHON_EXPR [ '!' 
CONVERSION ] [ format_spec ] '}' -""" - -_prefix = r'((?:[^{}]+)*)' -_expr = _prefix + r'(\{|\}|$)' -_in_expr = r'([^{}\[\]:"\'!]*)(.?)' -# There's only one conversion character allowed. But the rules have to be -# checked later anyway, so allow more here. This makes error recovery nicer. -_conversion = r'([^={}:]*)(.?)' - -_compiled_expr = re.compile(_expr) -_compiled_in_expr = re.compile(_in_expr) -_compiled_conversion = re.compile(_conversion) - - -def tokenize(code, start_pos=(1, 0)): - def add_to_pos(string): - lines = split_lines(string) - l = len(lines[-1]) - if len(lines) > 1: - start_pos[0] += len(lines) - 1 - start_pos[1] = l - else: - start_pos[1] += l - - def tok(value, type=None, prefix=''): - if type is None: - type = TokenNamespace.generate_token_id(value) - - add_to_pos(prefix) - token = Token(type, value, tuple(start_pos), prefix) - add_to_pos(value) - return token - - start = 0 - recursion_level = 0 - added_prefix = '' - start_pos = list(start_pos) - while True: - match = _compiled_expr.match(code, start) - prefix = added_prefix + match.group(1) - found = match.group(2) - start = match.end() - if not found: - # We're at the end. - break - - if found == '}': - if recursion_level == 0 and len(code) > start and code[start] == '}': - # This is a }} escape. - added_prefix = prefix + '}}' - start += 1 - continue - - recursion_level = max(0, recursion_level - 1) - yield tok(found, prefix=prefix) - added_prefix = '' - else: - assert found == '{' - if recursion_level == 0 and len(code) > start and code[start] == '{': - # This is a {{ escape. - added_prefix = prefix + '{{' - start += 1 - continue - - recursion_level += 1 - yield tok(found, prefix=prefix) - added_prefix = '' - - expression = '' - squared_count = 0 - curly_count = 0 - while True: - expr_match = _compiled_in_expr.match(code, start) - expression += expr_match.group(1) - found = expr_match.group(2) - start = expr_match.end() - - if found == '{': - curly_count += 1 - expression += found - elif found == '}' and curly_count > 0: - curly_count -= 1 - expression += found - elif found == '[': - squared_count += 1 - expression += found - elif found == ']': - # Use a max function here, because the Python code might - # just have syntax errors. - squared_count = max(0, squared_count - 1) - expression += found - elif found == ':' and (squared_count or curly_count): - expression += found - elif found in ('"', "'"): - search = found - if len(code) > start + 1 and \ - code[start] == found == code[start+1]: - search *= 3 - start += 2 - - index = code.find(search, start) - if index == -1: - yield tok(expression, type=TokenNamespace.PYTHON_EXPR) - yield tok( - found + code[start:], - type=TokenNamespace.UNTERMINATED_STRING, - ) - start = len(code) - break - expression += found + code[start:index+1] - start = index + 1 - elif found == '!' and len(code) > start and code[start] == '=': - # This is a python `!=` and not a conversion. - expression += found - else: - yield tok(expression, type=TokenNamespace.PYTHON_EXPR) - if found: - yield tok(found) - break - - if found == '!': - conversion_match = _compiled_conversion.match(code, start) - found = conversion_match.group(2) - start = conversion_match.end() - yield tok(conversion_match.group(1), type=TokenNamespace.CONVERSION) - if found: - yield tok(found) - if found == '}': - recursion_level -= 1 - - # We don't need to handle everything after ':', because that is - # basically new tokens. 
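With the dedicated f-string grammar deleted here, f-strings are handled by the regular Python grammar (see the fstring rules added to grammar36.txt further down), so their pieces show up as ordinary tree leaves. A small sketch using parso's public parse API:

import parso

module = parso.parse("f'{value!r}'\n", version='3.6')
leaf = module.get_first_leaf()
while leaf is not None:
    # prints fstring_start, operator, name, ... , fstring_end, newline, endmarker
    print(leaf.type, repr(leaf.value))
    leaf = leaf.get_next_leaf()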
- - yield tok('', type=TokenNamespace.ENDMARKER, prefix=prefix) - - -class Parser(parser.BaseParser): - def parse(self, tokens): - node = super(Parser, self).parse(tokens) - if isinstance(node, self.default_leaf): # Is an endmarker. - # If there's no curly braces we get back a non-module. We always - # want an fstring. - node = self.default_node('fstring', [node]) - - return node - - def convert_leaf(self, pgen_grammar, type, value, prefix, start_pos): - # TODO this is so ugly. - leaf_type = TokenNamespace.token_map[type].lower() - return TypedLeaf(leaf_type, value, start_pos, prefix) - - def error_recovery(self, pgen_grammar, stack, arcs, typ, value, start_pos, prefix, - add_token_callback): - if not self._error_recovery: - return super(Parser, self).error_recovery( - pgen_grammar, stack, arcs, typ, value, start_pos, prefix, - add_token_callback - ) - - token_type = TokenNamespace.token_map[typ].lower() - if len(stack) == 1: - error_leaf = ErrorLeaf(token_type, value, start_pos, prefix) - stack[0][2][1].append(error_leaf) - else: - dfa, state, (type_, nodes) = stack[1] - stack[0][2][1].append(ErrorNode(nodes)) - stack[1:] = [] - - add_token_callback(typ, value, start_pos, prefix) diff --git a/pythonFiles/parso/python/grammar26.txt b/pythonFiles/parso/python/grammar26.txt index b972a41d6a4a..d9cede2e9da9 100644 --- a/pythonFiles/parso/python/grammar26.txt +++ b/pythonFiles/parso/python/grammar26.txt @@ -119,7 +119,8 @@ atom: ('(' [yield_expr|testlist_comp] ')' | '[' [listmaker] ']' | '{' [dictorsetmaker] '}' | '`' testlist1 '`' | - NAME | NUMBER | STRING+) + NAME | NUMBER | strings) +strings: STRING+ listmaker: test ( list_for | (',' test)* [','] ) # Dave: Renamed testlist_gexpr to testlist_comp, because in 2.7+ this is the # default. It's more consistent like this. diff --git a/pythonFiles/parso/python/grammar27.txt b/pythonFiles/parso/python/grammar27.txt index 4c3f33da32d5..359f12b43e1f 100644 --- a/pythonFiles/parso/python/grammar27.txt +++ b/pythonFiles/parso/python/grammar27.txt @@ -104,7 +104,8 @@ atom: ('(' [yield_expr|testlist_comp] ')' | '[' [listmaker] ']' | '{' [dictorsetmaker] '}' | '`' testlist1 '`' | - NAME | NUMBER | STRING+) + NAME | NUMBER | strings) +strings: STRING+ listmaker: test ( list_for | (',' test)* [','] ) testlist_comp: test ( comp_for | (',' test)* [','] ) lambdef: 'lambda' [varargslist] ':' test diff --git a/pythonFiles/parso/python/grammar33.txt b/pythonFiles/parso/python/grammar33.txt index d7aaffd60e14..3a5580926797 100644 --- a/pythonFiles/parso/python/grammar33.txt +++ b/pythonFiles/parso/python/grammar33.txt @@ -103,7 +103,8 @@ power: atom trailer* ['**' factor] atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | - NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +strings: STRING+ testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] diff --git a/pythonFiles/parso/python/grammar34.txt b/pythonFiles/parso/python/grammar34.txt index 05c3181627db..324bba18753d 100644 --- a/pythonFiles/parso/python/grammar34.txt +++ b/pythonFiles/parso/python/grammar34.txt @@ -103,7 +103,8 @@ power: atom trailer* ['**' factor] atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | - NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') + NAME | NUMBER | strings | '...' 
| 'None' | 'True' | 'False') +strings: STRING+ testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] diff --git a/pythonFiles/parso/python/grammar35.txt b/pythonFiles/parso/python/grammar35.txt index c38217f3f97f..5868b8f7031a 100644 --- a/pythonFiles/parso/python/grammar35.txt +++ b/pythonFiles/parso/python/grammar35.txt @@ -110,7 +110,8 @@ atom_expr: ['await'] atom trailer* atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | - NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +strings: STRING+ testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] diff --git a/pythonFiles/parso/python/grammar36.txt b/pythonFiles/parso/python/grammar36.txt index e76147e9e4fc..b82c1fec1145 100644 --- a/pythonFiles/parso/python/grammar36.txt +++ b/pythonFiles/parso/python/grammar36.txt @@ -108,7 +108,7 @@ atom_expr: ['await'] atom trailer* atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | - NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] @@ -148,3 +148,10 @@ encoding_decl: NAME yield_expr: 'yield' [yield_arg] yield_arg: 'from' test | testlist + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' testlist_comp [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/pythonFiles/parso/python/grammar37.txt b/pythonFiles/parso/python/grammar37.txt index e76147e9e4fc..7d112f79852b 100644 --- a/pythonFiles/parso/python/grammar37.txt +++ b/pythonFiles/parso/python/grammar37.txt @@ -108,7 +108,7 @@ atom_expr: ['await'] atom trailer* atom: ('(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | - NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] @@ -148,3 +148,10 @@ encoding_decl: NAME yield_expr: 'yield' [yield_arg] yield_arg: 'from' test | testlist + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' 
NAME +fstring_expr: '{' testlist [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/pythonFiles/parso/python/parser.py b/pythonFiles/parso/python/parser.py index 1897f53e8d6f..7cdf987ab365 100644 --- a/pythonFiles/parso/python/parser.py +++ b/pythonFiles/parso/python/parser.py @@ -1,6 +1,7 @@ from parso.python import tree from parso.python.token import (DEDENT, INDENT, ENDMARKER, NEWLINE, NUMBER, - STRING, tok_name, NAME) + STRING, tok_name, NAME, FSTRING_STRING, + FSTRING_START, FSTRING_END) from parso.parser import BaseParser from parso.pgen2.parse import token_to_ilabel @@ -50,6 +51,17 @@ class structure of different scopes. } default_node = tree.PythonNode + # Names/Keywords are handled separately + _leaf_map = { + STRING: tree.String, + NUMBER: tree.Number, + NEWLINE: tree.Newline, + ENDMARKER: tree.EndMarker, + FSTRING_STRING: tree.FStringString, + FSTRING_START: tree.FStringStart, + FSTRING_END: tree.FStringEnd, + } + def __init__(self, pgen_grammar, error_recovery=True, start_symbol='file_input'): super(Parser, self).__init__(pgen_grammar, start_symbol, error_recovery=error_recovery) @@ -121,16 +133,8 @@ def convert_leaf(self, pgen_grammar, type, value, prefix, start_pos): return tree.Keyword(value, start_pos, prefix) else: return tree.Name(value, start_pos, prefix) - elif type == STRING: - return tree.String(value, start_pos, prefix) - elif type == NUMBER: - return tree.Number(value, start_pos, prefix) - elif type == NEWLINE: - return tree.Newline(value, start_pos, prefix) - elif type == ENDMARKER: - return tree.EndMarker(value, start_pos, prefix) - else: - return tree.Operator(value, start_pos, prefix) + + return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix) def error_recovery(self, pgen_grammar, stack, arcs, typ, value, start_pos, prefix, add_token_callback): diff --git a/pythonFiles/parso/python/token.py b/pythonFiles/parso/python/token.py index fb590a5f28c6..dd849b01daa7 100644 --- a/pythonFiles/parso/python/token.py +++ b/pythonFiles/parso/python/token.py @@ -32,6 +32,14 @@ ERROR_DEDENT = next(_counter) tok_name[ERROR_DEDENT] = 'ERROR_DEDENT' +FSTRING_START = next(_counter) +tok_name[FSTRING_START] = 'FSTRING_START' +FSTRING_END = next(_counter) +tok_name[FSTRING_END] = 'FSTRING_END' +FSTRING_STRING = next(_counter) +tok_name[FSTRING_STRING] = 'FSTRING_STRING' +EXCLAMATION = next(_counter) +tok_name[EXCLAMATION] = 'EXCLAMATION' # Map from operator to number (since tokenize doesn't do this) @@ -84,6 +92,7 @@ //= DOUBLESLASHEQUAL -> RARROW ... ELLIPSIS +! 
EXCLAMATION """ opmap = {} diff --git a/pythonFiles/parso/python/tokenize.py b/pythonFiles/parso/python/tokenize.py index ecd2437f5ebb..31f081d9b804 100644 --- a/pythonFiles/parso/python/tokenize.py +++ b/pythonFiles/parso/python/tokenize.py @@ -20,14 +20,15 @@ from parso.python.token import (tok_name, ENDMARKER, STRING, NUMBER, opmap, NAME, ERRORTOKEN, NEWLINE, INDENT, DEDENT, - ERROR_DEDENT) + ERROR_DEDENT, FSTRING_STRING, FSTRING_START, + FSTRING_END) from parso._compatibility import py_version from parso.utils import split_lines TokenCollection = namedtuple( 'TokenCollection', - 'pseudo_token single_quoted triple_quoted endpats always_break_tokens', + 'pseudo_token single_quoted triple_quoted endpats fstring_pattern_map always_break_tokens', ) BOM_UTF8_STRING = BOM_UTF8.decode('utf-8') @@ -52,32 +53,35 @@ def group(*choices, **kwargs): return start + '|'.join(choices) + ')' -def any(*choices): - return group(*choices) + '*' - - def maybe(*choices): return group(*choices) + '?' # Return the empty string, plus all of the valid string prefixes. -def _all_string_prefixes(version_info): +def _all_string_prefixes(version_info, include_fstring=False, only_fstring=False): def different_case_versions(prefix): for s in _itertools.product(*[(c, c.upper()) for c in prefix]): yield ''.join(s) # The valid string prefixes. Only contain the lower case versions, # and don't contain any permuations (include 'fr', but not # 'rf'). The various permutations will be generated. - _valid_string_prefixes = ['b', 'r', 'u'] + valid_string_prefixes = ['b', 'r', 'u'] if version_info >= (3, 0): - _valid_string_prefixes.append('br') + valid_string_prefixes.append('br') - if version_info >= (3, 6): - _valid_string_prefixes += ['f', 'fr'] + result = set(['']) + if version_info >= (3, 6) and include_fstring: + f = ['f', 'fr'] + if only_fstring: + valid_string_prefixes = f + result = set() + else: + valid_string_prefixes += f + elif only_fstring: + return set() # if we add binary f-strings, add: ['fb', 'fbr'] - result = set(['']) - for prefix in _valid_string_prefixes: + for prefix in valid_string_prefixes: for t in _itertools.permutations(prefix): # create a list with upper and lower versions of each # character @@ -102,6 +106,10 @@ def _get_token_collection(version_info): return result +fstring_string_single_line = _compile(r'(?:[^{}\r\n]+|\{\{|\}\})+') +fstring_string_multi_line = _compile(r'(?:[^{}]+|\{\{|\}\})+') + + def _create_token_collection(version_info): # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. @@ -141,6 +149,9 @@ def _create_token_collection(version_info): # StringPrefix can be the empty string (making it optional). possible_prefixes = _all_string_prefixes(version_info) StringPrefix = group(*possible_prefixes) + StringPrefixWithF = group(*_all_string_prefixes(version_info, include_fstring=True)) + fstring_prefixes = _all_string_prefixes(version_info, include_fstring=True, only_fstring=True) + FStringStart = group(*fstring_prefixes) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" @@ -150,14 +161,14 @@ def _create_token_collection(version_info): Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. 
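The reworked prefix helper above keeps f-prefixes out of the ordinary string prefixes and exposes them separately. A rough illustration of its behaviour; _all_string_prefixes is a parso-internal helper and is called here only for demonstration:

from parso.python.tokenize import _all_string_prefixes

print(_all_string_prefixes((3, 6)))
# '', 'b', 'r', 'u', 'br' and their case/permutation variants - no 'f'
print(_all_string_prefixes((3, 6), include_fstring=True, only_fstring=True))
# only the 'f' / 'fr' variants, which feed the FStringStart pattern below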
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' - Triple = group(StringPrefix + "'''", StringPrefix + '"""') + Triple = group(StringPrefixWithF + "'''", StringPrefixWithF + '"""') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). - Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", + Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"//=?", r"->", - r"[+\-*/%&@`|^=<>]=?", + r"[+\-*/%&@`|^!=<>]=?", r"~") Bracket = '[][(){}]' @@ -174,7 +185,12 @@ def _create_token_collection(version_info): group("'", r'\\\r?\n'), StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) - PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) + pseudo_extra_pool = [Comment, Triple] + all_quotes = '"', "'", '"""', "'''" + if fstring_prefixes: + pseudo_extra_pool.append(FStringStart + group(*all_quotes)) + + PseudoExtras = group(r'\\\r?\n|\Z', *pseudo_extra_pool) PseudoToken = group(Whitespace, capture=True) + \ group(PseudoExtras, Number, Funny, ContStr, Name, capture=True) @@ -192,18 +208,24 @@ def _create_token_collection(version_info): # including the opening quotes. single_quoted = set() triple_quoted = set() + fstring_pattern_map = {} for t in possible_prefixes: - for p in (t + '"', t + "'"): - single_quoted.add(p) - for p in (t + '"""', t + "'''"): - triple_quoted.add(p) + for quote in '"', "'": + single_quoted.add(t + quote) + + for quote in '"""', "'''": + triple_quoted.add(t + quote) + + for t in fstring_prefixes: + for quote in all_quotes: + fstring_pattern_map[t + quote] = quote ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except', 'finally', 'while', 'with', 'return') pseudo_token_compiled = _compile(PseudoToken) return TokenCollection( pseudo_token_compiled, single_quoted, triple_quoted, endpats, - ALWAYS_BREAK_TOKENS + fstring_pattern_map, ALWAYS_BREAK_TOKENS ) @@ -226,12 +248,104 @@ def __repr__(self): self._replace(type=self._get_type_name())) +class FStringNode(object): + def __init__(self, quote): + self.quote = quote + self.parentheses_count = 0 + self.previous_lines = '' + self.last_string_start_pos = None + # In the syntax there can be multiple format_spec's nested: + # {x:{y:3}} + self.format_spec_count = 0 + + def open_parentheses(self, character): + self.parentheses_count += 1 + + def close_parentheses(self, character): + self.parentheses_count -= 1 + + def allow_multiline(self): + return len(self.quote) == 3 + + def is_in_expr(self): + return (self.parentheses_count - self.format_spec_count) > 0 + + +def _check_fstring_ending(fstring_stack, token, from_start=False): + fstring_end = float('inf') + fstring_index = None + for i, node in enumerate(fstring_stack): + if from_start: + if token.startswith(node.quote): + fstring_index = i + fstring_end = len(node.quote) + else: + continue + else: + try: + end = token.index(node.quote) + except ValueError: + pass + else: + if fstring_index is None or end < fstring_end: + fstring_index = i + fstring_end = end + return fstring_index, fstring_end + + +def _find_fstring_string(fstring_stack, line, lnum, pos): + tos = fstring_stack[-1] + if tos.is_in_expr(): + return '', pos + else: + new_pos = pos + allow_multiline = tos.allow_multiline() + if allow_multiline: + match = fstring_string_multi_line.match(line, pos) + else: + match = fstring_string_single_line.match(line, pos) + if match is None: + string = tos.previous_lines + else: + if not tos.previous_lines: + tos.last_string_start_pos = (lnum, 
pos) + + string = match.group(0) + for fstring_stack_node in fstring_stack: + try: + string = string[:string.index(fstring_stack_node.quote)] + except ValueError: + pass # The string was not found. + + new_pos += len(string) + if allow_multiline and string.endswith('\n'): + tos.previous_lines += string + string = '' + else: + string = tos.previous_lines + string + + return string, new_pos + + def tokenize(code, version_info, start_pos=(1, 0)): """Generate tokens from a the source code (string).""" lines = split_lines(code, keepends=True) return tokenize_lines(lines, version_info, start_pos=start_pos) +def _print_tokens(func): + """ + A small helper function to help debug the tokenize_lines function. + """ + def wrapper(*args, **kwargs): + for token in func(*args, **kwargs): + print(token) + yield token + + return wrapper + + +# @_print_tokens def tokenize_lines(lines, version_info, start_pos=(1, 0)): """ A heavily modified Python standard library tokenizer. @@ -240,7 +354,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): token. This idea comes from lib2to3. The prefix contains all information that is irrelevant for the parser like newlines in parentheses or comments. """ - pseudo_token, single_quoted, triple_quoted, endpats, always_break_tokens, = \ + pseudo_token, single_quoted, triple_quoted, endpats, fstring_pattern_map, always_break_tokens, = \ _get_token_collection(version_info) paren_level = 0 # count parentheses indents = [0] @@ -257,6 +371,7 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): additional_prefix = '' first = True lnum = start_pos[0] - 1 + fstring_stack = [] for line in lines: # loop over lines in stream lnum += 1 pos = 0 @@ -287,6 +402,37 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): continue while pos < max: + if fstring_stack: + string, pos = _find_fstring_string(fstring_stack, line, lnum, pos) + if string: + yield PythonToken( + FSTRING_STRING, string, + fstring_stack[-1].last_string_start_pos, + # Never has a prefix because it can start anywhere and + # include whitespace. + prefix='' + ) + fstring_stack[-1].previous_lines = '' + continue + + if pos == max: + break + + rest = line[pos:] + fstring_index, end = _check_fstring_ending(fstring_stack, rest, from_start=True) + + if fstring_index is not None: + yield PythonToken( + FSTRING_END, + fstring_stack[fstring_index].quote, + (lnum, pos), + prefix=additional_prefix, + ) + additional_prefix = '' + del fstring_stack[fstring_index:] + pos += end + continue + pseudomatch = pseudo_token.match(line, pos) if not pseudomatch: # scan for tokens txt = line[pos:] @@ -311,10 +457,11 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): if new_line and initial not in '\r\n#': new_line = False - if paren_level == 0: + if paren_level == 0 and not fstring_stack: i = 0 while line[i] == '\f': i += 1 + # TODO don't we need to change spos as well? 
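At the token level, an f-string now comes out as FSTRING_START / FSTRING_STRING / FSTRING_END plus ordinary tokens for the expression parts. A minimal sketch against the tokenize() entry point defined above (an internal module, used here only to show the new token kinds):

from parso.python.tokenize import tokenize

for token in tokenize("f'{a} and {b:>3}'\n", version_info=(3, 6)):
    print(token)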
start -= 1 if start > indents[-1]: yield PythonToken(INDENT, '', spos, '') @@ -326,11 +473,33 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): yield PythonToken(DEDENT, '', spos, '') indents.pop() + if fstring_stack: + fstring_index, end = _check_fstring_ending(fstring_stack, token) + if fstring_index is not None: + if end != 0: + yield PythonToken(ERRORTOKEN, token[:end], spos, prefix) + prefix = '' + + yield PythonToken( + FSTRING_END, + fstring_stack[fstring_index].quote, + (lnum, spos[1] + 1), + prefix=prefix + ) + del fstring_stack[fstring_index:] + pos -= len(token) - end + continue + if (initial in numchars or # ordinary number (initial == '.' and token != '.' and token != '...')): yield PythonToken(NUMBER, token, spos, prefix) elif initial in '\r\n': - if not new_line and paren_level == 0: + if any(not f.allow_multiline() for f in fstring_stack): + # Would use fstring_stack.clear, but that's not available + # in Python 2. + fstring_stack[:] = [] + + if not new_line and paren_level == 0 and not fstring_stack: yield PythonToken(NEWLINE, token, spos, prefix) else: additional_prefix = prefix + token @@ -362,8 +531,12 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): break else: # ordinary string yield PythonToken(STRING, token, spos, prefix) + elif token in fstring_pattern_map: # The start of an fstring. + fstring_stack.append(FStringNode(fstring_pattern_map[token])) + yield PythonToken(FSTRING_START, token, spos, prefix) elif is_identifier(initial): # ordinary name if token in always_break_tokens: + fstring_stack[:] = [] paren_level = 0 while True: indent = indents.pop() @@ -378,9 +551,18 @@ def tokenize_lines(lines, version_info, start_pos=(1, 0)): break else: if token in '([{': - paren_level += 1 + if fstring_stack: + fstring_stack[-1].open_parentheses(token) + else: + paren_level += 1 elif token in ')]}': - paren_level -= 1 + if fstring_stack: + fstring_stack[-1].close_parentheses(token) + else: + paren_level -= 1 + elif token == ':' and fstring_stack \ + and fstring_stack[-1].parentheses_count == 1: + fstring_stack[-1].format_spec_count += 1 try: # This check is needed in any case to check if it's a valid diff --git a/pythonFiles/parso/python/tree.py b/pythonFiles/parso/python/tree.py index eb977800a607..e2bf010bdff0 100644 --- a/pythonFiles/parso/python/tree.py +++ b/pythonFiles/parso/python/tree.py @@ -262,6 +262,33 @@ def _get_payload(self): return match.group(2)[:-len(match.group(1))] +class FStringString(Leaf): + """ + f-strings contain f-string expressions and normal python strings. These are + the string parts of f-strings. + """ + type = 'fstring_string' + __slots__ = () + + +class FStringStart(Leaf): + """ + f-strings contain f-string expressions and normal python strings. These are + the string parts of f-strings. + """ + type = 'fstring_start' + __slots__ = () + + +class FStringEnd(Leaf): + """ + f-strings contain f-string expressions and normal python strings. These are + the string parts of f-strings. + """ + type = 'fstring_end' + __slots__ = () + + class _StringComparisonMixin(object): def __eq__(self, other): """ diff --git a/pythonFiles/parso/tree.py b/pythonFiles/parso/tree.py index 72a14945b0f6..5316795be57c 100644 --- a/pythonFiles/parso/tree.py +++ b/pythonFiles/parso/tree.py @@ -55,7 +55,6 @@ def get_previous_sibling(self): Returns the node immediately preceding this node in this parent's children list. If this node does not have a previous sibling, it is None. - None. 
""" # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): @@ -339,7 +338,7 @@ def __repr__(self): class ErrorNode(BaseNode): """ - A node that containes valid nodes/leaves that we're follow by a token that + A node that contains valid nodes/leaves that we're follow by a token that was invalid. This basically means that the leaf after this node is where Python would mark a syntax error. """ diff --git a/src/client/providers/jediProxy.ts b/src/client/providers/jediProxy.ts index f0136585786f..7a8e47b62b1c 100644 --- a/src/client/providers/jediProxy.ts +++ b/src/client/providers/jediProxy.ts @@ -7,8 +7,7 @@ import * as fs from 'fs-extra'; import * as path from 'path'; import * as pidusage from 'pidusage'; import { setInterval } from 'timers'; -import { Uri } from 'vscode'; -import * as vscode from 'vscode'; +import { CancellationToken, CancellationTokenSource, CompletionItemKind, Disposable, SymbolKind, Uri } from 'vscode'; import { PythonSettings } from '../common/configSettings'; import { debounce, swallowExceptions } from '../common/decorators'; import '../common/extensions'; @@ -22,96 +21,96 @@ import * as logger from './../common/logger'; const IS_WINDOWS = /^win/.test(process.platform); -const pythonVSCodeTypeMappings = new Map(); -pythonVSCodeTypeMappings.set('none', vscode.CompletionItemKind.Value); -pythonVSCodeTypeMappings.set('type', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('tuple', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('dict', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('dictionary', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('function', vscode.CompletionItemKind.Function); -pythonVSCodeTypeMappings.set('lambda', vscode.CompletionItemKind.Function); -pythonVSCodeTypeMappings.set('generator', vscode.CompletionItemKind.Function); -pythonVSCodeTypeMappings.set('class', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('instance', vscode.CompletionItemKind.Reference); -pythonVSCodeTypeMappings.set('method', vscode.CompletionItemKind.Method); -pythonVSCodeTypeMappings.set('builtin', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('builtinfunction', vscode.CompletionItemKind.Function); -pythonVSCodeTypeMappings.set('module', vscode.CompletionItemKind.Module); -pythonVSCodeTypeMappings.set('file', vscode.CompletionItemKind.File); -pythonVSCodeTypeMappings.set('xrange', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('slice', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('traceback', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('frame', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('buffer', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('dictproxy', vscode.CompletionItemKind.Class); -pythonVSCodeTypeMappings.set('funcdef', vscode.CompletionItemKind.Function); -pythonVSCodeTypeMappings.set('property', vscode.CompletionItemKind.Property); -pythonVSCodeTypeMappings.set('import', vscode.CompletionItemKind.Module); -pythonVSCodeTypeMappings.set('keyword', vscode.CompletionItemKind.Keyword); -pythonVSCodeTypeMappings.set('constant', vscode.CompletionItemKind.Variable); -pythonVSCodeTypeMappings.set('variable', vscode.CompletionItemKind.Variable); -pythonVSCodeTypeMappings.set('value', vscode.CompletionItemKind.Value); -pythonVSCodeTypeMappings.set('param', vscode.CompletionItemKind.Variable); -pythonVSCodeTypeMappings.set('statement', 
vscode.CompletionItemKind.Keyword); - -const pythonVSCodeSymbolMappings = new Map(); -pythonVSCodeSymbolMappings.set('none', vscode.SymbolKind.Variable); -pythonVSCodeSymbolMappings.set('type', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('tuple', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('dict', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('dictionary', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('function', vscode.SymbolKind.Function); -pythonVSCodeSymbolMappings.set('lambda', vscode.SymbolKind.Function); -pythonVSCodeSymbolMappings.set('generator', vscode.SymbolKind.Function); -pythonVSCodeSymbolMappings.set('class', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('instance', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('method', vscode.SymbolKind.Method); -pythonVSCodeSymbolMappings.set('builtin', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('builtinfunction', vscode.SymbolKind.Function); -pythonVSCodeSymbolMappings.set('module', vscode.SymbolKind.Module); -pythonVSCodeSymbolMappings.set('file', vscode.SymbolKind.File); -pythonVSCodeSymbolMappings.set('xrange', vscode.SymbolKind.Array); -pythonVSCodeSymbolMappings.set('slice', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('traceback', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('frame', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('buffer', vscode.SymbolKind.Array); -pythonVSCodeSymbolMappings.set('dictproxy', vscode.SymbolKind.Class); -pythonVSCodeSymbolMappings.set('funcdef', vscode.SymbolKind.Function); -pythonVSCodeSymbolMappings.set('property', vscode.SymbolKind.Property); -pythonVSCodeSymbolMappings.set('import', vscode.SymbolKind.Module); -pythonVSCodeSymbolMappings.set('keyword', vscode.SymbolKind.Variable); -pythonVSCodeSymbolMappings.set('constant', vscode.SymbolKind.Constant); -pythonVSCodeSymbolMappings.set('variable', vscode.SymbolKind.Variable); -pythonVSCodeSymbolMappings.set('value', vscode.SymbolKind.Variable); -pythonVSCodeSymbolMappings.set('param', vscode.SymbolKind.Variable); -pythonVSCodeSymbolMappings.set('statement', vscode.SymbolKind.Variable); -pythonVSCodeSymbolMappings.set('boolean', vscode.SymbolKind.Boolean); -pythonVSCodeSymbolMappings.set('int', vscode.SymbolKind.Number); -pythonVSCodeSymbolMappings.set('longlean', vscode.SymbolKind.Number); -pythonVSCodeSymbolMappings.set('float', vscode.SymbolKind.Number); -pythonVSCodeSymbolMappings.set('complex', vscode.SymbolKind.Number); -pythonVSCodeSymbolMappings.set('string', vscode.SymbolKind.String); -pythonVSCodeSymbolMappings.set('unicode', vscode.SymbolKind.String); -pythonVSCodeSymbolMappings.set('list', vscode.SymbolKind.Array); - -function getMappedVSCodeType(pythonType: string): vscode.CompletionItemKind { +const pythonVSCodeTypeMappings = new Map(); +pythonVSCodeTypeMappings.set('none', CompletionItemKind.Value); +pythonVSCodeTypeMappings.set('type', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('tuple', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('dict', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('dictionary', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('function', CompletionItemKind.Function); +pythonVSCodeTypeMappings.set('lambda', CompletionItemKind.Function); +pythonVSCodeTypeMappings.set('generator', CompletionItemKind.Function); +pythonVSCodeTypeMappings.set('class', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('instance', CompletionItemKind.Reference); 
+pythonVSCodeTypeMappings.set('method', CompletionItemKind.Method); +pythonVSCodeTypeMappings.set('builtin', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('builtinfunction', CompletionItemKind.Function); +pythonVSCodeTypeMappings.set('module', CompletionItemKind.Module); +pythonVSCodeTypeMappings.set('file', CompletionItemKind.File); +pythonVSCodeTypeMappings.set('xrange', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('slice', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('traceback', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('frame', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('buffer', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('dictproxy', CompletionItemKind.Class); +pythonVSCodeTypeMappings.set('funcdef', CompletionItemKind.Function); +pythonVSCodeTypeMappings.set('property', CompletionItemKind.Property); +pythonVSCodeTypeMappings.set('import', CompletionItemKind.Module); +pythonVSCodeTypeMappings.set('keyword', CompletionItemKind.Keyword); +pythonVSCodeTypeMappings.set('constant', CompletionItemKind.Variable); +pythonVSCodeTypeMappings.set('variable', CompletionItemKind.Variable); +pythonVSCodeTypeMappings.set('value', CompletionItemKind.Value); +pythonVSCodeTypeMappings.set('param', CompletionItemKind.Variable); +pythonVSCodeTypeMappings.set('statement', CompletionItemKind.Keyword); + +const pythonVSCodeSymbolMappings = new Map(); +pythonVSCodeSymbolMappings.set('none', SymbolKind.Variable); +pythonVSCodeSymbolMappings.set('type', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('tuple', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('dict', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('dictionary', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('function', SymbolKind.Function); +pythonVSCodeSymbolMappings.set('lambda', SymbolKind.Function); +pythonVSCodeSymbolMappings.set('generator', SymbolKind.Function); +pythonVSCodeSymbolMappings.set('class', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('instance', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('method', SymbolKind.Method); +pythonVSCodeSymbolMappings.set('builtin', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('builtinfunction', SymbolKind.Function); +pythonVSCodeSymbolMappings.set('module', SymbolKind.Module); +pythonVSCodeSymbolMappings.set('file', SymbolKind.File); +pythonVSCodeSymbolMappings.set('xrange', SymbolKind.Array); +pythonVSCodeSymbolMappings.set('slice', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('traceback', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('frame', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('buffer', SymbolKind.Array); +pythonVSCodeSymbolMappings.set('dictproxy', SymbolKind.Class); +pythonVSCodeSymbolMappings.set('funcdef', SymbolKind.Function); +pythonVSCodeSymbolMappings.set('property', SymbolKind.Property); +pythonVSCodeSymbolMappings.set('import', SymbolKind.Module); +pythonVSCodeSymbolMappings.set('keyword', SymbolKind.Variable); +pythonVSCodeSymbolMappings.set('constant', SymbolKind.Constant); +pythonVSCodeSymbolMappings.set('variable', SymbolKind.Variable); +pythonVSCodeSymbolMappings.set('value', SymbolKind.Variable); +pythonVSCodeSymbolMappings.set('param', SymbolKind.Variable); +pythonVSCodeSymbolMappings.set('statement', SymbolKind.Variable); +pythonVSCodeSymbolMappings.set('boolean', SymbolKind.Boolean); +pythonVSCodeSymbolMappings.set('int', SymbolKind.Number); +pythonVSCodeSymbolMappings.set('longlean', SymbolKind.Number); 
+pythonVSCodeSymbolMappings.set('float', SymbolKind.Number); +pythonVSCodeSymbolMappings.set('complex', SymbolKind.Number); +pythonVSCodeSymbolMappings.set('string', SymbolKind.String); +pythonVSCodeSymbolMappings.set('unicode', SymbolKind.String); +pythonVSCodeSymbolMappings.set('list', SymbolKind.Array); + +function getMappedVSCodeType(pythonType: string): CompletionItemKind { if (pythonVSCodeTypeMappings.has(pythonType)) { const value = pythonVSCodeTypeMappings.get(pythonType); if (value) { return value; } } - return vscode.CompletionItemKind.Keyword; + return CompletionItemKind.Keyword; } -function getMappedVSCodeSymbol(pythonType: string): vscode.SymbolKind { +function getMappedVSCodeSymbol(pythonType: string): SymbolKind { if (pythonVSCodeSymbolMappings.has(pythonType)) { const value = pythonVSCodeSymbolMappings.get(pythonType); if (value) { return value; } } - return vscode.SymbolKind.Variable; + return SymbolKind.Variable; } export enum CommandType { @@ -131,7 +130,7 @@ commandNames.set(CommandType.Hover, 'tooltip'); commandNames.set(CommandType.Usages, 'usages'); commandNames.set(CommandType.Symbols, 'names'); -export class JediProxy implements vscode.Disposable { +export class JediProxy implements Disposable { private proc?: ChildProcess; private pythonSettings: PythonSettings; private cmdId: number = 0; @@ -151,7 +150,7 @@ export class JediProxy implements vscode.Disposable { public constructor(private extensionRootDir: string, workspacePath: string, private serviceContainer: IServiceContainer) { this.workspacePath = workspacePath; - this.pythonSettings = PythonSettings.getInstance(vscode.Uri.file(workspacePath)); + this.pythonSettings = PythonSettings.getInstance(Uri.file(workspacePath)); this.lastKnownPythonInterpreter = this.pythonSettings.pythonPath; this.logger = serviceContainer.get(ILogger); this.pythonSettings.on('change', () => this.pythonSettingsChangeHandler()); @@ -315,7 +314,8 @@ export class JediProxy implements vscode.Disposable { args.push('custom'); args.push(this.pythonSettings.jediPath); } - if (Array.isArray(this.pythonSettings.autoComplete.preloadModules) && + if (this.pythonSettings.autoComplete && + Array.isArray(this.pythonSettings.autoComplete.preloadModules) && this.pythonSettings.autoComplete.preloadModules.length > 0) { const modules = this.pythonSettings.autoComplete.preloadModules.filter(m => m.trim().length > 0).join(','); args.push(modules); @@ -636,7 +636,8 @@ export class JediProxy implements vscode.Disposable { } private getConfig() { // Add support for paths relative to workspace. - const extraPaths = this.pythonSettings.autoComplete.extraPaths.map(extraPath => { + const extraPaths = this.pythonSettings.autoComplete ? + this.pythonSettings.autoComplete.extraPaths.map(extraPath => { if (path.isAbsolute(extraPath)) { return extraPath; } @@ -644,7 +645,7 @@ export class JediProxy implements vscode.Disposable { return ''; } return path.join(this.workspacePath, extraPath); - }); + }) : []; // Always add workspace path into extra paths. 
if (typeof this.workspacePath === 'string') { @@ -686,7 +687,7 @@ export interface ICommand { interface IExecutionCommand extends ICommand { id: number; deferred?: Deferred; - token: vscode.CancellationToken; + token: CancellationToken; delay?: number; } @@ -739,9 +740,9 @@ export interface IReference { } export interface IAutoCompleteItem { - type: vscode.CompletionItemKind; - rawType: vscode.CompletionItemKind; - kind: vscode.SymbolKind; + type: CompletionItemKind; + rawType: CompletionItemKind; + kind: SymbolKind; text: string; description: string; raw_docstring: string; @@ -755,8 +756,8 @@ export interface IDefinitionRange { } export interface IDefinition { rawType: string; - type: vscode.CompletionItemKind; - kind: vscode.SymbolKind; + type: CompletionItemKind; + kind: SymbolKind; text: string; fileName: string; container: string; @@ -764,22 +765,22 @@ export interface IDefinition { } export interface IHoverItem { - kind: vscode.SymbolKind; + kind: SymbolKind; text: string; description: string; docstring: string; signature: string; } -export class JediProxyHandler implements vscode.Disposable { - private commandCancellationTokenSources: Map; +export class JediProxyHandler implements Disposable { + private commandCancellationTokenSources: Map; public get JediProxy(): JediProxy { return this.jediProxy; } public constructor(private jediProxy: JediProxy) { - this.commandCancellationTokenSources = new Map(); + this.commandCancellationTokenSources = new Map(); } public dispose() { @@ -788,7 +789,7 @@ export class JediProxyHandler implements vscode.Dispos } } - public sendCommand(cmd: ICommand, token?: vscode.CancellationToken): Promise { + public sendCommand(cmd: ICommand, token?: CancellationToken): Promise { const executionCmd = >cmd; executionCmd.id = executionCmd.id || this.jediProxy.getNextCommandId(); @@ -799,7 +800,7 @@ export class JediProxyHandler implements vscode.Dispos } } - const cancellation = new vscode.CancellationTokenSource(); + const cancellation = new CancellationTokenSource(); this.commandCancellationTokenSources.set(cmd.command, cancellation); executionCmd.token = cancellation.token; @@ -810,7 +811,7 @@ export class JediProxyHandler implements vscode.Dispos }); } - public sendCommandNonCancellableCommand(cmd: ICommand, token?: vscode.CancellationToken): Promise { + public sendCommandNonCancellableCommand(cmd: ICommand, token?: CancellationToken): Promise { const executionCmd = >cmd; executionCmd.id = executionCmd.id || this.jediProxy.getNextCommandId(); if (token) { diff --git a/src/client/providers/signatureProvider.ts b/src/client/providers/signatureProvider.ts index 12dad261c39b..cf1014296519 100644 --- a/src/client/providers/signatureProvider.ts +++ b/src/client/providers/signatureProvider.ts @@ -1,8 +1,15 @@ 'use strict'; import { EOL } from 'os'; -import * as vscode from 'vscode'; -import { CancellationToken, Position, SignatureHelp, TextDocument } from 'vscode'; +import { + CancellationToken, + ParameterInformation, + Position, + SignatureHelp, + SignatureHelpProvider, + SignatureInformation, + TextDocument +} from 'vscode'; import { JediFactory } from '../languageServices/jediProxyFactory'; import { captureTelemetry } from '../telemetry'; import { SIGNATURE } from '../telemetry/constants'; @@ -45,9 +52,9 @@ function extractParamDocString(paramName: string, docString: string): string { return paramDocString.trim(); } -export class PythonSignatureProvider implements vscode.SignatureHelpProvider { +export class PythonSignatureProvider implements 
SignatureHelpProvider { public constructor(private jediFactory: JediFactory) { } - private static parseData(data: proxy.IArgumentsResult): vscode.SignatureHelp { + private static parseData(data: proxy.IArgumentsResult): SignatureHelp { if (data && Array.isArray(data.definitions) && data.definitions.length > 0) { const signature = new SignatureHelp(); signature.activeSignature = 0; @@ -60,29 +67,36 @@ export class PythonSignatureProvider implements vscode.SignatureHelpProvider { // Some functions do not come with parameter docs let label: string; let documentation: string; - const validParamInfo = def.params && def.params.length > 0 && def.docstring.startsWith(`${def.name}(`); + const validParamInfo = def.params && def.params.length > 0 && def.docstring && def.docstring.startsWith(`${def.name}(`); if (validParamInfo) { const docLines = def.docstring.splitLines(); label = docLines.shift().trim(); documentation = docLines.join(EOL).trim(); } else { - label = def.description; - documentation = def.docstring; + if (def.params && def.params.length > 0) { + label = `${def.name}(${def.params.map(p => p.name).join(', ')})`; + documentation = def.docstring; + } else { + label = def.description; + documentation = def.docstring; + } } - const sig = { + // tslint:disable-next-line:no-object-literal-type-assertion + const sig = { label, documentation, parameters: [] }; - if (validParamInfo) { + if (def.params && def.params.length) { sig.parameters = def.params.map(arg => { if (arg.docstring.length === 0) { arg.docstring = extractParamDocString(arg.name, def.docstring); } - return { + // tslint:disable-next-line:no-object-literal-type-assertion + return { documentation: arg.docstring.length > 0 ? arg.docstring : arg.description, label: arg.name.trim() }; diff --git a/src/test/signature/signature.jedi.test.ts b/src/test/signature/signature.jedi.test.ts index 1ab80cce964d..0d3a0b5ed90b 100644 --- a/src/test/signature/signature.jedi.test.ts +++ b/src/test/signature/signature.jedi.test.ts @@ -74,13 +74,15 @@ suite('Signatures (Jedi)', () => { new SignatureHelpResult(0, 3, 0, 0, null), new SignatureHelpResult(0, 4, 0, 0, null), new SignatureHelpResult(0, 5, 0, 0, null), - new SignatureHelpResult(0, 6, 1, 0, 'start'), - new SignatureHelpResult(0, 7, 1, 0, 'start'), - new SignatureHelpResult(0, 8, 1, 1, 'stop'), - new SignatureHelpResult(0, 9, 1, 1, 'stop'), - new SignatureHelpResult(0, 10, 1, 1, 'stop'), - new SignatureHelpResult(0, 11, 1, 2, 'step'), - new SignatureHelpResult(1, 0, 1, 2, 'step') + new SignatureHelpResult(0, 6, 1, 0, 'stop'), + new SignatureHelpResult(0, 7, 1, 0, 'stop') + // new SignatureHelpResult(0, 6, 1, 0, 'start'), + // new SignatureHelpResult(0, 7, 1, 0, 'start'), + // new SignatureHelpResult(0, 8, 1, 1, 'stop'), + // new SignatureHelpResult(0, 9, 1, 1, 'stop'), + // new SignatureHelpResult(0, 10, 1, 1, 'stop'), + // new SignatureHelpResult(0, 11, 1, 2, 'step'), + // new SignatureHelpResult(1, 0, 1, 2, 'step') ]; const document = await openDocument(path.join(autoCompPath, 'basicSig.py')); From cfee1092243ff29f907d682664e0d0858d0c33fa Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 16 Apr 2018 16:07:52 -0700 Subject: [PATCH 38/83] Priority to goto_defition --- pythonFiles/completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pythonFiles/completion.py b/pythonFiles/completion.py index e530be32b367..2a0d6e3d095b 100644 --- a/pythonFiles/completion.py +++ b/pythonFiles/completion.py @@ -570,7 +570,7 @@ def _process_request(self, request): if lookup == 
'definitions': defs = [] try: - defs = self._get_definitionsx(script.goto_assignments(follow_imports=False), request['id']) + defs = self._get_definitionsx(script.goto_definitions(follow_imports=False), request['id']) except: pass try: From d1ff1d9e693415eddd7158892383e89c40853bbb Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 17 Apr 2018 10:34:13 -0700 Subject: [PATCH 39/83] News --- news/1 Enhancements/1400.md | 1 + news/2 Fixes/1033.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 news/1 Enhancements/1400.md create mode 100644 news/2 Fixes/1033.md diff --git a/news/1 Enhancements/1400.md b/news/1 Enhancements/1400.md new file mode 100644 index 000000000000..47b3cf611c88 --- /dev/null +++ b/news/1 Enhancements/1400.md @@ -0,0 +1 @@ +Intergrate Jedi 0.12. See https://github.com/davidhalter/jedi/issues/1063#issuecomment-381417297 for details. \ No newline at end of file diff --git a/news/2 Fixes/1033.md b/news/2 Fixes/1033.md new file mode 100644 index 000000000000..31fec8720909 --- /dev/null +++ b/news/2 Fixes/1033.md @@ -0,0 +1 @@ +Fix go to definition functionality across files. \ No newline at end of file From 1bd1651064b9bd81c07584f402643bcda5b16c43 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 17 Apr 2018 13:10:46 -0700 Subject: [PATCH 40/83] Replace unzip --- package.json | 2 +- src/client/activation/downloader.ts | 42 ++++++++++++++++++++++------- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/package.json b/package.json index 434dc231a1d8..74853cf944cd 100644 --- a/package.json +++ b/package.json @@ -1848,6 +1848,7 @@ "md5": "2.2.1", "minimatch": "3.0.4", "named-js-regexp": "1.3.3", + "node-stream-zip": "^1.6.0", "opn": "5.3.0", "pidusage": "1.2.0", "reflect-metadata": "0.1.12", @@ -1862,7 +1863,6 @@ "uint64be": "1.0.1", "unicode": "10.0.0", "untildify": "3.0.2", - "unzip": "0.1.11", "vscode-debugadapter": "1.28.0", "vscode-debugprotocol": "1.28.0", "vscode-extension-telemetry": "0.0.15", diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index de634d627316..c48b0b4b2503 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -5,7 +5,6 @@ import * as fs from 'fs'; import * as path from 'path'; import * as request from 'request'; import * as requestProgress from 'request-progress'; -import * as unzip from 'unzip'; import { ExtensionContext, OutputChannel, ProgressLocation, window } from 'vscode'; import { STANDARD_OUTPUT_CHANNEL } from '../common/constants'; import { noop } from '../common/core.utils'; @@ -16,6 +15,9 @@ import { IServiceContainer } from '../ioc/types'; import { HashVerifier } from './hashVerifier'; import { PlatformData } from './platformData'; +// tslint:disable-next-line:no-require-imports no-var-requires +const StreamZip = require('node-stream-zip'); + const downloadUriPrefix = 'https://pvsc.blob.core.windows.net/python-analysis'; const downloadBaseFileName = 'python-analysis-vscode'; const downloadVersion = '0.1.0'; @@ -109,15 +111,37 @@ export class AnalysisEngineDownloader { const installFolder = path.join(extensionPath, this.engineFolder); const deferred = createDeferred(); - fs.createReadStream(tempFilePath) - .pipe(unzip.Extract({ path: installFolder })) - .on('finish', () => { - deferred.resolve(); - }) - .on('error', (err) => { - deferred.reject(err); + const title = 'Extracting files... 
'; + await window.withProgress({ + location: ProgressLocation.Window, + title + }, (progress) => { + const zip = new StreamZip({ + file: tempFilePath, + storeEntries: true }); - await deferred.promise; + + let totalFiles = 0; + let extractedFiles = 0; + zip.on('ready', () => { + totalFiles = zip.entriesCount; + if (!fs.existsSync(installFolder)) { + fs.mkdirSync(installFolder); + } + zip.extract(null, installFolder, (err, count) => { + if (err) { + deferred.reject(err); + } else { + deferred.resolve(); + } + zip.close(); + }); + }).on('extract', (entry, file) => { + extractedFiles += 1; + progress.report({ message: `${title}${Math.round(100 * extractedFiles / totalFiles)}%` }); + }); + return deferred.promise; + }); this.output.append('done.'); // Set file to executable From f916ace8140db8e96f03676e42b4410ae278c4ee Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 18 Apr 2018 12:37:28 -0700 Subject: [PATCH 41/83] Linux flavors + test --- src/client/activation/analysis.ts | 2 +- src/client/activation/analysisEngineHashes.ts | 15 +++- src/client/activation/downloader.ts | 8 +- src/client/activation/platformData.ts | 67 +++++++++++++-- src/test/activation/platformData.test.ts | 84 +++++++++++++++++++ 5 files changed, 158 insertions(+), 18 deletions(-) create mode 100644 src/test/activation/platformData.test.ts diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 7ed94f893a8f..d2e853dc7dda 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -57,7 +57,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { this.appShell = this.services.get(IApplicationShell); this.output = this.services.get(IOutputChannel, STANDARD_OUTPUT_CHANNEL); this.fs = this.services.get(IFileSystem); - this.platformData = new PlatformData(services.get(IPlatformService)); + this.platformData = new PlatformData(services.get(IPlatformService), this.fs); } public async activate(context: ExtensionContext): Promise { diff --git a/src/client/activation/analysisEngineHashes.ts b/src/client/activation/analysisEngineHashes.ts index 52761329113e..2f9123a46c59 100644 --- a/src/client/activation/analysisEngineHashes.ts +++ b/src/client/activation/analysisEngineHashes.ts @@ -3,7 +3,14 @@ // This file will be replaced by a generated one during the release build // with actual hashes of the uploaded packages. 
-export const analysis_engine_win_x86_sha512 = ''; -export const analysis_engine_win_x64_sha512 = ''; -export const analysis_engine_osx_x64_sha512 = ''; -export const analysis_engine_linux_x64_sha512 = ''; +// Values are for test purposes only +export const analysis_engine_win_x86_sha512 = 'win-x86'; +export const analysis_engine_win_x64_sha512 = 'win-x64'; +export const analysis_engine_osx_x64_sha512 = 'osx-x64'; +export const analysis_engine_centos_x64_sha512 = 'centos-x64'; +export const analysis_engine_debian_x64_sha512 = 'debian-x64'; +export const analysis_engine_fedora_x64_sha512 = 'fedora-x64'; +export const analysis_engine_ol_x64_sha512 = 'ol-x64'; +export const analysis_engine_opensuse_x64_sha512 = 'opensuse-x64'; +export const analysis_engine_rhel_x64_sha512 = 'rhel-x64'; +export const analysis_engine_ubuntu_x64_sha512 = 'ubuntu-x64'; diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index c48b0b4b2503..98a2d2e1bfc2 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -9,7 +9,7 @@ import { ExtensionContext, OutputChannel, ProgressLocation, window } from 'vscod import { STANDARD_OUTPUT_CHANNEL } from '../common/constants'; import { noop } from '../common/core.utils'; import { createDeferred, createTemporaryFile } from '../common/helpers'; -import { IPlatformService } from '../common/platform/types'; +import { IFileSystem, IPlatformService } from '../common/platform/types'; import { IOutputChannel } from '../common/types'; import { IServiceContainer } from '../ioc/types'; import { HashVerifier } from './hashVerifier'; @@ -31,7 +31,7 @@ export class AnalysisEngineDownloader { constructor(private readonly services: IServiceContainer, private engineFolder: string) { this.output = this.services.get(IOutputChannel, STANDARD_OUTPUT_CHANNEL); this.platform = this.services.get(IPlatformService); - this.platformData = new PlatformData(this.platform); + this.platformData = new PlatformData(this.platform, this.services.get(IFileSystem)); } public async downloadAnalysisEngine(context: ExtensionContext): Promise { @@ -49,7 +49,7 @@ export class AnalysisEngineDownloader { } private async downloadFile(): Promise { - const platformString = this.platformData.getPlatformDesignator(); + const platformString = await this.platformData.getPlatformName(); const remoteFileName = `${downloadBaseFileName}-${platformString}.${downloadVersion}${downloadFileExtension}`; const uri = `${downloadUriPrefix}/${remoteFileName}`; this.output.append(`Downloading ${uri}... `); @@ -98,7 +98,7 @@ export class AnalysisEngineDownloader { this.output.appendLine(''); this.output.append('Verifying download... '); const verifier = new HashVerifier(); - if (!await verifier.verifyHash(filePath, this.platformData.getExpectedHash())) { + if (!await verifier.verifyHash(filePath, await this.platformData.getExpectedHash())) { throw new Error('Hash of the downloaded file does not match.'); } this.output.append('valid.'); diff --git a/src/client/activation/platformData.ts b/src/client/activation/platformData.ts index 541e5a602bef..2a1cb29da461 100644 --- a/src/client/activation/platformData.ts +++ b/src/client/activation/platformData.ts @@ -1,27 +1,54 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-import { IPlatformService } from '../common/platform/types'; +import { IFileSystem, IPlatformService } from '../common/platform/types'; import { - analysis_engine_linux_x64_sha512, + analysis_engine_centos_x64_sha512, + analysis_engine_debian_x64_sha512, + analysis_engine_fedora_x64_sha512, + analysis_engine_ol_x64_sha512, + analysis_engine_opensuse_x64_sha512, analysis_engine_osx_x64_sha512, + analysis_engine_rhel_x64_sha512, + analysis_engine_ubuntu_x64_sha512, analysis_engine_win_x64_sha512, analysis_engine_win_x86_sha512 } from './analysisEngineHashes'; +// '/etc/os-release', ID=flavor +const supportedLinuxFlavors = [ + 'centos', + 'debian', + 'fedora', + 'ol', + 'opensuse', + 'rhel', + 'ubuntu' +]; + export class PlatformData { - constructor(private platform: IPlatformService) { } - public getPlatformDesignator(): string { + constructor(private platform: IPlatformService, private fs: IFileSystem) { } + public async getPlatformName(): Promise { if (this.platform.isWindows) { return this.platform.is64bit ? 'win-x64' : 'win-x86'; } if (this.platform.isMac) { return 'osx-x64'; } - if (this.platform.isLinux && this.platform.is64bit) { - return 'linux-x64'; + if (this.platform.isLinux) { + if (!this.platform.is64bit) { + throw new Error('Python Analysis Engine does not support 32-bit Linux.'); + } + const linuxFlavor = await this.getLinuxFlavor(); + if (linuxFlavor.length === 0) { + throw new Error('Unable to determine Linux flavor from /etc/os-release.'); + } + if (supportedLinuxFlavors.indexOf(linuxFlavor) < 0) { + throw new Error(`${linuxFlavor} is not supported.`); + } + return `${linuxFlavor}-x64`; } - throw new Error('Python Analysis Engine does not support 32-bit Linux.'); + throw new Error('Unknown OS platform.'); } public getEngineDllName(): string { @@ -34,7 +61,7 @@ export class PlatformData { : 'Microsoft.PythonTools.VsCode'; } - public getExpectedHash(): string { + public async getExpectedHash(): Promise { if (this.platform.isWindows) { return this.platform.is64bit ? analysis_engine_win_x64_sha512 : analysis_engine_win_x86_sha512; } @@ -42,8 +69,30 @@ export class PlatformData { return analysis_engine_osx_x64_sha512; } if (this.platform.isLinux && this.platform.is64bit) { - return analysis_engine_linux_x64_sha512; + const linuxFlavor = await this.getLinuxFlavor(); + // tslint:disable-next-line:switch-default + switch (linuxFlavor) { + case 'centos': return analysis_engine_centos_x64_sha512; + case 'debian': return analysis_engine_debian_x64_sha512; + case 'fedora': return analysis_engine_fedora_x64_sha512; + case 'ol': return analysis_engine_ol_x64_sha512; + case 'opensuse': return analysis_engine_opensuse_x64_sha512; + case 'rhel': return analysis_engine_rhel_x64_sha512; + case 'ubuntu': return analysis_engine_ubuntu_x64_sha512; + } } throw new Error('Unknown platform.'); } + + private async getLinuxFlavor(): Promise { + const verFile = '/etc/os-release'; + const data = await this.fs.readFile(verFile); + if (data) { + const res = /ID=(.*)/.exec(data); + if (res && res.length > 1) { + return res[1]; + } + } + return ''; + } } diff --git a/src/test/activation/platformData.test.ts b/src/test/activation/platformData.test.ts new file mode 100644 index 000000000000..4270f4c6a3fc --- /dev/null +++ b/src/test/activation/platformData.test.ts @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// tslint:disable:no-unused-variable +import * as assert from 'assert'; +import * as TypeMoq from 'typemoq'; +import { PlatformData } from '../../client/activation/platformData'; +import { IFileSystem, IPlatformService } from '../../client/common/platform/types'; +import { initialize } from '../initialize'; + +const testDataWinMac = [ + { isWindows: true, is64Bit: true, expectedName: 'win-x64' }, + { isWindows: true, is64Bit: false, expectedName: 'win-x86' }, + { isWindows: false, is64Bit: true, expectedName: 'osx-x64' } +]; + +const testDataLinux = [ + { name: 'centos', expectedName: 'centos-x64' }, + { name: 'debian', expectedName: 'debian-x64' }, + { name: 'fedora', expectedName: 'fedora-x64' }, + { name: 'ol', expectedName: 'ol-x64' }, + { name: 'opensuse', expectedName: 'opensuse-x64' }, + { name: 'rhel', expectedName: 'rhel-x64' }, + { name: 'ubuntu', expectedName: 'ubuntu-x64' } +]; + +const testDataModuleName = [ + { isWindows: true, expectedName: 'Microsoft.PythonTools.VsCode.exe' }, + { isWindows: false, expectedName: 'Microsoft.PythonTools.VsCode' } +]; + +// tslint:disable-next-line:max-func-body-length +suite('Activation - platform data', () => { + suiteSetup(initialize); + + test('Name and hash (Windows/Mac)', async () => { + for (const t of testDataWinMac) { + const platformService = TypeMoq.Mock.ofType(); + platformService.setup(x => x.isWindows).returns(() => t.isWindows); + platformService.setup(x => x.isMac).returns(() => !t.isWindows); + platformService.setup(x => x.is64bit).returns(() => t.is64Bit); + + const fs = TypeMoq.Mock.ofType(); + const pd = new PlatformData(platformService.object, fs.object); + + let actual = await pd.getPlatformName(); + assert.equal(actual, t.expectedName, `${actual} does not match ${t.expectedName}`); + + actual = await pd.getExpectedHash(); + assert.equal(actual, t.expectedName, `${actual} hash not match ${t.expectedName}`); + } + }); + test('Name and hash (Linux)', async () => { + for (const t of testDataLinux) { + const platformService = TypeMoq.Mock.ofType(); + platformService.setup(x => x.isWindows).returns(() => false); + platformService.setup(x => x.isMac).returns(() => false); + platformService.setup(x => x.isLinux).returns(() => true); + platformService.setup(x => x.is64bit).returns(() => true); + + const fs = TypeMoq.Mock.ofType(); + fs.setup(x => x.readFile(TypeMoq.It.isAnyString())).returns(() => Promise.resolve(`NAME="name"\nID=${t.name}\nID_LIKE=debian`)); + const pd = new PlatformData(platformService.object, fs.object); + + let actual = await pd.getPlatformName(); + assert.equal(actual, t.expectedName, `${actual} does not match ${t.expectedName}`); + + actual = await pd.getExpectedHash(); + assert.equal(actual, t.expectedName, `${actual} hash not match ${t.expectedName}`); + } + }); + test('Module name', async () => { + for (const t of testDataModuleName) { + const platformService = TypeMoq.Mock.ofType(); + platformService.setup(x => x.isWindows).returns(() => t.isWindows); + + const fs = TypeMoq.Mock.ofType(); + const pd = new PlatformData(platformService.object, fs.object); + + const actual = pd.getEngineExecutableName(); + assert.equal(actual, t.expectedName, `${actual} does not match ${t.expectedName}`); + } + }); +}); From 28ca25f628c1c9c25a89795bc39d667b3d1e5c55 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 19 Apr 2018 16:53:18 -0700 Subject: [PATCH 42/83] Grammar check --- src/client/formatters/lineFormatter.ts | 31 +- src/client/language/tokenizer.ts | 97 +- .../format/extension.lineFormatter.test.ts | 
32 + src/test/language/tokenizer.test.ts | 96 +- .../pythonFiles/formatting/pythonGrammar.py | 1572 +++++++++++++++++ 5 files changed, 1759 insertions(+), 69 deletions(-) create mode 100644 src/test/pythonFiles/formatting/pythonGrammar.py diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 046533952464..4b2817667940 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -6,7 +6,7 @@ import Char from 'typescript-char'; import { BraceCounter } from '../language/braceCounter'; import { TextBuilder } from '../language/textBuilder'; import { TextRangeCollection } from '../language/textRangeCollection'; -import { Tokenizer } from '../language/tokenizer'; +import { isPythonKeyword, Tokenizer } from '../language/tokenizer'; import { ITextRangeCollection, IToken, TokenType } from '../language/types'; export class LineFormatter { @@ -52,7 +52,12 @@ export class LineFormatter { if (prev && !this.isOpenBraceType(prev.type) && prev.type !== TokenType.Colon && prev.type !== TokenType.Operator) { this.builder.softAppendSpace(); } - this.builder.append(this.text.substring(t.start, t.end)); + const id = this.text.substring(t.start, t.end); + this.builder.append(id); + if (isPythonKeyword(id) && next && this.isOpenBraceType(next.type)) { + // for x in () + this.builder.softAppendSpace(); + } break; case TokenType.Colon: @@ -150,11 +155,27 @@ export class LineFormatter { } } - // In general, keep tokens separated. - this.builder.softAppendSpace(); - this.builder.append(this.text.substring(t.start, t.end)); + if (t.type === TokenType.Unknown) { + this.handleUnknown(t); + } else { + // In general, keep tokens separated. + this.builder.softAppendSpace(); + this.builder.append(this.text.substring(t.start, t.end)); + } } + private handleUnknown(t: IToken): void { + const prevChar = t.start > 0 ? this.text.charCodeAt(t.start - 1) : 0; + if (prevChar === Char.Space || prevChar === Char.Tab) { + this.builder.softAppendSpace(); + } + this.builder.append(this.text.substring(t.start, t.end)); + + const nextChar = t.end < this.text.length - 1 ? this.text.charCodeAt(t.end) : 0; + if (nextChar === Char.Space || nextChar === Char.Tab) { + this.builder.softAppendSpace(); + } + } private isEqualsInsideArguments(index: number): boolean { if (index < 1) { return false; diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index e1c8c4b03d9e..9d229920f515 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -26,14 +26,19 @@ class Token extends TextRange implements IToken { } } +const pythonKeywords = [ + 'and', 'assert', 'break', 'class', 'continue', 'def', 'del', + 'elif', 'else', 'except', 'exec', 'False', 'finally', 'for', 'from', + 'global', 'if', 'import', 'in', 'is', 'lambda', 'None', 'nonlocal', + 'not', 'or', 'pass', 'print', 'raise', 'return', 'True', 'try', + 'while', 'with', 'yield' +]; + +export function isPythonKeyword(s: string): boolean { + return pythonKeywords.find((value, index) => value === s) ? 
true : false; +} + export class Tokenizer implements ITokenizer { - // private keywords = [ - // 'and', 'assert', 'break', 'class', 'continue', 'def', 'del', - // 'elif', 'else', 'except', 'exec', 'False', 'finally', 'for', 'from', - // 'global', 'if', 'import', 'in', 'is', 'lambda', 'None', 'nonlocal', - // 'not', 'or', 'pass', 'print', 'raise', 'return', 'True', 'try', - // 'while', 'with', 'yield' - // ]; private cs: ICharacterStream = new CharacterStream(''); private tokens: IToken[] = []; private floatRegex = /[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?/; @@ -134,7 +139,6 @@ export class Tokenizer implements ITokenizer { this.tokens.push(new Token(TokenType.Colon, this.cs.position, 1)); break; case Char.At: - case Char.Period: this.tokens.push(new Token(TokenType.Operator, this.cs.position, 1)); break; default: @@ -143,6 +147,10 @@ export class Tokenizer implements ITokenizer { return true; } } + if (this.cs.currentChar === Char.Period) { + this.tokens.push(new Token(TokenType.Operator, this.cs.position, 1)); + break; + } if (!this.tryIdentifier()) { if (!this.tryOperator()) { this.handleUnknown(); @@ -170,29 +178,8 @@ export class Tokenizer implements ITokenizer { return false; } + // tslint:disable-next-line:cyclomatic-complexity private isPossibleNumber(): boolean { - if (this.cs.currentChar === Char.Hyphen || this.cs.currentChar === Char.Plus) { - // Next character must be decimal or a dot otherwise - // it is not a number. No whitespace is allowed. - if (isDecimal(this.cs.nextChar) || this.cs.nextChar === Char.Period) { - // Check what previous token is, if any - if (this.tokens.length === 0) { - // At the start of the file this can only be a number - return true; - } - - const prev = this.tokens[this.tokens.length - 1]; - if (prev.type === TokenType.OpenBrace - || prev.type === TokenType.OpenBracket - || prev.type === TokenType.Comma - || prev.type === TokenType.Semicolon - || prev.type === TokenType.Operator) { - return true; - } - } - return false; - } - if (isDecimal(this.cs.currentChar)) { return true; } @@ -201,12 +188,51 @@ export class Tokenizer implements ITokenizer { return true; } + const next = (this.cs.currentChar === Char.Hyphen || this.cs.currentChar === Char.Plus) ? 1 : 0; + // Next character must be decimal or a dot otherwise + // it is not a number. No whitespace is allowed. 
+ if (isDecimal(this.cs.lookAhead(next)) || this.cs.lookAhead(next) === Char.Period) { + // Check what previous token is, if any + if (this.tokens.length === 0) { + // At the start of the file this can only be a number + return true; + } + + const prev = this.tokens[this.tokens.length - 1]; + if (prev.type === TokenType.OpenBrace + || prev.type === TokenType.OpenBracket + || prev.type === TokenType.Comma + || prev.type === TokenType.Semicolon + || prev.type === TokenType.Operator) { + return true; + } + } + + if (this.cs.lookAhead(next) === Char._0) { + const nextNext = this.cs.lookAhead(next + 1); + if (nextNext === Char.x || nextNext === Char.X) { + return true; + } + if (nextNext === Char.b || nextNext === Char.B) { + return true; + } + if (nextNext === Char.o || nextNext === Char.O) { + return true; + } + } + return false; } // tslint:disable-next-line:cyclomatic-complexity private tryNumber(): boolean { const start = this.cs.position; + let leadingSign = 0; + + if (this.cs.currentChar === Char.Hyphen || this.cs.currentChar === Char.Plus) { + this.cs.moveNext(); // Skip leading +/- + leadingSign = 1; + } if (this.cs.currentChar === Char._0) { let radix = 0; @@ -234,20 +260,19 @@ export class Tokenizer implements ITokenizer { } radix = 8; } - const text = this.cs.getText().substr(start, this.cs.position - start); + const text = this.cs.getText().substr(start + leadingSign, this.cs.position - start - leadingSign); if (radix > 0 && parseInt(text.substr(2), radix)) { - this.tokens.push(new Token(TokenType.Number, start, text.length)); + this.tokens.push(new Token(TokenType.Number, start, text.length + leadingSign)); return true; } } - if (isDecimal(this.cs.currentChar) || - this.cs.currentChar === Char.Plus || this.cs.currentChar === Char.Hyphen || this.cs.currentChar === Char.Period) { + if (isDecimal(this.cs.currentChar) || this.cs.currentChar === Char.Period) { const candidate = this.cs.getText().substr(this.cs.position); const re = this.floatRegex.exec(candidate); if (re && re.length > 0 && re[0] && candidate.startsWith(re[0])) { - this.tokens.push(new Token(TokenType.Number, start, re[0].length)); - this.cs.position = start + re[0].length; + this.tokens.push(new Token(TokenType.Number, start, re[0].length + leadingSign)); + this.cs.position = start + re[0].length + leadingSign; return true; } } diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 3325c19382a2..2c94c69af2b9 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -3,9 +3,16 @@ // Licensed under the MIT License. import * as assert from 'assert'; +import * as fs from 'fs'; +import * as path from 'path'; +import '../../client/common/extensions'; import { LineFormatter } from '../../client/formatters/lineFormatter'; +const formatFilesPath = path.join(__dirname, '..', '..', '..', 'src', 'test', 'pythonFiles', 'formatting'); +const grammarFile = path.join(formatFilesPath, 'pythonGrammar.py'); + // https://www.python.org/dev/peps/pep-0008/#code-lay-out +// tslint:disable-next-line:max-func-body-length suite('Formatting - line formatter', () => { const formatter = new LineFormatter(); @@ -85,4 +92,29 @@ suite('Formatting - line formatter', () => { const actual = formatter.formatLine('foo( *a, ** b, ! 
c)'); assert.equal(actual, 'foo(*a, **b, !c)'); }); + test('Brace after keyword', () => { + const actual = formatter.formatLine('for x in(1,2,3)'); + assert.equal(actual, 'for x in (1, 2, 3)'); + }); + test('Dot operator', () => { + const actual = formatter.formatLine('x.y'); + assert.equal(actual, 'x.y'); + }); + test('Unknown tokens no space', () => { + const actual = formatter.formatLine('abc\\n\\'); + assert.equal(actual, 'abc\\n\\'); + }); + test('Unknown tokens with space', () => { + const actual = formatter.formatLine('abc \\n \\'); + assert.equal(actual, 'abc \\n \\'); + }); + test('Grammar file', () => { + const content = fs.readFileSync(grammarFile).toString('utf8'); + const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); + for (let i = 0; i < lines.length; i += 1) { + const line = lines[i]; + const actual = formatter.formatLine(line); + assert.equal(actual, line, `Line ${i + 1} changed: '${line}' to '${actual}'`); + } + }); }); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 202f0c774297..923504041695 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -9,14 +9,14 @@ import { TokenType } from '../../client/language/types'; // tslint:disable-next-line:max-func-body-length suite('Language.Tokenizer', () => { - test('Empty', async () => { + test('Empty', () => { const t = new Tokenizer(); const tokens = t.tokenize(''); assert.equal(tokens instanceof TextRangeCollection, true); assert.equal(tokens.count, 0); assert.equal(tokens.length, 0); }); - test('Strings: unclosed', async () => { + test('Strings: unclosed', () => { const t = new Tokenizer(); const tokens = t.tokenize(' "string" """line1\n#line2"""\t\'un#closed'); assert.equal(tokens.count, 3); @@ -28,7 +28,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.String); } }); - test('Strings: block next to regular, double-quoted', async () => { + test('Strings: block next to regular, double-quoted', () => { const t = new Tokenizer(); const tokens = t.tokenize('"string""""s2"""'); assert.equal(tokens.count, 2); @@ -40,7 +40,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.String); } }); - test('Strings: block next to block, double-quoted', async () => { + test('Strings: block next to block, double-quoted', () => { const t = new Tokenizer(); const tokens = t.tokenize('""""""""'); assert.equal(tokens.count, 2); @@ -52,7 +52,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.String); } }); - test('Strings: unclosed sequence of quotes', async () => { + test('Strings: unclosed sequence of quotes', () => { const t = new Tokenizer(); const tokens = t.tokenize('"""""'); assert.equal(tokens.count, 1); @@ -64,7 +64,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.String); } }); - test('Strings: single quote escape', async () => { + test('Strings: single quote escape', () => { const t = new Tokenizer(); // tslint:disable-next-line:quotemark const tokens = t.tokenize("'\\'quoted\\''"); @@ -72,14 +72,14 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).type, TokenType.String); assert.equal(tokens.getItemAt(0).length, 12); }); - test('Strings: double quote escape', async () => { + test('Strings: double quote escape', () => { const t = new Tokenizer(); const tokens = t.tokenize('"\\"quoted\\""'); assert.equal(tokens.count, 1); 
assert.equal(tokens.getItemAt(0).type, TokenType.String); assert.equal(tokens.getItemAt(0).length, 12); }); - test('Strings: single quoted f-string ', async () => { + test('Strings: single quoted f-string ', () => { const t = new Tokenizer(); // tslint:disable-next-line:quotemark const tokens = t.tokenize("a+f'quoted'"); @@ -89,7 +89,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(2).type, TokenType.String); assert.equal(tokens.getItemAt(2).length, 9); }); - test('Strings: double quoted f-string ', async () => { + test('Strings: double quoted f-string ', () => { const t = new Tokenizer(); const tokens = t.tokenize('x(1,f"quoted")'); assert.equal(tokens.count, 6); @@ -101,7 +101,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(4).length, 9); assert.equal(tokens.getItemAt(5).type, TokenType.CloseBrace); }); - test('Strings: single quoted multiline f-string ', async () => { + test('Strings: single quoted multiline f-string ', () => { const t = new Tokenizer(); // tslint:disable-next-line:quotemark const tokens = t.tokenize("f'''quoted'''"); @@ -109,14 +109,14 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).type, TokenType.String); assert.equal(tokens.getItemAt(0).length, 13); }); - test('Strings: double quoted multiline f-string ', async () => { + test('Strings: double quoted multiline f-string ', () => { const t = new Tokenizer(); const tokens = t.tokenize('f"""quoted """'); assert.equal(tokens.count, 1); assert.equal(tokens.getItemAt(0).type, TokenType.String); assert.equal(tokens.getItemAt(0).length, 14); }); - test('Strings: escape at the end of single quoted string ', async () => { + test('Strings: escape at the end of single quoted string ', () => { const t = new Tokenizer(); // tslint:disable-next-line:quotemark const tokens = t.tokenize("'quoted\\'\nx"); @@ -125,7 +125,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).length, 9); assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); }); - test('Strings: escape at the end of double quoted string ', async () => { + test('Strings: escape at the end of double quoted string ', () => { const t = new Tokenizer(); const tokens = t.tokenize('"quoted\\"\nx'); assert.equal(tokens.count, 2); @@ -133,7 +133,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).length, 9); assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); }); - test('Comments', async () => { + test('Comments', () => { const t = new Tokenizer(); const tokens = t.tokenize(' #co"""mment1\n\t\n#comm\'ent2 '); assert.equal(tokens.count, 2); @@ -145,7 +145,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(i).type, TokenType.Comment); } }); - test('Period to operator token', async () => { + test('Period to operator token', () => { const t = new Tokenizer(); const tokens = t.tokenize('x.y'); assert.equal(tokens.count, 3); @@ -154,7 +154,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(1).type, TokenType.Operator); assert.equal(tokens.getItemAt(2).type, TokenType.Identifier); }); - test('@ to operator token', async () => { + test('@ to operator token', () => { const t = new Tokenizer(); const tokens = t.tokenize('@x'); assert.equal(tokens.count, 2); @@ -162,14 +162,14 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).type, TokenType.Operator); assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); }); - test('Unknown token', async () => { + test('Unknown token', () => { const t = new 
Tokenizer(); const tokens = t.tokenize('~$'); assert.equal(tokens.count, 1); assert.equal(tokens.getItemAt(0).type, TokenType.Unknown); }); - test('Hex number', async () => { + test('Hex number', () => { const t = new Tokenizer(); const tokens = t.tokenize('1 0X2 0x3 0x'); assert.equal(tokens.count, 4); @@ -186,7 +186,7 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(3).type, TokenType.Unknown); assert.equal(tokens.getItemAt(3).length, 2); }); - test('Binary number', async () => { + test('Binary number', () => { const t = new Tokenizer(); const tokens = t.tokenize('1 0B1 0b010 0b3 0b'); assert.equal(tokens.count, 6); @@ -209,10 +209,10 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(5).type, TokenType.Unknown); assert.equal(tokens.getItemAt(5).length, 2); }); - test('Octal number', async () => { + test('Octal number', () => { const t = new Tokenizer(); - const tokens = t.tokenize('1 0o4 0o077 0o9 0oO'); - assert.equal(tokens.count, 6); + const tokens = t.tokenize('1 0o4 0o077 -0o200 0o9 0oO'); + assert.equal(tokens.count, 7); assert.equal(tokens.getItemAt(0).type, TokenType.Number); assert.equal(tokens.getItemAt(0).length, 1); @@ -224,15 +224,55 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(2).length, 5); assert.equal(tokens.getItemAt(3).type, TokenType.Number); - assert.equal(tokens.getItemAt(3).length, 1); + assert.equal(tokens.getItemAt(3).length, 6); - assert.equal(tokens.getItemAt(4).type, TokenType.Identifier); - assert.equal(tokens.getItemAt(4).length, 2); + assert.equal(tokens.getItemAt(4).type, TokenType.Number); + assert.equal(tokens.getItemAt(4).length, 1); - assert.equal(tokens.getItemAt(5).type, TokenType.Unknown); - assert.equal(tokens.getItemAt(5).length, 3); + assert.equal(tokens.getItemAt(5).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(5).length, 2); + + assert.equal(tokens.getItemAt(6).type, TokenType.Unknown); + assert.equal(tokens.getItemAt(6).length, 3); + }); + test('Decimal number', () => { + const t = new Tokenizer(); + const tokens = t.tokenize('-2147483647 ++2147483647'); + assert.equal(tokens.count, 3); + + assert.equal(tokens.getItemAt(0).type, TokenType.Number); + assert.equal(tokens.getItemAt(0).length, 11); + + assert.equal(tokens.getItemAt(1).type, TokenType.Operator); + assert.equal(tokens.getItemAt(1).length, 1); + + assert.equal(tokens.getItemAt(2).type, TokenType.Number); + assert.equal(tokens.getItemAt(2).length, 11); + }); + test('Floating point number', () => { + const t = new Tokenizer(); + const tokens = t.tokenize('3.0 .2 ++.3e+12 --.4e1'); + assert.equal(tokens.count, 6); + + assert.equal(tokens.getItemAt(0).type, TokenType.Number); + assert.equal(tokens.getItemAt(0).length, 3); + + assert.equal(tokens.getItemAt(1).type, TokenType.Number); + assert.equal(tokens.getItemAt(1).length, 2); + + assert.equal(tokens.getItemAt(2).type, TokenType.Operator); + assert.equal(tokens.getItemAt(2).length, 1); + + assert.equal(tokens.getItemAt(3).type, TokenType.Number); + assert.equal(tokens.getItemAt(3).length, 7); + + assert.equal(tokens.getItemAt(4).type, TokenType.Operator); + assert.equal(tokens.getItemAt(4).length, 1); + + assert.equal(tokens.getItemAt(5).type, TokenType.Number); + assert.equal(tokens.getItemAt(5).length, 5); }); - test('Operators', async () => { + test('Operators', () => { const text = '< <> << <<= ' + '== != > >> >>= >= <=' + '+ -' + diff --git a/src/test/pythonFiles/formatting/pythonGrammar.py b/src/test/pythonFiles/formatting/pythonGrammar.py new file mode 
100644 index 000000000000..5224695aebe5 --- /dev/null +++ b/src/test/pythonFiles/formatting/pythonGrammar.py @@ -0,0 +1,1572 @@ +# Python test set -- part 1, grammar. +# This just tests whether the parser accepts them all. + +from test.support import check_syntax_error +import inspect +import unittest +import sys +# testing import * +from sys import * + +# different import patterns to check that __annotations__ does not interfere +# with import machinery +import test.ann_module as ann_module +import typing +from collections import ChainMap +from test import ann_module2 +import test + +# These are shared with test_tokenize and other test modules. +# +# Note: since several test cases filter out floats by looking for "e" and ".", +# don't add hexadecimal literals that contain "e" or "E". +VALID_UNDERSCORE_LITERALS = [ + '0_0_0', + '4_2', + '1_0000_0000', + '0b1001_0100', + '0xffff_ffff', + '0o5_7_7', + '1_00_00.5', + '1_00_00.5e5', + '1_00_00e5_1', + '1e1_0', + '.1_4', + '.1_4e1', + '0b_0', + '0x_f', + '0o_5', + '1_00_00j', + '1_00_00.5j', + '1_00_00e5_1j', + '.1_4j', + '(1_2.5+3_3j)', + '(.5_6j)', +] +INVALID_UNDERSCORE_LITERALS = [ + # Trailing underscores: + '0_', + '42_', + '1.4j_', + '0x_', + '0b1_', + '0xf_', + '0o5_', + '0 if 1_Else 1', + # Underscores in the base selector: + '0_b0', + '0_xf', + '0_o5', + # Old-style octal, still disallowed: + '0_7', + '09_99', + # Multiple consecutive underscores: + '4_______2', + '0.1__4', + '0.1__4j', + '0b1001__0100', + '0xffff__ffff', + '0x___', + '0o5__77', + '1e1__0', + '1e1__0j', + # Underscore right before a dot: + '1_.4', + '1_.4j', + # Underscore right after a dot: + '1._4', + '1._4j', + '._5', + '._5j', + # Underscore right after a sign: + '1.0e+_1', + '1.0e+_1j', + # Underscore right before j: + '1.4_j', + '1.4e5_j', + # Underscore right before e: + '1_e1', + '1.4_e1', + '1.4_e1j', + # Underscore right after e: + '1e_1', + '1.4e_1', + '1.4e_1j', + # Complex cases with parens: + '(1+1.5_j_)', + '(1+1.5_j)', +] + + +class TokenTests(unittest.TestCase): + + def test_backslash(self): + # Backslash means line continuation: + x = 1 \ + + 1 + self.assertEqual(x, 2, 'backslash for line continuation') + + # Backslash does not means continuation in comments :\ + x = 0 + self.assertEqual(x, 0, 'backslash ending comment') + + def test_plain_integers(self): + self.assertEqual(type(000), type(0)) + self.assertEqual(0xff, 255) + self.assertEqual(0o377, 255) + self.assertEqual(2147483647, 0o17777777777) + self.assertEqual(0b1001, 9) + # "0x" is not a valid literal + self.assertRaises(SyntaxError, eval, "0x") + from sys import maxsize + if maxsize == 2147483647: + self.assertEqual(-2147483647 - 1, -0o20000000000) + # XXX -2147483648 + self.assertTrue(0o37777777777 > 0) + self.assertTrue(0xffffffff > 0) + self.assertTrue(0b1111111111111111111111111111111 > 0) + for s in ('2147483648', '0o40000000000', '0x100000000', + '0b10000000000000000000000000000000'): + try: + x = eval(s) + except OverflowError: + self.fail("OverflowError on huge integer literal %r" % s) + elif maxsize == 9223372036854775807: + self.assertEqual(-9223372036854775807 - 1, -0o1000000000000000000000) + self.assertTrue(0o1777777777777777777777 > 0) + self.assertTrue(0xffffffffffffffff > 0) + self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0) + for s in '9223372036854775808', '0o2000000000000000000000', \ + '0x10000000000000000', \ + '0b100000000000000000000000000000000000000000000000000000000000000': + try: + x = eval(s) + except OverflowError: + 
self.fail("OverflowError on huge integer literal %r" % s) + else: + self.fail('Weird maxsize value %r' % maxsize) + + def test_long_integers(self): + x = 0 + x = 0xffffffffffffffff + x = 0Xffffffffffffffff + x = 0o77777777777777777 + x = 0O77777777777777777 + x = 123456789012345678901234567890 + x = 0b100000000000000000000000000000000000000000000000000000000000000000000 + x = 0B111111111111111111111111111111111111111111111111111111111111111111111 + + def test_floats(self): + x = 3.14 + x = 314. + x = 0.314 + # XXX x = 000.314 + x = .314 + x = 3e14 + x = 3E14 + x = 3e-14 + x = 3e+14 + x = 3.e14 + x = .3e14 + x = 3.1e4 + + def test_float_exponent_tokenization(self): + # See issue 21642. + self.assertEqual(1 if 1 else 0, 1) + self.assertEqual(1 if 0 else 0, 0) + self.assertRaises(SyntaxError, eval, "0 if 1Else 0") + + def test_underscore_literals(self): + for lit in VALID_UNDERSCORE_LITERALS: + self.assertEqual(eval(lit), eval(lit.replace('_', ''))) + for lit in INVALID_UNDERSCORE_LITERALS: + self.assertRaises(SyntaxError, eval, lit) + # Sanity check: no literal begins with an underscore + self.assertRaises(NameError, eval, "_0") + + def test_string_literals(self): + x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y) + x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39) + x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34) + x = "doesn't \"shrink\" does it" + y = 'doesn\'t "shrink" does it' + self.assertTrue(len(x) == 24 and x == y) + x = "does \"shrink\" doesn't it" + y = 'does "shrink" doesn\'t it' + self.assertTrue(len(x) == 24 and x == y) + x = """ +The "quick" +brown fox +jumps over +the 'lazy' dog. +""" + y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n' + self.assertEqual(x, y) + y = ''' +The "quick" +brown fox +jumps over +the 'lazy' dog. +''' + self.assertEqual(x, y) + y = "\n\ +The \"quick\"\n\ +brown fox\n\ +jumps over\n\ +the 'lazy' dog.\n\ +" + self.assertEqual(x, y) + y = '\n\ +The \"quick\"\n\ +brown fox\n\ +jumps over\n\ +the \'lazy\' dog.\n\ +' + self.assertEqual(x, y) + + def test_ellipsis(self): + x = ... + self.assertTrue(x is Ellipsis) + self.assertRaises(SyntaxError, eval, ".. 
.") + + def test_eof_error(self): + samples = ("def foo(", "\ndef foo(", "def foo(\n") + for s in samples: + with self.assertRaises(SyntaxError) as cm: + compile(s, "", "exec") + self.assertIn("unexpected EOF", str(cm.exception)) + +var_annot_global: int # a global annotated is necessary for test_var_annot + +# custom namespace for testing __annotations__ + +class CNS: + def __init__(self): + self._dct = {} + def __setitem__(self, item, value): + self._dct[item.lower()] = value + def __getitem__(self, item): + return self._dct[item] + + +class GrammarTests(unittest.TestCase): + + check_syntax_error = check_syntax_error + + # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE + # XXX can't test in a script -- this rule is only used when interactive + + # file_input: (NEWLINE | stmt)* ENDMARKER + # Being tested as this very moment this very module + + # expr_input: testlist NEWLINE + # XXX Hard to test -- used only in calls to input() + + def test_eval_input(self): + # testlist ENDMARKER + x = eval('1, 0 or 1') + + def test_var_annot_basics(self): + # all these should be allowed + var1: int = 5 + var2: [int, str] + my_lst = [42] + def one(): + return 1 + int.new_attr: int + [list][0]: type + my_lst[one() - 1]: int = 5 + self.assertEqual(my_lst, [5]) + + def test_var_annot_syntax_errors(self): + # parser pass + check_syntax_error(self, "def f: int") + check_syntax_error(self, "x: int: str") + check_syntax_error(self, "def f():\n" + " nonlocal x: int\n") + # AST pass + check_syntax_error(self, "[x, 0]: int\n") + check_syntax_error(self, "f(): int\n") + check_syntax_error(self, "(x,): int") + check_syntax_error(self, "def f():\n" + " (x, y): int = (1, 2)\n") + # symtable pass + check_syntax_error(self, "def f():\n" + " x: int\n" + " global x\n") + check_syntax_error(self, "def f():\n" + " global x\n" + " x: int\n") + + def test_var_annot_basic_semantics(self): + # execution order + with self.assertRaises(ZeroDivisionError): + no_name[does_not_exist]: no_name_again = 1 / 0 + with self.assertRaises(NameError): + no_name[does_not_exist]: 1 / 0 = 0 + global var_annot_global + + # function semantics + def f(): + st: str = "Hello" + a.b: int = (1, 2) + return st + self.assertEqual(f.__annotations__, {}) + def f_OK(): + x: 1 / 0 + f_OK() + def fbad(): + x: int + print(x) + with self.assertRaises(UnboundLocalError): + fbad() + def f2bad(): + (no_such_global): int + print(no_such_global) + try: + f2bad() + except Exception as e: + self.assertIs(type(e), NameError) + + # class semantics + class C: + __foo: int + s: str = "attr" + z = 2 + def __init__(self, x): + self.x: int = x + self.assertEqual(C.__annotations__, {'_C__foo': int, 's': str}) + with self.assertRaises(NameError): + class CBad: + no_such_name_defined.attr: int = 0 + with self.assertRaises(NameError): + class Cbad2(C): + x: int + x.y: list = [] + + def test_var_annot_metaclass_semantics(self): + class CMeta(type): + @classmethod + def __prepare__(metacls, name, bases, **kwds): + return {'__annotations__': CNS()} + class CC(metaclass=CMeta): + XX: 'ANNOT' + self.assertEqual(CC.__annotations__['xx'], 'ANNOT') + + def test_var_annot_module_semantics(self): + with self.assertRaises(AttributeError): + print(test.__annotations__) + self.assertEqual(ann_module.__annotations__, + {1: 2, 'x': int, 'y': str, 'f': typing.Tuple[int, int]}) + self.assertEqual(ann_module.M.__annotations__, + {'123': 123, 'o': type}) + self.assertEqual(ann_module2.__annotations__, {}) + + def test_var_annot_in_module(self): + # check that functions fail the same 
way when executed + # outside of module where they were defined + from test.ann_module3 import f_bad_ann, g_bad_ann, D_bad_ann + with self.assertRaises(NameError): + f_bad_ann() + with self.assertRaises(NameError): + g_bad_ann() + with self.assertRaises(NameError): + D_bad_ann(5) + + def test_var_annot_simple_exec(self): + gns = {}; lns = {} + exec("'docstring'\n" + "__annotations__[1] = 2\n" + "x: int = 5\n", gns, lns) + self.assertEqual(lns["__annotations__"], {1: 2, 'x': int}) + with self.assertRaises(KeyError): + gns['__annotations__'] + + def test_var_annot_custom_maps(self): + # tests with custom locals() and __annotations__ + ns = {'__annotations__': CNS()} + exec('X: int; Z: str = "Z"; (w): complex = 1j', ns) + self.assertEqual(ns['__annotations__']['x'], int) + self.assertEqual(ns['__annotations__']['z'], str) + with self.assertRaises(KeyError): + ns['__annotations__']['w'] + nonloc_ns = {} + class CNS2: + def __init__(self): + self._dct = {} + def __setitem__(self, item, value): + nonlocal nonloc_ns + self._dct[item] = value + nonloc_ns[item] = value + def __getitem__(self, item): + return self._dct[item] + exec('x: int = 1', {}, CNS2()) + self.assertEqual(nonloc_ns['__annotations__']['x'], int) + + def test_var_annot_refleak(self): + # complex case: custom locals plus custom __annotations__ + # this was causing refleak + cns = CNS() + nonloc_ns = {'__annotations__': cns} + class CNS2: + def __init__(self): + self._dct = {'__annotations__': cns} + def __setitem__(self, item, value): + nonlocal nonloc_ns + self._dct[item] = value + nonloc_ns[item] = value + def __getitem__(self, item): + return self._dct[item] + exec('X: str', {}, CNS2()) + self.assertEqual(nonloc_ns['__annotations__']['x'], str) + + def test_funcdef(self): + ### [decorators] 'def' NAME parameters ['->' test] ':' suite + ### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE + ### decorators: decorator+ + ### parameters: '(' [typedargslist] ')' + ### typedargslist: ((tfpdef ['=' test] ',')* + ### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) + ### | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) + ### tfpdef: NAME [':' test] + ### varargslist: ((vfpdef ['=' test] ',')* + ### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef) + ### | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + ### vfpdef: NAME + def f1(): pass + f1() + f1(*()) + f1(*(), **{}) + def f2(one_argument): pass + def f3(two, arguments): pass + self.assertEqual(f2.__code__.co_varnames, ('one_argument',)) + self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments')) + def a1(one_arg,): pass + def a2(two, args,): pass + def v0(*rest): pass + def v1(a, *rest): pass + def v2(a, b, *rest): pass + + f1() + f2(1) + f2(1,) + f3(1, 2) + f3(1, 2,) + v0() + v0(1) + v0(1,) + v0(1,2) + v0(1,2,3,4,5,6,7,8,9,0) + v1(1) + v1(1,) + v1(1,2) + v1(1,2,3) + v1(1,2,3,4,5,6,7,8,9,0) + v2(1,2) + v2(1,2,3) + v2(1,2,3,4) + v2(1,2,3,4,5,6,7,8,9,0) + + def d01(a=1): pass + d01() + d01(1) + d01(*(1,)) + d01(*[] or [2]) + d01(*() or (), *{} and (), **() or {}) + d01(**{'a':2}) + d01(**{'a':2} or {}) + def d11(a, b=1): pass + d11(1) + d11(1, 2) + d11(1, **{'b':2}) + def d21(a, b, c=1): pass + d21(1, 2) + d21(1, 2, 3) + d21(*(1, 2, 3)) + d21(1, *(2, 3)) + d21(1, 2, *(3,)) + d21(1, 2, **{'c':3}) + def d02(a=1, b=2): pass + d02() + d02(1) + d02(1, 2) + d02(*(1, 2)) + d02(1, *(2,)) + d02(1, **{'b':2}) + d02(**{'a': 1, 'b': 2}) + def d12(a, b=1, c=2): pass + d12(1) + d12(1, 2) + d12(1, 2, 3) + def d22(a, b, c=1, d=2): pass + 
d22(1, 2) + d22(1, 2, 3) + d22(1, 2, 3, 4) + def d01v(a=1, *rest): pass + d01v() + d01v(1) + d01v(1, 2) + d01v(*(1, 2, 3, 4)) + d01v(*(1,)) + d01v(**{'a':2}) + def d11v(a, b=1, *rest): pass + d11v(1) + d11v(1, 2) + d11v(1, 2, 3) + def d21v(a, b, c=1, *rest): pass + d21v(1, 2) + d21v(1, 2, 3) + d21v(1, 2, 3, 4) + d21v(*(1, 2, 3, 4)) + d21v(1, 2, **{'c': 3}) + def d02v(a=1, b=2, *rest): pass + d02v() + d02v(1) + d02v(1, 2) + d02v(1, 2, 3) + d02v(1, *(2, 3, 4)) + d02v(**{'a': 1, 'b': 2}) + def d12v(a, b=1, c=2, *rest): pass + d12v(1) + d12v(1, 2) + d12v(1, 2, 3) + d12v(1, 2, 3, 4) + d12v(*(1, 2, 3, 4)) + d12v(1, 2, *(3, 4, 5)) + d12v(1, *(2,), **{'c': 3}) + def d22v(a, b, c=1, d=2, *rest): pass + d22v(1, 2) + d22v(1, 2, 3) + d22v(1, 2, 3, 4) + d22v(1, 2, 3, 4, 5) + d22v(*(1, 2, 3, 4)) + d22v(1, 2, *(3, 4, 5)) + d22v(1, *(2, 3), **{'d': 4}) + + # keyword argument type tests + try: + str('x', **{b'foo':1 }) + except TypeError: + pass + else: + self.fail('Bytes should not work as keyword argument names') + # keyword only argument tests + def pos0key1(*, key): return key + pos0key1(key=100) + def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2 + pos2key2(1, 2, k1=100) + pos2key2(1, 2, k1=100, k2=200) + pos2key2(1, 2, k2=100, k1=200) + def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg + pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200) + pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100) + + self.assertRaises(SyntaxError, eval, "def f(*): pass") + self.assertRaises(SyntaxError, eval, "def f(*,): pass") + self.assertRaises(SyntaxError, eval, "def f(*, **kwds): pass") + + # keyword arguments after *arglist + def f(*args, **kwargs): + return args, kwargs + self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4), + {'x':2, 'y':5})) + self.assertEqual(f(1, *(2,3), 4), ((1, 2, 3, 4), {})) + self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)") + self.assertEqual(f(**{'eggs':'scrambled', 'spam':'fried'}), + ((), {'eggs':'scrambled', 'spam':'fried'})) + self.assertEqual(f(spam='fried', **{'eggs':'scrambled'}), + ((), {'eggs':'scrambled', 'spam':'fried'})) + + # Check ast errors in *args and *kwargs + check_syntax_error(self, "f(*g(1=2))") + check_syntax_error(self, "f(**g(1=2))") + + # argument annotation tests + def f(x) -> list: pass + self.assertEqual(f.__annotations__, {'return': list}) + def f(x: int): pass + self.assertEqual(f.__annotations__, {'x': int}) + def f(*x: str): pass + self.assertEqual(f.__annotations__, {'x': str}) + def f(**x: float): pass + self.assertEqual(f.__annotations__, {'x': float}) + def f(x, y: 1+2): pass + self.assertEqual(f.__annotations__, {'y': 3}) + def f(a, b: 1, c: 2, d): pass + self.assertEqual(f.__annotations__, {'b': 1, 'c': 2}) + def f(a, b: 1, c: 2, d, e: 3 = 4, f=5, *g: 6): pass + self.assertEqual(f.__annotations__, + {'b': 1, 'c': 2, 'e': 3, 'g': 6}) + def f(a, b: 1, c: 2, d, e: 3 = 4, f=5, *g: 6, h: 7, i=8, j: 9 = 10, + **k: 11) -> 12: pass + self.assertEqual(f.__annotations__, + {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9, + 'k': 11, 'return': 12}) + # Check for issue #20625 -- annotations mangling + class Spam: + def f(self, *, __kw: 1): + pass + class Ham(Spam): pass + self.assertEqual(Spam.f.__annotations__, {'_Spam__kw': 1}) + self.assertEqual(Ham.f.__annotations__, {'_Spam__kw': 1}) + # Check for SF Bug #1697248 - mixing decorators and a return annotation + def null(x): return x + @null + def f(x) -> list: pass + self.assertEqual(f.__annotations__, {'return': list}) + + # test closures with a variety of opargs + 
closure = 1 + def f(): return closure + def f(x=1): return closure + def f(*, k=1): return closure + def f() -> int: return closure + + # Check trailing commas are permitted in funcdef argument list + def f(a,): pass + def f(*args,): pass + def f(**kwds,): pass + def f(a, *args,): pass + def f(a, **kwds,): pass + def f(*args, b,): pass + def f(*, b,): pass + def f(*args, **kwds,): pass + def f(a, *args, b,): pass + def f(a, *, b,): pass + def f(a, *args, **kwds,): pass + def f(*args, b, **kwds,): pass + def f(*, b, **kwds,): pass + def f(a, *args, b, **kwds,): pass + def f(a, *, b, **kwds,): pass + + def test_lambdef(self): + ### lambdef: 'lambda' [varargslist] ':' test + l1 = lambda : 0 + self.assertEqual(l1(), 0) + l2 = lambda : a[d] # XXX just testing the expression + l3 = lambda : [2 < x for x in [-1, 3, 0]] + self.assertEqual(l3(), [0, 1, 0]) + l4 = lambda x = lambda y = lambda z=1 : z : y() : x() + self.assertEqual(l4(), 1) + l5 = lambda x, y, z=2: x + y + z + self.assertEqual(l5(1, 2), 5) + self.assertEqual(l5(1, 2, 3), 6) + check_syntax_error(self, "lambda x: x = 2") + check_syntax_error(self, "lambda (None,): None") + l6 = lambda x, y, *, k=20: x+y+k + self.assertEqual(l6(1,2), 1+2+20) + self.assertEqual(l6(1,2,k=10), 1+2+10) + + # check that trailing commas are permitted + l10 = lambda a,: 0 + l11 = lambda *args,: 0 + l12 = lambda **kwds,: 0 + l13 = lambda a, *args,: 0 + l14 = lambda a, **kwds,: 0 + l15 = lambda *args, b,: 0 + l16 = lambda *, b,: 0 + l17 = lambda *args, **kwds,: 0 + l18 = lambda a, *args, b,: 0 + l19 = lambda a, *, b,: 0 + l20 = lambda a, *args, **kwds,: 0 + l21 = lambda *args, b, **kwds,: 0 + l22 = lambda *, b, **kwds,: 0 + l23 = lambda a, *args, b, **kwds,: 0 + l24 = lambda a, *, b, **kwds,: 0 + + + ### stmt: simple_stmt | compound_stmt + # Tested below + + def test_simple_stmt(self): + ### simple_stmt: small_stmt (';' small_stmt)* [';'] + x = 1; pass; del x + def foo(): + # verify statements that end with semi-colons + x = 1; pass; del x; + foo() + + ### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt + # Tested below + + def test_expr_stmt(self): + # (exprlist '=')* exprlist + 1 + 1, 2, 3 + x = 1 + x = 1, 2, 3 + x = y = z = 1, 2, 3 + x, y, z = 1, 2, 3 + abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4) + + check_syntax_error(self, "x + 1 = 1") + check_syntax_error(self, "a + 1 = b + 2") + + # Check the heuristic for print & exec covers significant cases + # As well as placing some limits on false positives + def test_former_statements_refer_to_builtins(self): + keywords = "print", "exec" + # Cases where we want the custom error + cases = [ + "{} foo", + "{} {{1:foo}}", + "if 1: {} foo", + "if 1: {} {{1:foo}}", + "if 1:\n {} foo", + "if 1:\n {} {{1:foo}}", + ] + for keyword in keywords: + custom_msg = "call to '{}'".format(keyword) + for case in cases: + source = case.format(keyword) + with self.subTest(source=source): + with self.assertRaisesRegex(SyntaxError, custom_msg): + exec(source) + source = source.replace("foo", "(foo.)") + with self.subTest(source=source): + with self.assertRaisesRegex(SyntaxError, "invalid syntax"): + exec(source) + + def test_del_stmt(self): + # 'del' exprlist + abc = [1,2,3] + x, y, z = abc + xyz = x, y, z + + del abc + del x, y, (z, xyz) + + def test_pass_stmt(self): + # 'pass' + pass + + # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt + # Tested below + + def test_break_stmt(self): + # 'break' + while 1: break + + def test_continue_stmt(self): + # 'continue' + i = 
1 + while i: i = 0; continue + + msg = "" + while not msg: + msg = "ok" + try: + continue + msg = "continue failed to continue inside try" + except: + msg = "continue inside try called except block" + if msg != "ok": + self.fail(msg) + + msg = "" + while not msg: + msg = "finally block not called" + try: + continue + finally: + msg = "ok" + if msg != "ok": + self.fail(msg) + + def test_break_continue_loop(self): + # This test warrants an explanation. It is a test specifically for SF bugs + # #463359 and #462937. The bug is that a 'break' statement executed or + # exception raised inside a try/except inside a loop, *after* a continue + # statement has been executed in that loop, will cause the wrong number of + # arguments to be popped off the stack and the instruction pointer reset to + # a very small number (usually 0.) Because of this, the following test + # *must* written as a function, and the tracking vars *must* be function + # arguments with default values. Otherwise, the test will loop and loop. + + def test_inner(extra_burning_oil = 1, count=0): + big_hippo = 2 + while big_hippo: + count += 1 + try: + if extra_burning_oil and big_hippo == 1: + extra_burning_oil -= 1 + break + big_hippo -= 1 + continue + except: + raise + if count > 2 or big_hippo != 1: + self.fail("continue then break in try/except in loop broken!") + test_inner() + + def test_return(self): + # 'return' [testlist] + def g1(): return + def g2(): return 1 + g1() + x = g2() + check_syntax_error(self, "class foo:return 1") + + def test_break_in_finally(self): + count = 0 + while count < 2: + count += 1 + try: + pass + finally: + break + self.assertEqual(count, 1) + + count = 0 + while count < 2: + count += 1 + try: + continue + finally: + break + self.assertEqual(count, 1) + + count = 0 + while count < 2: + count += 1 + try: + 1/0 + finally: + break + self.assertEqual(count, 1) + + for count in [0, 1]: + self.assertEqual(count, 0) + try: + pass + finally: + break + self.assertEqual(count, 0) + + for count in [0, 1]: + self.assertEqual(count, 0) + try: + continue + finally: + break + self.assertEqual(count, 0) + + for count in [0, 1]: + self.assertEqual(count, 0) + try: + 1/0 + finally: + break + self.assertEqual(count, 0) + + def test_continue_in_finally(self): + count = 0 + while count < 2: + count += 1 + try: + pass + finally: + continue + break + self.assertEqual(count, 2) + + count = 0 + while count < 2: + count += 1 + try: + break + finally: + continue + self.assertEqual(count, 2) + + count = 0 + while count < 2: + count += 1 + try: + 1/0 + finally: + continue + break + self.assertEqual(count, 2) + + for count in [0, 1]: + try: + pass + finally: + continue + break + self.assertEqual(count, 1) + + for count in [0, 1]: + try: + break + finally: + continue + self.assertEqual(count, 1) + + for count in [0, 1]: + try: + 1/0 + finally: + continue + break + self.assertEqual(count, 1) + + def test_return_in_finally(self): + def g1(): + try: + pass + finally: + return 1 + self.assertEqual(g1(), 1) + + def g2(): + try: + return 2 + finally: + return 3 + self.assertEqual(g2(), 3) + + def g3(): + try: + 1/0 + finally: + return 4 + self.assertEqual(g3(), 4) + + def test_yield(self): + # Allowed as standalone statement + def g(): yield 1 + def g(): yield from () + # Allowed as RHS of assignment + def g(): x = yield 1 + def g(): x = yield from () + # Ordinary yield accepts implicit tuples + def g(): yield 1, 1 + def g(): x = yield 1, 1 + # 'yield from' does not + check_syntax_error(self, "def g(): yield from (), 1") + 
check_syntax_error(self, "def g(): x = yield from (), 1") + # Requires parentheses as subexpression + def g(): 1, (yield 1) + def g(): 1, (yield from ()) + check_syntax_error(self, "def g(): 1, yield 1") + check_syntax_error(self, "def g(): 1, yield from ()") + # Requires parentheses as call argument + def g(): f((yield 1)) + def g(): f((yield 1), 1) + def g(): f((yield from ())) + def g(): f((yield from ()), 1) + check_syntax_error(self, "def g(): f(yield 1)") + check_syntax_error(self, "def g(): f(yield 1, 1)") + check_syntax_error(self, "def g(): f(yield from ())") + check_syntax_error(self, "def g(): f(yield from (), 1)") + # Not allowed at top level + check_syntax_error(self, "yield") + check_syntax_error(self, "yield from") + # Not allowed at class scope + check_syntax_error(self, "class foo:yield 1") + check_syntax_error(self, "class foo:yield from ()") + # Check annotation refleak on SyntaxError + check_syntax_error(self, "def g(a:(yield)): pass") + + def test_yield_in_comprehensions(self): + # Check yield in comprehensions + def g(): [x for x in [(yield 1)]] + def g(): [x for x in [(yield from ())]] + + check = self.check_syntax_error + check("def g(): [(yield x) for x in ()]", + "'yield' inside list comprehension") + check("def g(): [x for x in () if not (yield x)]", + "'yield' inside list comprehension") + check("def g(): [y for x in () for y in [(yield x)]]", + "'yield' inside list comprehension") + check("def g(): {(yield x) for x in ()}", + "'yield' inside set comprehension") + check("def g(): {(yield x): x for x in ()}", + "'yield' inside dict comprehension") + check("def g(): {x: (yield x) for x in ()}", + "'yield' inside dict comprehension") + check("def g(): ((yield x) for x in ())", + "'yield' inside generator expression") + check("def g(): [(yield from x) for x in ()]", + "'yield' inside list comprehension") + check("class C: [(yield x) for x in ()]", + "'yield' inside list comprehension") + check("[(yield x) for x in ()]", + "'yield' inside list comprehension") + + def test_raise(self): + # 'raise' test [',' test] + try: raise RuntimeError('just testing') + except RuntimeError: pass + try: raise KeyboardInterrupt + except KeyboardInterrupt: pass + + def test_import(self): + # 'import' dotted_as_names + import sys + import time, sys + # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names) + from time import time + from time import (time) + # not testable inside a function, but already done at top of the module + # from sys import * + from sys import path, argv + from sys import (path, argv) + from sys import (path, argv,) + + def test_global(self): + # 'global' NAME (',' NAME)* + global a + global a, b + global one, two, three, four, five, six, seven, eight, nine, ten + + def test_nonlocal(self): + # 'nonlocal' NAME (',' NAME)* + x = 0 + y = 0 + def f(): + nonlocal x + nonlocal x, y + + def test_assert(self): + # assertTruestmt: 'assert' test [',' test] + assert 1 + assert 1, 1 + assert lambda x:x + assert 1, lambda x:x+1 + + try: + assert True + except AssertionError as e: + self.fail("'assert True' should not have raised an AssertionError") + + try: + assert True, 'this should always pass' + except AssertionError as e: + self.fail("'assert True, msg' should not have " + "raised an AssertionError") + + # these tests fail if python is run with -O, so check __debug__ + @unittest.skipUnless(__debug__, "Won't work if __debug__ is False") + def testAssert2(self): + try: + assert 0, "msg" + except AssertionError as e: + self.assertEqual(e.args[0], 
"msg") + else: + self.fail("AssertionError not raised by assert 0") + + try: + assert False + except AssertionError as e: + self.assertEqual(len(e.args), 0) + else: + self.fail("AssertionError not raised by 'assert False'") + + + ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef + # Tested below + + def test_if(self): + # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] + if 1: pass + if 1: pass + else: pass + if 0: pass + elif 0: pass + if 0: pass + elif 0: pass + elif 0: pass + elif 0: pass + else: pass + + def test_while(self): + # 'while' test ':' suite ['else' ':' suite] + while 0: pass + while 0: pass + else: pass + + # Issue1920: "while 0" is optimized away, + # ensure that the "else" clause is still present. + x = 0 + while 0: + x = 1 + else: + x = 2 + self.assertEqual(x, 2) + + def test_for(self): + # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite] + for i in 1, 2, 3: pass + for i, j, k in (): pass + else: pass + class Squares: + def __init__(self, max): + self.max = max + self.sofar = [] + def __len__(self): return len(self.sofar) + def __getitem__(self, i): + if not 0 <= i < self.max: raise IndexError + n = len(self.sofar) + while n <= i: + self.sofar.append(n*n) + n = n+1 + return self.sofar[i] + n = 0 + for x in Squares(10): n = n+x + if n != 285: + self.fail('for over growing sequence') + + result = [] + for x, in [(1,), (2,), (3,)]: + result.append(x) + self.assertEqual(result, [1, 2, 3]) + + def test_try(self): + ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite] + ### | 'try' ':' suite 'finally' ':' suite + ### except_clause: 'except' [expr ['as' expr]] + try: + 1/0 + except ZeroDivisionError: + pass + else: + pass + try: 1/0 + except EOFError: pass + except TypeError as msg: pass + except: pass + else: pass + try: 1/0 + except (EOFError, TypeError, ZeroDivisionError): pass + try: 1/0 + except (EOFError, TypeError, ZeroDivisionError) as msg: pass + try: pass + finally: pass + + def test_suite(self): + # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT + if 1: pass + if 1: + pass + if 1: + # + # + # + pass + pass + # + pass + # + + def test_test(self): + ### and_test ('or' and_test)* + ### and_test: not_test ('and' not_test)* + ### not_test: 'not' not_test | comparison + if not 1: pass + if 1 and 1: pass + if 1 or 1: pass + if not not not 1: pass + if not 1 and 1 and 1: pass + if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass + + def test_comparison(self): + ### comparison: expr (comp_op expr)* + ### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not' + if 1: pass + x = (1 == 1) + if 1 == 1: pass + if 1 != 1: pass + if 1 < 1: pass + if 1 > 1: pass + if 1 <= 1: pass + if 1 >= 1: pass + if 1 is 1: pass + if 1 is not 1: pass + if 1 in (): pass + if 1 not in (): pass + if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass + + def test_binary_mask_ops(self): + x = 1 & 1 + x = 1 ^ 1 + x = 1 | 1 + + def test_shift_ops(self): + x = 1 << 1 + x = 1 >> 1 + x = 1 << 1 >> 1 + + def test_additive_ops(self): + x = 1 + x = 1 + 1 + x = 1 - 1 - 1 + x = 1 - 1 + 1 - 1 + 1 + + def test_multiplicative_ops(self): + x = 1 * 1 + x = 1 / 1 + x = 1 % 1 + x = 1 / 1 * 1 % 1 + + def test_unary_ops(self): + x = +1 + x = -1 + x = ~1 + x = ~1 ^ 1 & 1 | 1 & 1 ^ -1 + x = -1*1/1 + 1*1 - ---1*1 + + def test_selectors(self): + ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' 
NAME + ### subscript: expr | [expr] ':' [expr] + + import sys, time + c = sys.path[0] + x = time.time() + x = sys.modules['time'].time() + a = '01234' + c = a[0] + c = a[-1] + s = a[0:5] + s = a[:5] + s = a[0:] + s = a[:] + s = a[-5:] + s = a[:-1] + s = a[-4:-3] + # A rough test of SF bug 1333982. http://python.org/sf/1333982 + # The testing here is fairly incomplete. + # Test cases should include: commas with 1 and 2 colons + d = {} + d[1] = 1 + d[1,] = 2 + d[1,2] = 3 + d[1,2,3] = 4 + L = list(d) + L.sort(key=lambda x: (type(x).__name__, x)) + self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]') + + def test_atoms(self): + ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING + ### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [',']) + + x = (1) + x = (1 or 2 or 3) + x = (1 or 2 or 3, 2, 3) + + x = [] + x = [1] + x = [1 or 2 or 3] + x = [1 or 2 or 3, 2, 3] + x = [] + + x = {} + x = {'one': 1} + x = {'one': 1,} + x = {'one' or 'two': 1 or 2} + x = {'one': 1, 'two': 2} + x = {'one': 1, 'two': 2,} + x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6} + + x = {'one'} + x = {'one', 1,} + x = {'one', 'two', 'three'} + x = {2, 3, 4,} + + x = x + x = 'x' + x = 123 + + ### exprlist: expr (',' expr)* [','] + ### testlist: test (',' test)* [','] + # These have been exercised enough above + + def test_classdef(self): + # 'class' NAME ['(' [testlist] ')'] ':' suite + class B: pass + class B2(): pass + class C1(B): pass + class C2(B): pass + class D(C1, C2, B): pass + class C: + def meth1(self): pass + def meth2(self, arg): pass + def meth3(self, a1, a2): pass + + # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE + # decorators: decorator+ + # decorated: decorators (classdef | funcdef) + def class_decorator(x): return x + @class_decorator + class G: pass + + def test_dictcomps(self): + # dictorsetmaker: ( (test ':' test (comp_for | + # (',' test ':' test)* [','])) | + # (test (comp_for | (',' test)* [','])) ) + nums = [1, 2, 3] + self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4}) + + def test_listcomps(self): + # list comprehension tests + nums = [1, 2, 3, 4, 5] + strs = ["Apple", "Banana", "Coconut"] + spcs = [" Apple", " Banana ", "Coco nut "] + + self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut']) + self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15]) + self.assertEqual([x for x in nums if x > 2], [3, 4, 5]) + self.assertEqual([(i, s) for i in nums for s in strs], + [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'), + (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'), + (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'), + (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'), + (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')]) + self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]], + [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'), + (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'), + (5, 'Banana'), (5, 'Coconut')]) + self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)], + [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]]) + + def test_in_func(l): + return [0 < x < 3 for x in l if x > 2] + + self.assertEqual(test_in_func(nums), [False, False, False]) + + def test_nested_front(): + self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]], + [[1, 2], [3, 4], [5, 6]]) + + test_nested_front() + + check_syntax_error(self, "[i, s for i in nums for s in strs]") + check_syntax_error(self, "[x if y]") + + 
suppliers = [ + (1, "Boeing"), + (2, "Ford"), + (3, "Macdonalds") + ] + + parts = [ + (10, "Airliner"), + (20, "Engine"), + (30, "Cheeseburger") + ] + + suppart = [ + (1, 10), (1, 20), (2, 20), (3, 30) + ] + + x = [ + (sname, pname) + for (sno, sname) in suppliers + for (pno, pname) in parts + for (sp_sno, sp_pno) in suppart + if sno == sp_sno and pno == sp_pno + ] + + self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'), + ('Macdonalds', 'Cheeseburger')]) + + def test_genexps(self): + # generator expression tests + g = ([x for x in range(10)] for x in range(1)) + self.assertEqual(next(g), [x for x in range(10)]) + try: + next(g) + self.fail('should produce StopIteration exception') + except StopIteration: + pass + + a = 1 + try: + g = (a for d in a) + next(g) + self.fail('should produce TypeError') + except TypeError: + pass + + self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd']) + self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy']) + + a = [x for x in range(10)] + b = (x for x in (y for y in a)) + self.assertEqual(sum(b), sum([x for x in range(10)])) + + self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)])) + self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2])) + self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)])) + self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)])) + self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)])) + self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)])) + self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0) + check_syntax_error(self, "foo(x for x in range(10), 100)") + check_syntax_error(self, "foo(100, x for x in range(10))") + + def test_comprehension_specials(self): + # test for outmost iterable precomputation + x = 10; g = (i for i in range(x)); x = 5 + self.assertEqual(len(list(g)), 10) + + # This should hold, since we're only precomputing outmost iterable. + x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x)) + x = 5; t = True; + self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g)) + + # Grammar allows multiple adjacent 'if's in listcomps and genexps, + # even though it's silly. Make sure it works (ifelse broke this.) + self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7]) + self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7]) + + # verify unpacking single element tuples in listcomp/genexp. 
+ self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6]) + self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9]) + + def test_with_statement(self): + class manager(object): + def __enter__(self): + return (1, 2) + def __exit__(self, *args): + pass + + with manager(): + pass + with manager() as x: + pass + with manager() as (x, y): + pass + with manager(), manager(): + pass + with manager() as x, manager() as y: + pass + with manager() as x, manager(): + pass + + def test_if_else_expr(self): + # Test ifelse expressions in various cases + def _checkeval(msg, ret): + "helper to check that evaluation of expressions is done correctly" + print(msg) + return ret + + # the next line is not allowed anymore + #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True]) + self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True]) + self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True]) + self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5) + self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5) + self.assertEqual((5 and 6 if 0 else 1), 1) + self.assertEqual(((5 and 6) if 0 else 1), 1) + self.assertEqual((5 and (6 if 1 else 1)), 6) + self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3) + self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1) + self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5) + self.assertEqual((not 5 if 1 else 1), False) + self.assertEqual((not 5 if 0 else 1), 1) + self.assertEqual((6 + 1 if 1 else 2), 7) + self.assertEqual((6 - 1 if 1 else 2), 5) + self.assertEqual((6 * 2 if 1 else 4), 12) + self.assertEqual((6 / 2 if 1 else 3), 3) + self.assertEqual((6 < 4 if 0 else 2), 2) + + def test_paren_evaluation(self): + self.assertEqual(16 // (4 // 2), 8) + self.assertEqual((16 // 4) // 2, 2) + self.assertEqual(16 // 4 // 2, 2) + self.assertTrue(False is (2 is 3)) + self.assertFalse((False is 2) is 3) + self.assertFalse(False is 2 is 3) + + def test_matrix_mul(self): + # This is not intended to be a comprehensive test, rather just to be few + # samples of the @ operator in test_grammar.py. 
+ class M: + def __matmul__(self, o): + return 4 + def __imatmul__(self, o): + self.other = o + return self + m = M() + self.assertEqual(m @ m, 4) + m @= 42 + self.assertEqual(m.other, 42) + + def test_async_await(self): + async def test(): + def sum(): + pass + if 1: + await someobj() + + self.assertEqual(test.__name__, 'test') + self.assertTrue(bool(test.__code__.co_flags & inspect.CO_COROUTINE)) + + def decorator(func): + setattr(func, '_marked', True) + return func + + @decorator + async def test2(): + return 22 + self.assertTrue(test2._marked) + self.assertEqual(test2.__name__, 'test2') + self.assertTrue(bool(test2.__code__.co_flags & inspect.CO_COROUTINE)) + + def test_async_for(self): + class Done(Exception): pass + + class AIter: + def __aiter__(self): + return self + async def __anext__(self): + raise StopAsyncIteration + + async def foo(): + async for i in AIter(): + pass + async for i, j in AIter(): + pass + async for i in AIter(): + pass + else: + pass + raise Done + + with self.assertRaises(Done): + foo().send(None) + + def test_async_with(self): + class Done(Exception): pass + + class manager: + async def __aenter__(self): + return (1, 2) + async def __aexit__(self, *exc): + return False + + async def foo(): + async with manager(): + pass + async with manager() as x: + pass + async with manager() as (x, y): + pass + async with manager(), manager(): + pass + async with manager() as x, manager() as y: + pass + async with manager() as x, manager(): + pass + raise Done + + with self.assertRaises(Done): + foo().send(None) + + +if __name__ == '__main__': + unittest.main() From ad9a3c9ab0a57492102de3f99809ce219e579938 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 20 Apr 2018 14:13:42 -0700 Subject: [PATCH 43/83] Grammar test --- src/client/formatters/lineFormatter.ts | 114 +++++++++++---- src/client/language/tokenizer.ts | 66 +++++---- .../format/extension.lineFormatter.test.ts | 20 +++ src/test/language/tokenizer.test.ts | 33 ++++- .../pythonFiles/formatting/pythonGrammar.py | 134 +++++++++--------- 5 files changed, 241 insertions(+), 126 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 4b2817667940..b91cac98c14d 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -6,7 +6,7 @@ import Char from 'typescript-char'; import { BraceCounter } from '../language/braceCounter'; import { TextBuilder } from '../language/textBuilder'; import { TextRangeCollection } from '../language/textRangeCollection'; -import { isPythonKeyword, Tokenizer } from '../language/tokenizer'; +import { Tokenizer } from '../language/tokenizer'; import { ITextRangeCollection, IToken, TokenType } from '../language/types'; export class LineFormatter { @@ -43,7 +43,7 @@ export class LineFormatter { case TokenType.Comma: this.builder.append(','); - if (next && !this.isCloseBraceType(next.type)) { + if (next && !this.isCloseBraceType(next.type) && next.type !== TokenType.Colon) { this.builder.softAppendSpace(); } break; @@ -54,7 +54,7 @@ export class LineFormatter { } const id = this.text.substring(t.start, t.end); this.builder.append(id); - if (isPythonKeyword(id) && next && this.isOpenBraceType(next.type)) { + if (this.keywordWithSpaceAfter(id) && next && this.isOpenBraceType(next.type)) { // for x in () this.builder.softAppendSpace(); } @@ -89,8 +89,10 @@ export class LineFormatter { return this.builder.getText(); } + // tslint:disable-next-line:cyclomatic-complexity private handleOperator(index: 
number): void { const t = this.tokens.getItemAt(index); + const prev = index > 0 ? this.tokens.getItemAt(index - 1) : undefined; if (t.length === 1) { const opCode = this.text.charCodeAt(t.start); switch (opCode) { @@ -104,18 +106,36 @@ export class LineFormatter { case Char.ExclamationMark: this.builder.append(this.text[t.start]); return; + case Char.Asterisk: + if (prev && prev.type === TokenType.Identifier && prev.length === 6 && this.text.substr(prev.start, prev.length) === 'lambda') { + this.builder.softAppendSpace(); + this.builder.append('*'); + return; + } + break; default: break; } + } else if (t.length === 2) { + if (this.text.charCodeAt(t.start) === Char.Asterisk && this.text.charCodeAt(t.start + 1) === Char.Asterisk) { + if (!prev || (prev.type !== TokenType.Identifier && prev.type !== TokenType.Number)) { + this.builder.append('**'); + return; + } + if (prev && prev.type === TokenType.Identifier && prev.length === 6 && this.text.substr(prev.start, prev.length) === 'lambda') { + this.builder.softAppendSpace(); + this.builder.append('**'); + return; + } + } } + // Do not append space if operator is preceded by '(' or ',' as in foo(**kwarg) - if (index > 0) { - const prev = this.tokens.getItemAt(index - 1); - if (this.isOpenBraceType(prev.type) || prev.type === TokenType.Comma) { - this.builder.append(this.text.substring(t.start, t.end)); - return; - } + if (prev && (this.isOpenBraceType(prev.type) || prev.type === TokenType.Comma)) { + this.builder.append(this.text.substring(t.start, t.end)); + return; } + this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); this.builder.softAppendSpace(); @@ -140,19 +160,17 @@ export class LineFormatter { return; } - if (this.isEqualsInsideArguments(index - 1)) { + const prev = index > 0 ? this.tokens.getItemAt(index - 1) : undefined; + if (prev && prev.length === 1 && this.text.charCodeAt(prev.start) === Char.Equal && this.isEqualsInsideArguments(index - 1)) { // Don't add space around = inside function arguments. this.builder.append(this.text.substring(t.start, t.end)); return; } - if (index > 0) { - const prev = this.tokens.getItemAt(index - 1); - if (this.isOpenBraceType(prev.type) || prev.type === TokenType.Colon) { - // Don't insert space after (, [ or { . - this.builder.append(this.text.substring(t.start, t.end)); - return; - } + if (prev && (this.isOpenBraceType(prev.type) || prev.type === TokenType.Colon)) { + // Don't insert space after (, [ or { . + this.builder.append(this.text.substring(t.start, t.end)); + return; } if (t.type === TokenType.Unknown) { @@ -177,22 +195,47 @@ export class LineFormatter { } } private isEqualsInsideArguments(index: number): boolean { + // Since we don't have complete statement, this is mostly heuristics. + // Therefore the code may not be handling all possible ways of the + // argument list continuation. 
if (index < 1) { return false; } + const prev = this.tokens.getItemAt(index - 1); - if (prev.type === TokenType.Identifier) { - if (index >= 2) { - // (x=1 or ,x=1 - const prevPrev = this.tokens.getItemAt(index - 2); - return prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace; - } else if (index < this.tokens.count - 2) { - const next = this.tokens.getItemAt(index + 1); - const nextNext = this.tokens.getItemAt(index + 2); - // x=1, or x=1) - if (this.isValueType(next.type)) { - return nextNext.type === TokenType.Comma || nextNext.type === TokenType.CloseBrace; - } + if (prev.type !== TokenType.Identifier) { + return false; + } + + const first = this.tokens.getItemAt(0); + if (first.type === TokenType.Comma) { + return true; // Line starts with commma + } + + const last = this.tokens.getItemAt(this.tokens.count - 1); + if (last.type === TokenType.Comma) { + return true; // Line ends in comma + } + + if (index >= 2) { + // (x=1 or ,x=1 + const prevPrev = this.tokens.getItemAt(index - 2); + return prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace; + } + + if (index >= this.tokens.count - 2) { + return false; + } + + const next = this.tokens.getItemAt(index + 1); + const nextNext = this.tokens.getItemAt(index + 2); + // x=1, or x=1) + if (this.isValueType(next.type)) { + if (nextNext.type === TokenType.CloseBrace) { + return true; + } + if (nextNext.type === TokenType.Comma) { + return last.type === TokenType.CloseBrace; } } return false; @@ -219,4 +262,17 @@ export class LineFormatter { } return false; } + private keywordWithSpaceAfter(s: string): boolean { + return s === 'in' || s === 'return' || s === 'and' || + s === 'or' || s === 'not' || s === 'from' || + s === 'import' || s === 'except' || s === 'for' || + s === 'as' || s === 'is'; + } + private operatorWithoutSpaceAfter(t: IToken): boolean { + if (t.length === 2) { + const text = this.text.substr(t.start, t.length); + return text === '**'; + } + return false; + } } diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 9d229920f515..7ceafdccb0e6 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -26,18 +26,6 @@ class Token extends TextRange implements IToken { } } -const pythonKeywords = [ - 'and', 'assert', 'break', 'class', 'continue', 'def', 'del', - 'elif', 'else', 'except', 'exec', 'False', 'finally', 'for', 'from', - 'global', 'if', 'import', 'in', 'is', 'lambda', 'None', 'nonlocal', - 'not', 'or', 'pass', 'print', 'raise', 'return', 'True', 'try', - 'while', 'with', 'yield' -]; - -export function isPythonKeyword(s: string): boolean { - return pythonKeywords.find((value, index) => value === s) ? 
true : false; -} - export class Tokenizer implements ITokenizer { private cs: ICharacterStream = new CharacterStream(''); private tokens: IToken[] = []; @@ -92,15 +80,17 @@ export class Tokenizer implements ITokenizer { // tslint:disable-next-line:cyclomatic-complexity private handleCharacter(): boolean { - // f-strings - const fString = this.cs.currentChar === Char.f && (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote); - if (fString) { - this.cs.moveNext(); - } - const quoteType = this.getQuoteType(); - if (quoteType !== QuoteType.None) { - this.handleString(quoteType, fString); - return true; + // f-strings, b-strings, etc + const stringPrefixLength = this.getStringPrefixLength(); + if (stringPrefixLength >= 0) { + // Indeed a string + this.cs.advance(stringPrefixLength); + + const quoteType = this.getQuoteType(); + if (quoteType !== QuoteType.None) { + this.handleString(quoteType, stringPrefixLength); + return true; + } } if (this.cs.currentChar === Char.Hash) { this.handleComment(); @@ -138,9 +128,6 @@ export class Tokenizer implements ITokenizer { case Char.Colon: this.tokens.push(new Token(TokenType.Colon, this.cs.position, 1)); break; - case Char.At: - this.tokens.push(new Token(TokenType.Operator, this.cs.position, 1)); - break; default: if (this.isPossibleNumber()) { if (this.tryNumber()) { @@ -202,6 +189,7 @@ export class Tokenizer implements ITokenizer { if (prev.type === TokenType.OpenBrace || prev.type === TokenType.OpenBracket || prev.type === TokenType.Comma + || prev.type === TokenType.Colon || prev.type === TokenType.Semicolon || prev.type === TokenType.Operator) { return true; @@ -287,7 +275,6 @@ export class Tokenizer implements ITokenizer { const nextChar = this.cs.nextChar; switch (this.cs.currentChar) { case Char.Plus: - case Char.Hyphen: case Char.Ampersand: case Char.Bar: case Char.Caret: @@ -296,6 +283,10 @@ export class Tokenizer implements ITokenizer { length = nextChar === Char.Equal ? 2 : 1; break; + case Char.Hyphen: + length = nextChar === Char.Equal || nextChar === Char.Greater ? 2 : 1; + break; + case Char.Asterisk: if (nextChar === Char.Asterisk) { length = this.cs.lookAhead(2) === Char.Equal ? 3 : 2; @@ -331,7 +322,7 @@ export class Tokenizer implements ITokenizer { break; case Char.At: - length = nextChar === Char.Equal ? 2 : 0; + length = nextChar === Char.Equal ? 2 : 1; break; default: @@ -359,6 +350,25 @@ export class Tokenizer implements ITokenizer { this.tokens.push(new Token(TokenType.Comment, start, this.cs.position - start)); } + private getStringPrefixLength(): number { + if (this.cs.currentChar === Char.f && (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote)) { + return 1; // f-string + } + if (this.cs.currentChar === Char.b || this.cs.currentChar === Char.B || this.cs.currentChar === Char.u || this.cs.currentChar === Char.U) { + if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { + // b-string or u-string + return 1; + } + if (this.cs.nextChar === Char.r || this.cs.nextChar === Char.R) { + // b-string or u-string with 'r' suffix + if (this.cs.lookAhead(2) === Char.SingleQuote || this.cs.lookAhead(2) === Char.DoubleQuote) { + return 2; + } + } + } + return this.cs.currentChar === Char.SingleQuote || this.cs.currentChar === Char.DoubleQuote ? 
0 : -1; + } + private getQuoteType(): QuoteType { if (this.cs.currentChar === Char.SingleQuote) { return this.cs.nextChar === Char.SingleQuote && this.cs.lookAhead(2) === Char.SingleQuote @@ -373,8 +383,8 @@ export class Tokenizer implements ITokenizer { return QuoteType.None; } - private handleString(quoteType: QuoteType, fString: boolean): void { - const start = fString ? this.cs.position - 1 : this.cs.position; + private handleString(quoteType: QuoteType, stringPrefixLength: number): void { + const start = this.cs.position - stringPrefixLength; if (quoteType === QuoteType.Single || quoteType === QuoteType.Double) { this.cs.moveNext(); this.skipToSingleEndQuote(quoteType === QuoteType.Single diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 2c94c69af2b9..a9cb0fa04447 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -88,6 +88,10 @@ suite('Formatting - line formatter', () => { const actual = formatter.formatLine(',x = 1,y =m)'); assert.equal(actual, ', x=1, y=m)'); }); + test('Equals in multiline arguments ending comma', () => { + const actual = formatter.formatLine('x = 1,y =m,'); + assert.equal(actual, 'x=1, y=m,'); + }); test('Operators without following space', () => { const actual = formatter.formatLine('foo( *a, ** b, ! c)'); assert.equal(actual, 'foo(*a, **b, !c)'); @@ -108,6 +112,22 @@ suite('Formatting - line formatter', () => { const actual = formatter.formatLine('abc \\n \\'); assert.equal(actual, 'abc \\n \\'); }); + test('Double asterisk', () => { + const actual = formatter.formatLine('a**2, ** k'); + assert.equal(actual, 'a ** 2, **k'); + }); + test('Lambda', () => { + const actual = formatter.formatLine('lambda * args, :0'); + assert.equal(actual, 'lambda *args,: 0'); + }); + test('Comma expression', () => { + const actual = formatter.formatLine('x=1,2,3'); + assert.equal(actual, 'x = 1, 2, 3'); + }); + test('is exression', () => { + const actual = formatter.formatLine('a( (False is 2) is 3)'); + assert.equal(actual, 'a((False is 2) is 3)'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index 923504041695..d7119b7b4f6f 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -133,6 +133,27 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(0).length, 9); assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); }); + test('Strings: b/u/r-string', () => { + const t = new Tokenizer(); + const tokens = t.tokenize('b"b" u"u" br"br" ur"ur"'); + assert.equal(tokens.count, 4); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 4); + assert.equal(tokens.getItemAt(1).type, TokenType.String); + assert.equal(tokens.getItemAt(1).length, 4); + assert.equal(tokens.getItemAt(2).type, TokenType.String); + assert.equal(tokens.getItemAt(2).length, 6); + assert.equal(tokens.getItemAt(3).type, TokenType.String); + assert.equal(tokens.getItemAt(3).length, 6); + }); + test('Strings: escape at the end of double quoted string ', () => { + const t = new Tokenizer(); + const tokens = t.tokenize('"quoted\\"\nx'); + assert.equal(tokens.count, 2); + assert.equal(tokens.getItemAt(0).type, TokenType.String); + assert.equal(tokens.getItemAt(0).length, 
9); + assert.equal(tokens.getItemAt(1).type, TokenType.Identifier); + }); test('Comments', () => { const t = new Tokenizer(); const tokens = t.tokenize(' #co"""mment1\n\t\n#comm\'ent2 '); @@ -249,6 +270,14 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(2).type, TokenType.Number); assert.equal(tokens.getItemAt(2).length, 11); }); + test('Decimal number operator', () => { + const t = new Tokenizer(); + const tokens = t.tokenize('a[: -1]'); + assert.equal(tokens.count, 5); + + assert.equal(tokens.getItemAt(3).type, TokenType.Number); + assert.equal(tokens.getItemAt(3).length, 2); + }); test('Floating point number', () => { const t = new Tokenizer(); const tokens = t.tokenize('3.0 .2 ++.3e+12 --.4e1'); @@ -278,7 +307,7 @@ suite('Language.Tokenizer', () => { '+ -' + '* ** / /= //=' + '*= += -= **= ' + - '& &= | |= ^ ^='; + '& &= | |= ^ ^= ->'; const tokens = new Tokenizer().tokenize(text); const lengths = [ 1, 2, 2, 3, @@ -286,7 +315,7 @@ suite('Language.Tokenizer', () => { 1, 1, 1, 2, 1, 2, 3, 2, 2, 2, 3, - 1, 2, 1, 2, 1, 2]; + 1, 2, 1, 2, 1, 2, 2]; assert.equal(tokens.count, lengths.length); for (let i = 0; i < tokens.count; i += 1) { const t = tokens.getItemAt(i); diff --git a/src/test/pythonFiles/formatting/pythonGrammar.py b/src/test/pythonFiles/formatting/pythonGrammar.py index 5224695aebe5..32b82285c12f 100644 --- a/src/test/pythonFiles/formatting/pythonGrammar.py +++ b/src/test/pythonFiles/formatting/pythonGrammar.py @@ -455,17 +455,17 @@ def v2(a, b, *rest): pass v0() v0(1) v0(1,) - v0(1,2) - v0(1,2,3,4,5,6,7,8,9,0) + v0(1, 2) + v0(1, 2, 3, 4, 5, 6, 7, 8, 9, 0) v1(1) v1(1,) - v1(1,2) - v1(1,2,3) - v1(1,2,3,4,5,6,7,8,9,0) - v2(1,2) - v2(1,2,3) - v2(1,2,3,4) - v2(1,2,3,4,5,6,7,8,9,0) + v1(1, 2) + v1(1, 2, 3) + v1(1, 2, 3, 4, 5, 6, 7, 8, 9, 0) + v2(1, 2) + v2(1, 2, 3) + v2(1, 2, 3, 4) + v2(1, 2, 3, 4, 5, 6, 7, 8, 9, 0) def d01(a=1): pass d01() @@ -473,26 +473,26 @@ def d01(a=1): pass d01(*(1,)) d01(*[] or [2]) d01(*() or (), *{} and (), **() or {}) - d01(**{'a':2}) - d01(**{'a':2} or {}) + d01(**{'a': 2}) + d01(**{'a': 2} or {}) def d11(a, b=1): pass d11(1) d11(1, 2) - d11(1, **{'b':2}) + d11(1, **{'b': 2}) def d21(a, b, c=1): pass d21(1, 2) d21(1, 2, 3) d21(*(1, 2, 3)) d21(1, *(2, 3)) d21(1, 2, *(3,)) - d21(1, 2, **{'c':3}) + d21(1, 2, **{'c': 3}) def d02(a=1, b=2): pass d02() d02(1) d02(1, 2) d02(*(1, 2)) d02(1, *(2,)) - d02(1, **{'b':2}) + d02(1, **{'b': 2}) d02(**{'a': 1, 'b': 2}) def d12(a, b=1, c=2): pass d12(1) @@ -508,7 +508,7 @@ def d01v(a=1, *rest): pass d01v(1, 2) d01v(*(1, 2, 3, 4)) d01v(*(1,)) - d01v(**{'a':2}) + d01v(**{'a': 2}) def d11v(a, b=1, *rest): pass d11v(1) d11v(1, 2) @@ -545,7 +545,7 @@ def d22v(a, b, c=1, d=2, *rest): pass # keyword argument type tests try: - str('x', **{b'foo':1 }) + str('x', **{b'foo': 1}) except TypeError: pass else: @@ -553,13 +553,13 @@ def d22v(a, b, c=1, d=2, *rest): pass # keyword only argument tests def pos0key1(*, key): return key pos0key1(key=100) - def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2 + def pos2key2(p1, p2, *, k1, k2=100): return p1, p2, k1, k2 pos2key2(1, 2, k1=100) pos2key2(1, 2, k1=100, k2=200) pos2key2(1, 2, k2=100, k1=200) - def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg - pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200) - pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100) + def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1, p2, k1, k2, kwarg + pos2key2dict(1, 2, k2=100, tokwarg1=100, tokwarg2=200) + pos2key2dict(1, 2, tokwarg1=100, tokwarg2=200, 
k2=100) self.assertRaises(SyntaxError, eval, "def f(*): pass") self.assertRaises(SyntaxError, eval, "def f(*,): pass") @@ -569,13 +569,13 @@ def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg def f(*args, **kwargs): return args, kwargs self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4), - {'x':2, 'y':5})) - self.assertEqual(f(1, *(2,3), 4), ((1, 2, 3, 4), {})) + {'x': 2, 'y': 5})) + self.assertEqual(f(1, *(2, 3), 4), ((1, 2, 3, 4), {})) self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)") - self.assertEqual(f(**{'eggs':'scrambled', 'spam':'fried'}), - ((), {'eggs':'scrambled', 'spam':'fried'})) - self.assertEqual(f(spam='fried', **{'eggs':'scrambled'}), - ((), {'eggs':'scrambled', 'spam':'fried'})) + self.assertEqual(f(**{'eggs': 'scrambled', 'spam': 'fried'}), + ((), {'eggs': 'scrambled', 'spam': 'fried'})) + self.assertEqual(f(spam='fried', **{'eggs': 'scrambled'}), + ((), {'eggs': 'scrambled', 'spam': 'fried'})) # Check ast errors in *args and *kwargs check_syntax_error(self, "f(*g(1=2))") @@ -590,7 +590,7 @@ def f(*x: str): pass self.assertEqual(f.__annotations__, {'x': str}) def f(**x: float): pass self.assertEqual(f.__annotations__, {'x': float}) - def f(x, y: 1+2): pass + def f(x, y: 1 + 2): pass self.assertEqual(f.__annotations__, {'y': 3}) def f(a, b: 1, c: 2, d): pass self.assertEqual(f.__annotations__, {'b': 1, 'c': 2}) @@ -641,21 +641,21 @@ def f(a, *, b, **kwds,): pass def test_lambdef(self): ### lambdef: 'lambda' [varargslist] ':' test - l1 = lambda : 0 + l1 = lambda: 0 self.assertEqual(l1(), 0) - l2 = lambda : a[d] # XXX just testing the expression - l3 = lambda : [2 < x for x in [-1, 3, 0]] + l2 = lambda: a[d] # XXX just testing the expression + l3 = lambda: [2 < x for x in [-1, 3, 0]] self.assertEqual(l3(), [0, 1, 0]) - l4 = lambda x = lambda y = lambda z=1 : z : y() : x() + l4 = lambda x = lambda y = lambda z = 1: z: y(): x() self.assertEqual(l4(), 1) l5 = lambda x, y, z=2: x + y + z self.assertEqual(l5(1, 2), 5) self.assertEqual(l5(1, 2, 3), 6) check_syntax_error(self, "lambda x: x = 2") check_syntax_error(self, "lambda (None,): None") - l6 = lambda x, y, *, k=20: x+y+k - self.assertEqual(l6(1,2), 1+2+20) - self.assertEqual(l6(1,2,k=10), 1+2+10) + l6 = lambda x, y, *, k=20: x + y + k + self.assertEqual(l6(1, 2), 1 + 2 + 20) + self.assertEqual(l6(1, 2, k=10), 1 + 2 + 10) # check that trailing commas are permitted l10 = lambda a,: 0 @@ -696,8 +696,8 @@ def test_expr_stmt(self): x = 1 x = 1, 2, 3 x = y = z = 1, 2, 3 - x, y, z = 1, 2, 3 - abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4) + x, y, z=1, 2, 3 + abc=a, b, c=x, y, z=xyz = 1, 2, (3, 4) check_syntax_error(self, "x + 1 = 1") check_syntax_error(self, "a + 1 = b + 2") @@ -729,8 +729,8 @@ def test_former_statements_refer_to_builtins(self): def test_del_stmt(self): # 'del' exprlist - abc = [1,2,3] - x, y, z = abc + abc = [1, 2, 3] + x, y, z=abc xyz = x, y, z del abc @@ -783,7 +783,7 @@ def test_break_continue_loop(self): # *must* written as a function, and the tracking vars *must* be function # arguments with default values. Otherwise, the test will loop and loop. 
- def test_inner(extra_burning_oil = 1, count=0): + def test_inner(extra_burning_oil=1, count=0): big_hippo = 2 while big_hippo: count += 1 @@ -830,7 +830,7 @@ def test_break_in_finally(self): while count < 2: count += 1 try: - 1/0 + 1 / 0 finally: break self.assertEqual(count, 1) @@ -854,7 +854,7 @@ def test_break_in_finally(self): for count in [0, 1]: self.assertEqual(count, 0) try: - 1/0 + 1 / 0 finally: break self.assertEqual(count, 0) @@ -883,7 +883,7 @@ def test_continue_in_finally(self): while count < 2: count += 1 try: - 1/0 + 1 / 0 finally: continue break @@ -906,7 +906,7 @@ def test_continue_in_finally(self): for count in [0, 1]: try: - 1/0 + 1 / 0 finally: continue break @@ -929,7 +929,7 @@ def g2(): def g3(): try: - 1/0 + 1 / 0 finally: return 4 self.assertEqual(g3(), 4) @@ -1035,8 +1035,8 @@ def test_assert(self): # assertTruestmt: 'assert' test [',' test] assert 1 assert 1, 1 - assert lambda x:x - assert 1, lambda x:x+1 + assert lambda x: x + assert 1, lambda x: x + 1 try: assert True @@ -1112,11 +1112,11 @@ def __getitem__(self, i): if not 0 <= i < self.max: raise IndexError n = len(self.sofar) while n <= i: - self.sofar.append(n*n) - n = n+1 + self.sofar.append(n * n) + n = n + 1 return self.sofar[i] n = 0 - for x in Squares(10): n = n+x + for x in Squares(10): n = n + x if n != 285: self.fail('for over growing sequence') @@ -1130,19 +1130,19 @@ def test_try(self): ### | 'try' ':' suite 'finally' ':' suite ### except_clause: 'except' [expr ['as' expr]] try: - 1/0 + 1 / 0 except ZeroDivisionError: pass else: pass - try: 1/0 + try: 1 / 0 except EOFError: pass except TypeError as msg: pass except: pass else: pass - try: 1/0 + try: 1 / 0 except (EOFError, TypeError, ZeroDivisionError): pass - try: 1/0 + try: 1 / 0 except (EOFError, TypeError, ZeroDivisionError) as msg: pass try: pass finally: pass @@ -1217,7 +1217,7 @@ def test_unary_ops(self): x = -1 x = ~1 x = ~1 ^ 1 & 1 | 1 & 1 ^ -1 - x = -1*1/1 + 1*1 - ---1*1 + x = -1 * 1 / 1 + 1 * 1 - -1 * 1 def test_selectors(self): ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' 
NAME @@ -1243,8 +1243,8 @@ def test_selectors(self): d = {} d[1] = 1 d[1,] = 2 - d[1,2] = 3 - d[1,2,3] = 4 + d[1, 2] = 3 + d[1, 2, 3] = 4 L = list(d) L.sort(key=lambda x: (type(x).__name__, x)) self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]') @@ -1308,7 +1308,7 @@ def test_dictcomps(self): # (',' test ':' test)* [','])) | # (test (comp_for | (',' test)* [','])) ) nums = [1, 2, 3] - self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4}) + self.assertEqual({i: i + 1 for i in nums}, {1: 2, 2: 3, 3: 4}) def test_listcomps(self): # list comprehension tests @@ -1329,7 +1329,7 @@ def test_listcomps(self): [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'), (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'), (5, 'Banana'), (5, 'Coconut')]) - self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)], + self.assertEqual([(lambda a:[a ** i for i in range(a + 1)])(j) for j in range(5)], [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]]) def test_in_func(l): @@ -1338,7 +1338,7 @@ def test_in_func(l): self.assertEqual(test_in_func(nums), [False, False, False]) def test_nested_front(): - self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]], + self.assertEqual([[y for y in [x, x + 1]] for x in [1, 3, 5]], [[1, 2], [3, 4], [5, 6]]) test_nested_front() @@ -1398,8 +1398,8 @@ def test_genexps(self): b = (x for x in (y for y in a)) self.assertEqual(sum(b), sum([x for x in range(10)])) - self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)])) - self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2])) + self.assertEqual(sum(x ** 2 for x in range(10)), sum([x ** 2 for x in range(10)])) + self.assertEqual(sum(x * x for x in range(10) if x % 2), sum([x * x for x in range(10) if x % 2])) self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)])) self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)])) self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)])) @@ -1414,13 +1414,13 @@ def test_comprehension_specials(self): self.assertEqual(len(list(g)), 10) # This should hold, since we're only precomputing outmost iterable. - x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x)) + x = 10; t = False; g = ((i, j) for i in range(x) if t for j in range(x)) x = 5; t = True; - self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g)) + self.assertEqual([(i, j) for i in range(10) for j in range(5)], list(g)) # Grammar allows multiple adjacent 'if's in listcomps and genexps, # even though it's silly. Make sure it works (ifelse broke this.) - self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7]) + self.assertEqual([x for x in range(10) if x % 2 if x % 3], [1, 5, 7]) self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7]) # verify unpacking single element tuples in listcomp/genexp. 
@@ -1456,8 +1456,8 @@ def _checkeval(msg, ret): # the next line is not allowed anymore #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True]) - self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True]) - self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True]) + self.assertEqual([x() for x in (lambda:True, lambda:False) if x()], [True]) + self.assertEqual([x(False) for x in (lambda x:False if x else True, lambda x:True if x else False) if x(False)], [True]) self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5) self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5) self.assertEqual((5 and 6 if 0 else 1), 1) @@ -1492,7 +1492,7 @@ def __imatmul__(self, o): self.other = o return self m = M() - self.assertEqual(m @ m, 4) + self.assertEqual(m@m, 4) m @= 42 self.assertEqual(m.other, 42) From ff8dd35c7ee770f10d87df33536b4ada5cc330a4 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 20 Apr 2018 15:18:03 -0700 Subject: [PATCH 44/83] Test baselines --- package.json | 6 +++++ src/client/formatters/lineFormatter.ts | 7 ----- src/test/definitions/hover.ptvs.test.ts | 32 +++++++++-------------- src/test/signature/signature.ptvs.test.ts | 3 +-- 4 files changed, 20 insertions(+), 28 deletions(-) diff --git a/package.json b/package.json index 74853cf944cd..1be8ae4855e9 100644 --- a/package.json +++ b/package.json @@ -1570,6 +1570,12 @@ "description": "Automatically add brackets for functions.", "scope": "resource" }, + "python.autoComplete.showAdvancedMembers": { + "type": "boolean", + "default": false, + "description": "Controls appearance of methods with double underscores in the completion list.", + "scope": "resource" + }, "python.workspaceSymbols.tagFilePath": { "type": "string", "default": "${workspaceFolder}/.vscode/tags", diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index b91cac98c14d..2c7b37580f11 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -268,11 +268,4 @@ export class LineFormatter { s === 'import' || s === 'except' || s === 'for' || s === 'as' || s === 'is'; } - private operatorWithoutSpaceAfter(t: IToken): boolean { - if (t.length === 2) { - const text = this.text.substr(t.start, t.length); - return text === '**'; - } - return false; - } } diff --git a/src/test/definitions/hover.ptvs.test.ts b/src/test/definitions/hover.ptvs.test.ts index d2a456efd4bd..4f0f014c7bff 100644 --- a/src/test/definitions/hover.ptvs.test.ts +++ b/src/test/definitions/hover.ptvs.test.ts @@ -53,9 +53,7 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'obj.method1:', 'method method1 of one.Class1 objects', - '```html', - 'This is method1', - '```' + 'This is method1' ]; verifySignatureLines(actual, expected); }); @@ -70,9 +68,7 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'two.ct().fun:', 'method fun of two.ct objects', - '```html', - 'This is fun', - '```' + 'This is fun' ]; verifySignatureLines(actual, expected); }); @@ -87,11 +83,9 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'Foo.bar:', 'four.Foo.bar() -> bool', - '```html', '说明 - keep this line, it works', 'delete following line, it works', '如果存在需要等待审批或正在执行的任务,将不刷新页面', - '```', 'declared in Foo' ]; verifySignatureLines(actual, expected); @@ -107,22 +101,26 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'four.showMessage:', 
'four.showMessage()', - '```html', 'Кюм ут жэмпэр пошжим льаборэж, коммюны янтэрэсщэт нам ед, декта игнота ныморэ жят эи.', - 'Шэа декам экшырки эи, эи зыд эррэм докэндё, векж факэтэ пэрчыквюэрёж ку.', - '```' + 'Шэа декам экшырки эи, эи зыд эррэм докэндё, векж факэтэ пэрчыквюэрёж ку.' ]; verifySignatureLines(actual, expected); }); test('Nothing for keywords (class)', async () => { const def = await openAndHover(fileOne, 5, 1); - assert.equal(def.length, 0, 'Definition length is incorrect'); + if (def.length > 0) { + const actual = normalizeMarkedString(def[0].contents[0]); + assert.equal(actual, '', 'Definition length is incorrect'); + } }); test('Nothing for keywords (for)', async () => { const def = await openAndHover(fileHover, 3, 1); - assert.equal(def!.length, 0, 'Definition length is incorrect'); + if (def.length > 0) { + const actual = normalizeMarkedString(def[0].contents[0]); + assert.equal(actual, '', 'Definition length is incorrect'); + } }); test('Highlighting Class', async () => { @@ -136,15 +134,13 @@ suite('Hover Definition (Analysis Engine)', () => { 'misc.Random:', 'class misc.Random(_random.Random)', 'Random number generator base class used by bound module functions.', - '```html', 'Used to instantiate instances of Random to get generators that don\'t', 'share state.', 'Class Random can also be subclassed if you want to use a different basic', 'generator of your own devising: in that case, override the following', 'methods: random(), seed(), getstate(), and setstate().', 'Optionally, implement a getrandbits() method so that randrange()', - 'can cover arbitrarily large ranges.', - '```' + 'can cover arbitrarily large ranges.' ]; verifySignatureLines(actual, expected); }); @@ -191,9 +187,7 @@ suite('Hover Definition (Analysis Engine)', () => { 'misc.Thread:', 'class misc.Thread(_Verbose)', 'A class that represents a thread of control.', - '```html', - 'This class can be safely subclassed in a limited fashion.', - '```' + 'This class can be safely subclassed in a limited fashion.' 
]; verifySignatureLines(actual, expected); }); diff --git a/src/test/signature/signature.ptvs.test.ts b/src/test/signature/signature.ptvs.test.ts index ad8e58508342..8e2f630756d4 100644 --- a/src/test/signature/signature.ptvs.test.ts +++ b/src/test/signature/signature.ptvs.test.ts @@ -79,8 +79,7 @@ suite('Signatures (Analysis Engine)', () => { new SignatureHelpResult(0, 8, 1, 1, 'stop'), new SignatureHelpResult(0, 9, 1, 1, 'stop'), new SignatureHelpResult(0, 10, 1, 1, 'stop'), - new SignatureHelpResult(0, 11, 1, 2, 'step'), - new SignatureHelpResult(1, 0, 1, 2, 'step') + new SignatureHelpResult(0, 11, 1, 2, 'step') ]; const document = await openDocument(path.join(autoCompPath, 'basicSig.py')); From d7806cadaee7a62e3e53999e00b02b15c0fcbc3c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 20 Apr 2018 16:01:43 -0700 Subject: [PATCH 45/83] Add news --- news/2 Fixes/1354.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 news/2 Fixes/1354.md diff --git a/news/2 Fixes/1354.md b/news/2 Fixes/1354.md new file mode 100644 index 000000000000..a5485ac00724 --- /dev/null +++ b/news/2 Fixes/1354.md @@ -0,0 +1 @@ +Multiple fixes to format on type \ No newline at end of file From 0b3f316b3fe57990c02a29c1a8b8cf9e93d0ba88 Mon Sep 17 00:00:00 2001 From: Brett Cannon Date: Mon, 23 Apr 2018 10:08:51 -0700 Subject: [PATCH 46/83] Pin dependency [skip ci] --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index e6ae3b5dae76..0b518adeb974 100644 --- a/package.json +++ b/package.json @@ -1866,7 +1866,7 @@ "md5": "2.2.1", "minimatch": "3.0.4", "named-js-regexp": "1.3.3", - "node-stream-zip": "^1.6.0", + "node-stream-zip": "1.6.0", "opn": "5.3.0", "pidusage": "1.2.0", "reflect-metadata": "0.1.12", From f000e5dfd9e4035079819c306a05a8298cc347c8 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 26 Apr 2018 10:14:06 -0700 Subject: [PATCH 47/83] Specify markdown as preferable format --- package.json | 6 ++++++ src/client/activation/analysis.ts | 1 + 2 files changed, 7 insertions(+) diff --git a/package.json b/package.json index 0b518adeb974..70c1dea19602 100644 --- a/package.json +++ b/package.json @@ -1163,6 +1163,12 @@ "default": "${workspaceFolder}/.env", "scope": "resource" }, + "python.jediEnabled": { + "type": "boolean", + "default": true, + "description": "Enables Jedi as IntelliSense engine instead of Microsoft Python Analysis Engine.", + "scope": "resource" + }, "python.jediPath": { "type": "string", "default": "", diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index d2e853dc7dda..858000d19302 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -215,6 +215,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { properties }, displayOptions: { + preferredFormat: 1, // Markdown trimDocumentationLines: false, maxDocumentationLineLength: 0, trimDocumentationText: false, From ef7c5c75b8e38a9ef99c867bb0f8c74cfc5e4b60 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 26 Apr 2018 12:12:49 -0700 Subject: [PATCH 48/83] Improve function argument detection --- src/client/formatters/lineFormatter.ts | 57 ++++---- src/client/typeFormatters/onEnterFormatter.ts | 16 +-- .../format/extension.lineFormatter.test.ts | 136 ++++++++++-------- .../pythonFiles/formatting/pythonGrammar.py | 8 +- 4 files changed, 123 insertions(+), 94 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 
2c7b37580f11..28a71f6ff08d 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -3,6 +3,7 @@ // tslint:disable-next-line:import-name import Char from 'typescript-char'; +import { TextDocument } from 'vscode'; import { BraceCounter } from '../language/braceCounter'; import { TextBuilder } from '../language/textBuilder'; import { TextRangeCollection } from '../language/textRangeCollection'; @@ -14,11 +15,15 @@ export class LineFormatter { private tokens: ITextRangeCollection = new TextRangeCollection([]); private braceCounter = new BraceCounter(); private text = ''; + private document?: TextDocument; + private lineNumber = 0; // tslint:disable-next-line:cyclomatic-complexity - public formatLine(text: string): string { - this.tokens = new Tokenizer().tokenize(text); - this.text = text; + public formatLine(document: TextDocument, lineNumber: number): string { + this.document = document; + this.lineNumber = lineNumber; + this.text = document.lineAt(lineNumber).text; + this.tokens = new Tokenizer().tokenize(this.text); this.builder = new TextBuilder(); this.braceCounter = new BraceCounter(); @@ -107,7 +112,7 @@ export class LineFormatter { this.builder.append(this.text[t.start]); return; case Char.Asterisk: - if (prev && prev.type === TokenType.Identifier && prev.length === 6 && this.text.substr(prev.start, prev.length) === 'lambda') { + if (prev && this.isKeyword(prev, 'lambda')) { this.builder.softAppendSpace(); this.builder.append('*'); return; @@ -122,7 +127,7 @@ export class LineFormatter { this.builder.append('**'); return; } - if (prev && prev.type === TokenType.Identifier && prev.length === 6 && this.text.substr(prev.start, prev.length) === 'lambda') { + if (prev && this.isKeyword(prev, 'lambda')) { this.builder.softAppendSpace(); this.builder.append('**'); return; @@ -194,6 +199,8 @@ export class LineFormatter { this.builder.softAppendSpace(); } } + + // tslint:disable-next-line:cyclomatic-complexity private isEqualsInsideArguments(index: number): boolean { // Since we don't have complete statement, this is mostly heuristics. // Therefore the code may not be handling all possible ways of the @@ -217,28 +224,31 @@ export class LineFormatter { return true; // Line ends in comma } - if (index >= 2) { - // (x=1 or ,x=1 - const prevPrev = this.tokens.getItemAt(index - 2); - return prevPrev.type === TokenType.Comma || prevPrev.type === TokenType.OpenBrace; + if (last.type === TokenType.Comment && this.tokens.count > 1 && this.tokens.getItemAt(this.tokens.count - 2).type === TokenType.Comma) { + return true; // Line ends in comma and then comment } - if (index >= this.tokens.count - 2) { - return false; + if (this.document) { + const prevLine = this.lineNumber > 0 ? 
this.document.lineAt(this.lineNumber - 1).text : ''; + const prevLineTokens = new Tokenizer().tokenize(prevLine); + if (prevLineTokens.count > 0) { + const lastOnPrevLine = prevLineTokens.getItemAt(prevLineTokens.count - 1); + if (lastOnPrevLine.type === TokenType.Comma) { + return true; // Previous line ends in comma + } + if (lastOnPrevLine.type === TokenType.Comment && prevLineTokens.count > 1 && prevLineTokens.getItemAt(prevLineTokens.count - 2).type === TokenType.Comma) { + return true; // Previous line ends in comma and then comment + } + } } - const next = this.tokens.getItemAt(index + 1); - const nextNext = this.tokens.getItemAt(index + 2); - // x=1, or x=1) - if (this.isValueType(next.type)) { - if (nextNext.type === TokenType.CloseBrace) { + for (let i = 0; i < index; i += 1) { + const t = this.tokens.getItemAt(i); + if (this.isKeyword(t, 'lambda')) { return true; } - if (nextNext.type === TokenType.Comma) { - return last.type === TokenType.CloseBrace; - } } - return false; + return this.braceCounter.isOpened(TokenType.OpenBrace); } private isOpenBraceType(type: TokenType): boolean { @@ -250,10 +260,6 @@ export class LineFormatter { private isBraceType(type: TokenType): boolean { return this.isOpenBraceType(type) || this.isCloseBraceType(type); } - private isValueType(type: TokenType): boolean { - return type === TokenType.Identifier || type === TokenType.Unknown || - type === TokenType.Number || type === TokenType.String; - } private isMultipleStatements(index: number): boolean { for (let i = index; i >= 0; i -= 1) { if (this.tokens.getItemAt(i).type === TokenType.Semicolon) { @@ -268,4 +274,7 @@ export class LineFormatter { s === 'import' || s === 'except' || s === 'for' || s === 'as' || s === 'is'; } + private isKeyword(t: IToken, keyword: string): boolean { + return t.type === TokenType.Identifier && t.length === keyword.length && this.text.substr(t.start, t.length) === keyword; + } } diff --git a/src/client/typeFormatters/onEnterFormatter.ts b/src/client/typeFormatters/onEnterFormatter.ts index 013b2d2a85f9..3e17e714d6ee 100644 --- a/src/client/typeFormatters/onEnterFormatter.ts +++ b/src/client/typeFormatters/onEnterFormatter.ts @@ -1,20 +1,20 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-import * as vscode from 'vscode'; +import { CancellationToken, FormattingOptions, OnTypeFormattingEditProvider, Position, TextDocument, TextEdit } from 'vscode'; import { LineFormatter } from '../formatters/lineFormatter'; import { TokenizerMode, TokenType } from '../language/types'; import { getDocumentTokens } from '../providers/providerUtilities'; -export class OnEnterFormatter implements vscode.OnTypeFormattingEditProvider { +export class OnEnterFormatter implements OnTypeFormattingEditProvider { private readonly formatter = new LineFormatter(); public provideOnTypeFormattingEdits( - document: vscode.TextDocument, - position: vscode.Position, + document: TextDocument, + position: Position, ch: string, - options: vscode.FormattingOptions, - cancellationToken: vscode.CancellationToken): vscode.TextEdit[] { + options: FormattingOptions, + cancellationToken: CancellationToken): TextEdit[] { if (position.line === 0) { return []; } @@ -30,10 +30,10 @@ export class OnEnterFormatter implements vscode.OnTypeFormattingEditProvider { return []; } } - const formatted = this.formatter.formatLine(prevLine.text); + const formatted = this.formatter.formatLine(document, prevLine.lineNumber); if (formatted === prevLine.text) { return []; } - return [new vscode.TextEdit(prevLine.range, formatted)]; + return [new TextEdit(prevLine.range, formatted)]; } } diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index a9cb0fa04447..46ca5e46f816 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -5,6 +5,8 @@ import * as assert from 'assert'; import * as fs from 'fs'; import * as path from 'path'; +import * as TypeMoq from 'typemoq'; +import { TextDocument, TextLine } from 'vscode'; import '../../client/common/extensions'; import { LineFormatter } from '../../client/formatters/lineFormatter'; @@ -17,124 +19,142 @@ suite('Formatting - line formatter', () => { const formatter = new LineFormatter(); test('Operator spacing', () => { - const actual = formatter.formatLine('( x +1 )*y/ 3'); - assert.equal(actual, '(x + 1) * y / 3'); + testFormatLine('( x +1 )*y/ 3', '(x + 1) * y / 3'); }); test('Braces spacing', () => { - const actual = formatter.formatLine('foo =(0 ,)'); - assert.equal(actual, 'foo = (0,)'); + testFormatLine('foo =(0 ,)', 'foo = (0,)'); }); test('Function arguments', () => { - const actual = formatter.formatLine('foo (0 , x= 1, (3+7) , y , z )'); - assert.equal(actual, 'foo(0, x=1, (3 + 7), y, z)'); + testFormatLine('z=foo (0 , x= 1, (3+7) , y , z )', + 'z = foo(0, x=1, (3 + 7), y, z)'); }); test('Colon regular', () => { - const actual = formatter.formatLine('if x == 4 : print x,y; x,y= y, x'); - assert.equal(actual, 'if x == 4: print x, y; x, y = y, x'); + testFormatLine('if x == 4 : print x,y; x,y= y, x', + 'if x == 4: print x, y; x, y = y, x'); }); test('Colon slices', () => { - const actual = formatter.formatLine('x[1: 30]'); - assert.equal(actual, 'x[1:30]'); + testFormatLine('x[1: 30]', 'x[1:30]'); }); test('Colon slices in arguments', () => { - const actual = formatter.formatLine('spam ( ham[ 1 :3], {eggs : 2})'); - assert.equal(actual, 'spam(ham[1:3], {eggs: 2})'); + testFormatLine('spam ( ham[ 1 :3], {eggs : 2})', + 'spam(ham[1:3], {eggs: 2})'); }); test('Colon slices with double colon', () => { - const actual = formatter.formatLine('ham [1:9 ], ham[ 1: 9: 3], ham[: 9 :3], ham[1: :3], ham [ 1: 9:]'); - assert.equal(actual, 'ham[1:9], ham[1:9:3], ham[:9:3], ham[1::3], 
ham[1:9:]'); + testFormatLine('ham [1:9 ], ham[ 1: 9: 3], ham[: 9 :3], ham[1: :3], ham [ 1: 9:]', + 'ham[1:9], ham[1:9:3], ham[:9:3], ham[1::3], ham[1:9:]'); }); test('Colon slices with operators', () => { - const actual = formatter.formatLine('ham [lower+ offset :upper+offset]'); - assert.equal(actual, 'ham[lower + offset:upper + offset]'); + testFormatLine('ham [lower+ offset :upper+offset]', + 'ham[lower + offset:upper + offset]'); }); test('Colon slices with functions', () => { - const actual = formatter.formatLine('ham[ : upper_fn ( x) : step_fn(x )], ham[ :: step_fn(x)]'); - assert.equal(actual, 'ham[:upper_fn(x):step_fn(x)], ham[::step_fn(x)]'); + testFormatLine('ham[ : upper_fn ( x) : step_fn(x )], ham[ :: step_fn(x)]', + 'ham[:upper_fn(x):step_fn(x)], ham[::step_fn(x)]'); }); test('Colon in for loop', () => { - const actual = formatter.formatLine('for index in range( len(fruits) ): '); - assert.equal(actual, 'for index in range(len(fruits)):'); + testFormatLine('for index in range( len(fruits) ): ', + 'for index in range(len(fruits)):'); }); test('Nested braces', () => { - const actual = formatter.formatLine('[ 1 :[2: (x,),y]]{1}'); - assert.equal(actual, '[1:[2:(x,), y]]{1}'); + testFormatLine('[ 1 :[2: (x,),y]]{1}', '[1:[2:(x,), y]]{1}'); }); test('Trailing comment', () => { - const actual = formatter.formatLine('x=1 # comment'); - assert.equal(actual, 'x = 1 # comment'); + testFormatLine('x=1 # comment', 'x = 1 # comment'); }); test('Single comment', () => { - const actual = formatter.formatLine('# comment'); - assert.equal(actual, '# comment'); + testFormatLine('# comment', '# comment'); }); test('Comment with leading whitespace', () => { - const actual = formatter.formatLine(' # comment'); - assert.equal(actual, ' # comment'); + testFormatLine(' # comment', ' # comment'); }); test('Equals in first argument', () => { - const actual = formatter.formatLine('foo(x =0)'); - assert.equal(actual, 'foo(x=0)'); + testFormatLine('foo(x =0)', 'foo(x=0)'); }); test('Equals in second argument', () => { - const actual = formatter.formatLine('foo(x,y= \"a\",'); - assert.equal(actual, 'foo(x, y=\"a\",'); + testFormatLine('foo(x,y= \"a\",', 'foo(x, y=\"a\",'); }); test('Equals in multiline arguments', () => { - const actual = formatter.formatLine('x = 1,y =-2)'); - assert.equal(actual, 'x=1, y=-2)'); + testFormatLine2('foo(a,', 'x = 1,y =-2)', 'x=1, y=-2)'); }); test('Equals in multiline arguments starting comma', () => { - const actual = formatter.formatLine(',x = 1,y =m)'); - assert.equal(actual, ', x=1, y=m)'); + testFormatLine(',x = 1,y =m)', ', x=1, y=m)'); }); test('Equals in multiline arguments ending comma', () => { - const actual = formatter.formatLine('x = 1,y =m,'); - assert.equal(actual, 'x=1, y=m,'); + testFormatLine('x = 1,y =m,', 'x=1, y=m,'); }); test('Operators without following space', () => { - const actual = formatter.formatLine('foo( *a, ** b, ! c)'); - assert.equal(actual, 'foo(*a, **b, !c)'); + testFormatLine('foo( *a, ** b, ! 
c)', 'foo(*a, **b, !c)'); }); test('Brace after keyword', () => { - const actual = formatter.formatLine('for x in(1,2,3)'); - assert.equal(actual, 'for x in (1, 2, 3)'); + testFormatLine('for x in(1,2,3)', 'for x in (1, 2, 3)'); }); test('Dot operator', () => { - const actual = formatter.formatLine('x.y'); - assert.equal(actual, 'x.y'); + testFormatLine('x.y', 'x.y'); }); test('Unknown tokens no space', () => { - const actual = formatter.formatLine('abc\\n\\'); - assert.equal(actual, 'abc\\n\\'); + testFormatLine('abc\\n\\', 'abc\\n\\'); }); test('Unknown tokens with space', () => { - const actual = formatter.formatLine('abc \\n \\'); - assert.equal(actual, 'abc \\n \\'); + testFormatLine('abc \\n \\', 'abc \\n \\'); }); test('Double asterisk', () => { - const actual = formatter.formatLine('a**2, ** k'); - assert.equal(actual, 'a ** 2, **k'); + testFormatLine('a**2, ** k', 'a ** 2, **k'); }); test('Lambda', () => { - const actual = formatter.formatLine('lambda * args, :0'); - assert.equal(actual, 'lambda *args,: 0'); + testFormatLine('lambda * args, :0', 'lambda *args,: 0'); }); test('Comma expression', () => { - const actual = formatter.formatLine('x=1,2,3'); - assert.equal(actual, 'x = 1, 2, 3'); + testFormatLine('x=1,2,3', 'x = 1, 2, 3'); }); test('is exression', () => { - const actual = formatter.formatLine('a( (False is 2) is 3)'); - assert.equal(actual, 'a((False is 2) is 3)'); + testFormatLine('a( (False is 2) is 3)', 'a((False is 2) is 3)'); + }); + test('Function returning tuple', () => { + testFormatLine('x,y=f(a)', 'x, y = f(a)'); }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); + let prevLine = ''; for (let i = 0; i < lines.length; i += 1) { const line = lines[i]; - const actual = formatter.formatLine(line); - assert.equal(actual, line, `Line ${i + 1} changed: '${line}' to '${actual}'`); + const actual = formatLine2(prevLine, line); + assert.equal(actual, line, `Line ${i + 1} changed: '${line.trim()}' to '${actual.trim()}'`); + prevLine = line; } }); + + function testFormatLine(text: string, expected: string): void { + const actual = formatLine(text); + assert.equal(actual, expected); + } + + function formatLine(text: string): string { + const line = TypeMoq.Mock.ofType(); + line.setup(x => x.text).returns(() => text); + + const document = TypeMoq.Mock.ofType(); + document.setup(x => x.lineAt(TypeMoq.It.isAnyNumber())).returns(() => line.object); + + return formatter.formatLine(document.object, 0); + } + + function formatLine2(prevLineText: string, lineText: string): string { + const thisLine = TypeMoq.Mock.ofType(); + thisLine.setup(x => x.text).returns(() => lineText); + + const prevLine = TypeMoq.Mock.ofType(); + prevLine.setup(x => x.text).returns(() => prevLineText); + + const document = TypeMoq.Mock.ofType(); + document.setup(x => x.lineAt(0)).returns(() => prevLine.object); + document.setup(x => x.lineAt(1)).returns(() => thisLine.object); + + return formatter.formatLine(document.object, 1); + } + + function testFormatLine2(prevLineText: string, lineText: string, expected: string): void { + const actual = formatLine2(prevLineText, lineText); + assert.equal(actual, expected); + } }); diff --git a/src/test/pythonFiles/formatting/pythonGrammar.py b/src/test/pythonFiles/formatting/pythonGrammar.py index 32b82285c12f..1a17d94302b5 100644 --- a/src/test/pythonFiles/formatting/pythonGrammar.py +++ b/src/test/pythonFiles/formatting/pythonGrammar.py @@ 
-646,7 +646,7 @@ def test_lambdef(self): l2 = lambda: a[d] # XXX just testing the expression l3 = lambda: [2 < x for x in [-1, 3, 0]] self.assertEqual(l3(), [0, 1, 0]) - l4 = lambda x = lambda y = lambda z = 1: z: y(): x() + l4 = lambda x=lambda y=lambda z=1: z: y(): x() self.assertEqual(l4(), 1) l5 = lambda x, y, z=2: x + y + z self.assertEqual(l5(1, 2), 5) @@ -696,8 +696,8 @@ def test_expr_stmt(self): x = 1 x = 1, 2, 3 x = y = z = 1, 2, 3 - x, y, z=1, 2, 3 - abc=a, b, c=x, y, z=xyz = 1, 2, (3, 4) + x, y, z = 1, 2, 3 + abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4) check_syntax_error(self, "x + 1 = 1") check_syntax_error(self, "a + 1 = b + 2") @@ -730,7 +730,7 @@ def test_former_statements_refer_to_builtins(self): def test_del_stmt(self): # 'del' exprlist abc = [1, 2, 3] - x, y, z=abc + x, y, z = abc xyz = x, y, z del abc From f4e88c0747e0886d84a79ee1d775ee61220eb6eb Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 27 Apr 2018 15:25:08 -0700 Subject: [PATCH 49/83] Specify markdown --- src/client/activation/analysis.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index d2e853dc7dda..d3c610a8da5e 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -215,6 +215,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { properties }, displayOptions: { + preferredFormat: 1, // markdown trimDocumentationLines: false, maxDocumentationLineLength: 0, trimDocumentationText: false, From abff21304313a95f849acd4bdd8d3e4b60fd626e Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 3 May 2018 13:06:48 -0700 Subject: [PATCH 50/83] Pythia setting --- src/client/activation/analysis.ts | 11 ++++- src/client/activation/downloader.ts | 58 ++++++++++++++++++------ src/client/common/platform/fileSystem.ts | 21 +++++++++ src/client/common/platform/types.ts | 2 + 4 files changed, 76 insertions(+), 16 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 65618c8ee03c..91cc647e27e8 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -77,11 +77,14 @@ export class AnalysisExtensionActivator implements IExtensionActivator { private async startLanguageServer(context: ExtensionContext, clientOptions: LanguageClientOptions): Promise { // Determine if we are running MSIL/Universal via dotnet or self-contained app. 
         const mscorlib = path.join(context.extensionPath, analysisEngineFolder, 'mscorlib.dll');
+        const downloader = new AnalysisEngineDownloader(this.services, analysisEngineFolder);
         let downloadPackage = false;
 
         const reporter = getTelemetryReporter();
         reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ENABLED);
 
+        await this.checkPythiaModel(context, downloader);
+
         if (!await this.fs.fileExistsAsync(mscorlib)) {
             // Depends on .NET Runtime or SDK
             this.languageClient = this.createSimpleLanguageClient(context, clientOptions);
             try {
@@ -100,7 +103,6 @@ export class AnalysisExtensionActivator implements IExtensionActivator {
         }
 
         if (downloadPackage) {
-            const downloader = new AnalysisEngineDownloader(this.services, analysisEngineFolder);
             await downloader.downloadAnalysisEngine(context);
             reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_DOWNLOADED);
         }
@@ -233,4 +235,11 @@ export class AnalysisExtensionActivator implements IExtensionActivator {
         const result = await ps.exec('dotnet', ['--version']).catch(() => { return { stdout: '' }; });
         return result.stdout.trim().startsWith('2.');
     }
+
+    private async checkPythiaModel(context: ExtensionContext, downloader: AnalysisEngineDownloader): Promise {
+        const settings = this.configuration.getSettings();
+        if (settings.pythiaEnabled) {
+            await downloader.downloadPythiaModel(context);
+        }
+    }
 }
diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts
index 98a2d2e1bfc2..d634f01c9cbc 100644
--- a/src/client/activation/downloader.ts
+++ b/src/client/activation/downloader.ts
@@ -1,13 +1,12 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 
-import * as fs from 'fs';
+import * as fileSystem from 'fs';
 import * as path from 'path';
 import * as request from 'request';
 import * as requestProgress from 'request-progress';
 import { ExtensionContext, OutputChannel, ProgressLocation, window } from 'vscode';
 import { STANDARD_OUTPUT_CHANNEL } from '../common/constants';
-import { noop } from '../common/core.utils';
 import { createDeferred, createTemporaryFile } from '../common/helpers';
 import { IFileSystem, IPlatformService } from '../common/platform/types';
 import { IOutputChannel } from '../common/types';
@@ -22,21 +21,28 @@
 const downloadUriPrefix = 'https://pvsc.blob.core.windows.net/python-analysis';
 const downloadBaseFileName = 'python-analysis-vscode';
 const downloadVersion = '0.1.0';
 const downloadFileExtension = '.nupkg';
+const pythiaModelName = 'model-sequence.json.gz';
 
 export class AnalysisEngineDownloader {
     private readonly output: OutputChannel;
     private readonly platform: IPlatformService;
     private readonly platformData: PlatformData;
+    private readonly fs: IFileSystem;
 
     constructor(private readonly services: IServiceContainer, private engineFolder: string) {
         this.output = this.services.get(IOutputChannel, STANDARD_OUTPUT_CHANNEL);
+        this.fs = this.services.get(IFileSystem);
         this.platform = this.services.get(IPlatformService);
-        this.platformData = new PlatformData(this.platform, this.services.get(IFileSystem));
+        this.platformData = new PlatformData(this.platform, this.fs);
     }
 
     public async downloadAnalysisEngine(context: ExtensionContext): Promise {
-        const localTempFilePath = await this.downloadFile();
+        const platformString = await this.platformData.getPlatformName();
+        const enginePackageFileName = `${downloadBaseFileName}-${platformString}.${downloadVersion}${downloadFileExtension}`;
+
+        let localTempFilePath = '';
         try {
+            localTempFilePath = await this.downloadFile(downloadUriPrefix, enginePackageFileName, 'Downloading Python Analysis Engine... ');
             await this.verifyDownload(localTempFilePath);
             await this.unpackArchive(context.extensionPath, localTempFilePath);
         } catch (err) {
@@ -44,19 +50,42 @@ export class AnalysisEngineDownloader {
             this.output.appendLine(err);
             throw new Error(err);
         } finally {
-            fs.unlink(localTempFilePath, noop);
+            if (localTempFilePath.length > 0) {
+                await this.fs.deleteFileAsync(localTempFilePath);
+            }
         }
     }
 
-    private async downloadFile(): Promise {
-        const platformString = await this.platformData.getPlatformName();
-        const remoteFileName = `${downloadBaseFileName}-${platformString}.${downloadVersion}${downloadFileExtension}`;
-        const uri = `${downloadUriPrefix}/${remoteFileName}`;
+    public async downloadPythiaModel(context: ExtensionContext): Promise {
+        const modelFolder = path.join(context.extensionPath, 'analysis', 'Pythia', 'model');
+        const localPath = path.join(modelFolder, pythiaModelName);
+        if (await this.fs.directoryExistsAsync(localPath)) {
+            return;
+        }
+
+        let localTempFilePath = '';
+        try {
+            localTempFilePath = await this.downloadFile(downloadUriPrefix, pythiaModelName, 'Downloading IntelliSense Model File... ');
+            await this.fs.createDirectoryAsync(modelFolder);
+            await this.fs.copyFileAsync(localTempFilePath, localPath);
+        } catch (err) {
+            this.output.appendLine('failed.');
+            this.output.appendLine(err);
+            throw new Error(err);
+        } finally {
+            if (localTempFilePath.length > 0) {
+                await this.fs.deleteFileAsync(localTempFilePath);
+            }
+        }
+    }
+
+    private async downloadFile(location: string, fileName: string, title: string): Promise {
+        const uri = `${location}/${fileName}`;
         this.output.append(`Downloading ${uri}... `);
         const tempFile = await createTemporaryFile(downloadFileExtension);
 
         const deferred = createDeferred();
-        const fileStream = fs.createWriteStream(tempFile.filePath);
+        const fileStream = fileSystem.createWriteStream(tempFile.filePath);
         fileStream.on('finish', () => {
             fileStream.close();
         }).on('error', (err) => {
@@ -64,7 +93,6 @@ export class AnalysisEngineDownloader {
             deferred.reject(err);
         });
 
-        const title = 'Downloading Python Analysis Engine... ';
         await window.withProgress({
             location: ProgressLocation.Window,
             title
@@ -123,10 +151,10 @@
         let totalFiles = 0;
         let extractedFiles = 0;
-        zip.on('ready', () => {
+        zip.on('ready', async () => {
             totalFiles = zip.entriesCount;
-            if (!fs.existsSync(installFolder)) {
-                fs.mkdirSync(installFolder);
+            if (!await this.fs.directoryExistsAsync(installFolder)) {
+                await this.fs.createDirectoryAsync(installFolder);
             }
             zip.extract(null, installFolder, (err, count) => {
                 if (err) {
@@ -147,7 +175,7 @@
         // Set file to executable
         if (!this.platform.isWindows) {
             const executablePath = path.join(installFolder, this.platformData.getEngineExecutableName());
-            fs.chmodSync(executablePath, '0764'); // -rwxrw-r--
+            fileSystem.chmodSync(executablePath, '0764'); // -rwxrw-r--
         }
     }
 }
diff --git a/src/client/common/platform/fileSystem.ts b/src/client/common/platform/fileSystem.ts
index 463b1089b6fe..7dca14f714c9 100644
--- a/src/client/common/platform/fileSystem.ts
+++ b/src/client/common/platform/fileSystem.ts
@@ -5,6 +5,7 @@
 import * as fs from 'fs-extra';
 import { inject, injectable } from 'inversify';
 import * as path from 'path';
+import { createDeferred } from '../helpers';
 import { IFileSystem, IPlatformService } from './types';
 
 @injectable()
@@ -96,4 +97,24 @@ export class FileSystem implements IFileSystem {
             });
         });
     }
+
+    public copyFileAsync(src: string, dest: string): Promise {
+        const deferred = createDeferred();
+        const rs = fs.createReadStream(src).on('error', (err) => {
+            deferred.reject(err);
+        });
+        const ws = fs.createWriteStream(dest).on('error', (err) => {
+            deferred.reject(err);
+        }).on('close', () => {
+            deferred.resolve();
+        });
+        rs.pipe(ws);
+        return deferred.promise;
+    }
+
+    public deleteFileAsync(filename: string): Promise {
+        const deferred = createDeferred();
+        fs.unlink(filename, err => err ?
deferred.reject(err) : deferred.resolve()); + return deferred.promise; + } } diff --git a/src/client/common/platform/types.ts b/src/client/common/platform/types.ts index 6c40a7a6a068..df5921b3eb5f 100644 --- a/src/client/common/platform/types.ts +++ b/src/client/common/platform/types.ts @@ -44,4 +44,6 @@ export interface IFileSystem { // tslint:disable-next-line:unified-signatures appendFileSync(filename: string, data: {}, options?: { encoding?: string; mode?: string; flag?: string }): void; getRealPathAsync(path: string): Promise; + copyFileAsync(src: string, dest: string): Promise; + deleteFileAsync(filename: string): Promise; } From d140b3a94021ae378033c46f23f475a6078b2426 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 3 May 2018 14:23:33 -0700 Subject: [PATCH 51/83] Baseline updates --- src/client/activation/downloader.ts | 2 +- src/test/definitions/hover.ptvs.test.ts | 28 ++++++++++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index d634f01c9cbc..b4dda1ae56d8 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -59,7 +59,7 @@ export class AnalysisEngineDownloader { public async downloadPythiaModel(context: ExtensionContext): Promise { const modelFolder = path.join(context.extensionPath, 'analysis', 'Pythia', 'model'); const localPath = path.join(modelFolder, pythiaModelName); - if (await this.fs.directoryExistsAsync(localPath)) { + if (await this.fs.fileExistsAsync(localPath)) { return; } diff --git a/src/test/definitions/hover.ptvs.test.ts b/src/test/definitions/hover.ptvs.test.ts index 4f0f014c7bff..089245836090 100644 --- a/src/test/definitions/hover.ptvs.test.ts +++ b/src/test/definitions/hover.ptvs.test.ts @@ -52,7 +52,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ 'obj.method1:', + '```python', 'method method1 of one.Class1 objects', + '```', 'This is method1' ]; verifySignatureLines(actual, expected); @@ -67,7 +69,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ 'two.ct().fun:', + '```python', 'method fun of two.ct objects', + '```', 'This is fun' ]; verifySignatureLines(actual, expected); @@ -81,12 +85,13 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ - 'Foo.bar:', + '```python', 'four.Foo.bar() -> bool', + 'declared in Foo', + '```', '说明 - keep this line, it works', 'delete following line, it works', - '如果存在需要等待审批或正在执行的任务,将不刷新页面', - 'declared in Foo' + '如果存在需要等待审批或正在执行的任务,将不刷新页面' ]; verifySignatureLines(actual, expected); }); @@ -99,8 +104,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ - 'four.showMessage:', + '```python', 'four.showMessage()', + '```', 'Кюм ут жэмпэр пошжим льаборэж, коммюны янтэрэсщэт нам ед, декта игнота ныморэ жят эи.', 'Шэа декам экшырки эи, эи зыд эррэм докэндё, векж факэтэ пэрчыквюэрёж ку.' 
]; @@ -131,8 +137,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ - 'misc.Random:', + '```python', 'class misc.Random(_random.Random)', + '```', 'Random number generator base class used by bound module functions.', 'Used to instantiate instances of Random to get generators that don\'t', 'share state.', @@ -154,7 +161,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ 'rnd2.randint:', + '```python', 'method randint of misc.Random objects -> int', + '```', 'Return random integer in range [a, b], including both end points.' ]; verifySignatureLines(actual, expected); @@ -168,8 +177,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ - 'math.acos:', - 'built-in function acos(x)', + '```python', + 'acos(x)', + '```', 'acos(x)', 'Return the arc cosine (measured in radians) of x.' ]; @@ -184,8 +194,9 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ - 'misc.Thread:', + '```python', 'class misc.Thread(_Verbose)', + '```', 'A class that represents a thread of control.', 'This class can be safely subclassed in a limited fashion.' ]; @@ -222,6 +233,7 @@ suite('Hover Definition (Analysis Engine)', () => { function verifySignatureLines(actual: string[], expected: string[]) { assert.equal(actual.length, expected.length, 'incorrect number of lines'); for (let i = 0; i < actual.length; i += 1) { + actual[i] = actual[i].replace(new RegExp(' ', 'g'), ' '); assert.equal(actual[i].trim(), expected[i], `signature line ${i + 1} is incorrect`); } } From 4b394d92957610466c4ced87efd48e7f8d1eb06c Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 3 May 2018 15:40:24 -0700 Subject: [PATCH 52/83] Baseline update --- src/test/signature/signature.ptvs.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/signature/signature.ptvs.test.ts b/src/test/signature/signature.ptvs.test.ts index 8e2f630756d4..823433b50093 100644 --- a/src/test/signature/signature.ptvs.test.ts +++ b/src/test/signature/signature.ptvs.test.ts @@ -95,7 +95,7 @@ suite('Signatures (Analysis Engine)', () => { return; } const expected = [ - new SignatureHelpResult(0, 5, 0, 0, null), + new SignatureHelpResult(0, 5, 1, -1, null), new SignatureHelpResult(0, 6, 1, 0, 'value'), new SignatureHelpResult(0, 7, 1, 0, 'value'), new SignatureHelpResult(0, 8, 1, 1, '...'), From a397b114bd6434173be0582230bbc2f0b3d37988 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Thu, 3 May 2018 21:24:25 -0700 Subject: [PATCH 53/83] Improve startup --- src/client/activation/analysis.ts | 9 ++++++--- src/client/activation/downloader.ts | 6 +++--- src/client/activation/hashVerifier.ts | 4 ++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 91cc647e27e8..be05b8ce924e 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -35,11 +35,11 @@ class LanguageServerStartupErrorHandler implements ErrorHandler { constructor(private readonly deferred: Deferred) { } public error(error: Error, message: Message, count: number): ErrorAction { this.deferred.reject(error); - return ErrorAction.Shutdown; + return ErrorAction.Continue; } public closed(): 
CloseAction { this.deferred.reject(); - return CloseAction.DoNotRestart; + return CloseAction.Restart; } } @@ -103,6 +103,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } if (downloadPackage) { + this.appShell.showWarningMessage('.NET Runtime is not found, platform-specific Python Analysis Engine will be downloaded.'); await downloader.downloadAnalysisEngine(context); reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_DOWNLOADED); } @@ -130,7 +131,9 @@ export class AnalysisExtensionActivator implements IExtensionActivator { disposable = lc.start(); lc.onReady() .then(() => deferred.resolve()) - .catch(deferred.reject); + .catch((reason) => { + deferred.reject(reason); + }); await deferred.promise; this.output.appendLine(`Language server ready: ${this.sw.elapsedTime} ms`); diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index b4dda1ae56d8..9f456b3e867c 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -43,7 +43,7 @@ export class AnalysisEngineDownloader { let localTempFilePath = ''; try { localTempFilePath = await this.downloadFile(downloadUriPrefix, enginePackageFileName, 'Downloading Python Analysis Engine... '); - await this.verifyDownload(localTempFilePath); + await this.verifyDownload(localTempFilePath, platformString); await this.unpackArchive(context.extensionPath, localTempFilePath); } catch (err) { this.output.appendLine('failed.'); @@ -122,11 +122,11 @@ export class AnalysisEngineDownloader { return tempFile.filePath; } - private async verifyDownload(filePath: string): Promise { + private async verifyDownload(filePath: string, platformString: string): Promise { this.output.appendLine(''); this.output.append('Verifying download... '); const verifier = new HashVerifier(); - if (!await verifier.verifyHash(filePath, await this.platformData.getExpectedHash())) { + if (!await verifier.verifyHash(filePath, platformString, await this.platformData.getExpectedHash())) { throw new Error('Hash of the downloaded file does not match.'); } this.output.append('valid.'); diff --git a/src/client/activation/hashVerifier.ts b/src/client/activation/hashVerifier.ts index 950f02d869f9..c62cb36484f7 100644 --- a/src/client/activation/hashVerifier.ts +++ b/src/client/activation/hashVerifier.ts @@ -6,7 +6,7 @@ import * as fs from 'fs'; import { createDeferred } from '../common/helpers'; export class HashVerifier { - public async verifyHash(filePath: string, expectedDigest: string): Promise { + public async verifyHash(filePath: string, platformString: string, expectedDigest: string): Promise { const readStream = fs.createReadStream(filePath); const deferred = createDeferred(); const hash = createHash('sha512'); @@ -23,6 +23,6 @@ export class HashVerifier { readStream.pipe(hash); await deferred.promise; const actual = hash.read(); - return expectedDigest === '' ? true : actual === expectedDigest; + return expectedDigest === platformString ? 
true : actual === expectedDigest; } } From e54eaf898718a7ea4edf4bc7be0b5cb9bafc9390 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 4 May 2018 10:47:02 -0700 Subject: [PATCH 54/83] Handle missing interpreter better --- src/client/activation/analysis.ts | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index be05b8ce924e..19ecc1249d49 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -177,20 +177,19 @@ export class AnalysisExtensionActivator implements IExtensionActivator { const interpreterData = await interpreterDataService.getInterpreterData(); if (!interpreterData) { const appShell = this.services.get(IApplicationShell); - appShell.showErrorMessage('Unable to determine path to Python interpreter.'); - return; + appShell.showWarningMessage('Unable to determine path to Python interpreter. IntelliSense will be limited.'); } - // tslint:disable-next-line:no-string-literal - properties['InterpreterPath'] = interpreterData.path; - // tslint:disable-next-line:no-string-literal - properties['Version'] = interpreterData.version; - // tslint:disable-next-line:no-string-literal - properties['PrefixPath'] = interpreterData.prefix; - // tslint:disable-next-line:no-string-literal - properties['DatabasePath'] = path.join(context.extensionPath, analysisEngineFolder); + if (interpreterData) { + // tslint:disable-next-line:no-string-literal + properties['InterpreterPath'] = interpreterData.path; + // tslint:disable-next-line:no-string-literal + properties['Version'] = interpreterData.version; + // tslint:disable-next-line:no-string-literal + properties['PrefixPath'] = interpreterData.prefix; + } - let searchPaths = interpreterData.searchPaths; + let searchPaths = interpreterData ? 
interpreterData.searchPaths : ''; const settings = this.configuration.getSettings(); if (settings.autoComplete) { const extraPaths = settings.autoComplete.extraPaths; @@ -199,6 +198,9 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } } + // tslint:disable-next-line:no-string-literal + properties['DatabasePath'] = path.join(context.extensionPath, analysisEngineFolder); + const envProvider = this.services.get(IEnvironmentVariablesProvider); const pythonPath = (await envProvider.getEnvironmentVariables()).PYTHONPATH; From 3b8ddd555c2b19da7698a3e9abc3e2a7c6be5ed2 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 4 May 2018 11:32:09 -0700 Subject: [PATCH 55/83] Handle interpreter change --- src/client/activation/analysis.ts | 26 ++++++++++++++- .../activation/interpreterDataService.ts | 32 +++++++++---------- 2 files changed, 41 insertions(+), 17 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 19ecc1249d49..28af58e32d76 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -13,6 +13,7 @@ import { IProcessService } from '../common/process/types'; import { StopWatch } from '../common/stopWatch'; import { IConfigurationService, IOutputChannel, IPythonSettings } from '../common/types'; import { IEnvironmentVariablesProvider } from '../common/variables/types'; +import { IInterpreterService } from '../interpreter/contracts'; import { IServiceContainer } from '../ioc/types'; import { PYTHON_ANALYSIS_ENGINE_DOWNLOADED, @@ -50,7 +51,11 @@ export class AnalysisExtensionActivator implements IExtensionActivator { private readonly fs: IFileSystem; private readonly sw = new StopWatch(); private readonly platformData: PlatformData; + private readonly interpreterService: IInterpreterService; + private readonly disposables: Disposable[] = []; private languageClient: LanguageClient | undefined; + private context: ExtensionContext | undefined; + private interpreterHash: string = ''; constructor(private readonly services: IServiceContainer, pythonSettings: IPythonSettings) { this.configuration = this.services.get(IConfigurationService); @@ -58,13 +63,16 @@ export class AnalysisExtensionActivator implements IExtensionActivator { this.output = this.services.get(IOutputChannel, STANDARD_OUTPUT_CHANNEL); this.fs = this.services.get(IFileSystem); this.platformData = new PlatformData(services.get(IPlatformService), this.fs); + this.interpreterService = this.services.get(IInterpreterService); } public async activate(context: ExtensionContext): Promise { + this.context = context; const clientOptions = await this.getAnalysisOptions(context); if (!clientOptions) { return false; } + this.disposables.push(this.interpreterService.onDidChangeInterpreter(() => this.restartLanguageServer())); return this.startLanguageServer(context, clientOptions); } @@ -72,6 +80,22 @@ export class AnalysisExtensionActivator implements IExtensionActivator { if (this.languageClient) { await this.languageClient.stop(); } + for (const d of this.disposables) { + d.dispose(); + } + } + + private async restartLanguageServer(): Promise { + if (!this.context) { + return; + } + const ids = new InterpreterDataService(this.context, this.services); + const idata = await ids.getInterpreterData(); + if (!idata || idata.hash !== this.interpreterHash) { + this.interpreterHash = idata ? 
idata.hash : ''; + await this.deactivate(); + await this.activate(this.context); + } } private async startLanguageServer(context: ExtensionContext, clientOptions: LanguageClientOptions): Promise { @@ -203,10 +227,10 @@ export class AnalysisExtensionActivator implements IExtensionActivator { const envProvider = this.services.get(IEnvironmentVariablesProvider); const pythonPath = (await envProvider.getEnvironmentVariables()).PYTHONPATH; + this.interpreterHash = interpreterData ? interpreterData.hash : ''; // tslint:disable-next-line:no-string-literal properties['SearchPaths'] = `${searchPaths};${pythonPath ? pythonPath : ''}`; - const selector: string[] = [PYTHON]; // Options to control the language client diff --git a/src/client/activation/interpreterDataService.ts b/src/client/activation/interpreterDataService.ts index 45cf9749e6cf..45c7e42006c4 100644 --- a/src/client/activation/interpreterDataService.ts +++ b/src/client/activation/interpreterDataService.ts @@ -64,6 +64,22 @@ export class InterpreterDataService { return interpreterData; } + public getInterpreterHash(interpreterPath: string): Promise { + const platform = this.serviceContainer.get(IPlatformService); + const pythonExecutable = path.join(path.dirname(interpreterPath), platform.isWindows ? 'python.exe' : 'python'); + // Hash mod time and creation time + const deferred = createDeferred(); + fs.lstat(pythonExecutable, (err, stats) => { + if (err) { + deferred.resolve(''); + } else { + const actual = createHash('sha512').update(`${stats.ctime}-${stats.mtime}`).digest('hex'); + deferred.resolve(actual); + } + }); + return deferred.promise; + } + private async getInterpreterDataFromPython(execService: IPythonExecutionService, interpreterPath: string): Promise { const result = await execService.exec(['-c', 'import sys; print(sys.version_info); print(sys.prefix)'], {}); // 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) <> @@ -87,22 +103,6 @@ export class InterpreterDataService { return new InterpreterData(DataVersion, interpreterPath, `${majorMatches[1]}.${minorMatches[1]}`, prefix, searchPaths, hash); } - private getInterpreterHash(interpreterPath: string): Promise { - const platform = this.serviceContainer.get(IPlatformService); - const pythonExecutable = path.join(path.dirname(interpreterPath), platform.isWindows ? 
'python.exe' : 'python'); - // Hash mod time and creation time - const deferred = createDeferred(); - fs.lstat(pythonExecutable, (err, stats) => { - if (err) { - deferred.resolve(''); - } else { - const actual = createHash('sha512').update(`${stats.ctimeMs}-${stats.mtimeMs}`).digest('hex'); - deferred.resolve(actual); - } - }); - return deferred.promise; - } - private async getSearchPaths(execService: IPythonExecutionService): Promise { const result = await execService.exec(['-c', 'import sys; print(sys.path);'], {}); if (!result.stdout) { From 41f9624568af5d02ae2372bd802ff2f83ee8efef Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 4 May 2018 12:17:44 -0700 Subject: [PATCH 56/83] Delete old file --- news/2 Fixes/1354.md | 1 - 1 file changed, 1 deletion(-) delete mode 100644 news/2 Fixes/1354.md diff --git a/news/2 Fixes/1354.md b/news/2 Fixes/1354.md deleted file mode 100644 index a5485ac00724..000000000000 --- a/news/2 Fixes/1354.md +++ /dev/null @@ -1 +0,0 @@ -Multiple fixes to format on type \ No newline at end of file From 3627b85ab845c64e77172f3c05644f648848d2f2 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 4 May 2018 12:35:24 -0700 Subject: [PATCH 57/83] Fix LS startup time reporting --- src/client/activation/analysis.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 28af58e32d76..667462e64e1f 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -67,6 +67,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } public async activate(context: ExtensionContext): Promise { + this.sw.reset(); this.context = context; const clientOptions = await this.getAnalysisOptions(context); if (!clientOptions) { From 486d11d08962794cbd884b4aafea81136fc27a10 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 8 May 2018 11:13:40 -0700 Subject: [PATCH 58/83] Remove Async suffix from IFileSystem --- src/client/activation/analysis.ts | 2 +- src/client/activation/downloader.ts | 14 +++--- src/client/common/platform/fileSystem.ts | 20 ++++---- src/client/common/platform/types.ts | 16 +++---- src/client/common/process/pythonProcess.ts | 2 +- .../baseActivationProvider.ts | 6 +-- .../configurationProviderUtils.ts | 2 +- src/client/interpreter/display/index.ts | 2 +- .../services/baseVirtualEnvService.ts | 4 +- .../locators/services/condaEnvFileService.ts | 6 +-- .../locators/services/condaEnvService.ts | 4 +- .../locators/services/condaService.ts | 12 ++--- .../locators/services/currentPathService.ts | 2 +- .../locators/services/pipEnvService.ts | 6 +-- src/client/linters/lintingEngine.ts | 4 +- src/client/linters/pylint.ts | 14 +++--- .../terminals/codeExecution/djangoContext.ts | 4 +- src/test/common/process/execFactory.test.ts | 2 +- .../common/terminals/activation.bash.test.ts | 2 +- .../activation.commandPrompt.test.ts | 16 +++---- .../common/terminals/activation.conda.test.ts | 12 ++--- .../configuration/interpreterSelector.test.ts | 2 +- .../debugger/configProvider/provider.test.ts | 2 +- .../interpreters/condaEnvFileService.test.ts | 10 ++-- src/test/interpreters/condaEnvService.test.ts | 16 +++---- src/test/interpreters/condaService.test.ts | 48 +++++++++---------- .../interpreters/currentPathService.test.ts | 8 ++-- src/test/interpreters/display.test.ts | 6 +-- src/test/interpreters/pipEnvService.test.ts | 16 +++---- src/test/linters/lint.args.test.ts | 2 +- src/test/linters/lint.provider.test.ts | 2 +- 
src/test/linters/lintengine.test.ts | 2 +- src/test/linters/pylint.test.ts | 34 ++++++------- 33 files changed, 149 insertions(+), 151 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 667462e64e1f..761f70a34589 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -110,7 +110,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { await this.checkPythiaModel(context, downloader); - if (!await this.fs.fileExistsAsync(mscorlib)) { + if (!await this.fs.fileExists(mscorlib)) { // Depends on .NET Runtime or SDK this.languageClient = this.createSimpleLanguageClient(context, clientOptions); try { diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index 9f456b3e867c..52e9136a4951 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -51,7 +51,7 @@ export class AnalysisEngineDownloader { throw new Error(err); } finally { if (localTempFilePath.length > 0) { - await this.fs.deleteFileAsync(localTempFilePath); + await this.fs.deleteFile(localTempFilePath); } } } @@ -59,22 +59,22 @@ export class AnalysisEngineDownloader { public async downloadPythiaModel(context: ExtensionContext): Promise { const modelFolder = path.join(context.extensionPath, 'analysis', 'Pythia', 'model'); const localPath = path.join(modelFolder, pythiaModelName); - if (await this.fs.fileExistsAsync(localPath)) { + if (await this.fs.fileExists(localPath)) { return; } let localTempFilePath = ''; try { localTempFilePath = await this.downloadFile(downloadUriPrefix, pythiaModelName, 'Downloading IntelliSense Model File... '); - await this.fs.createDirectoryAsync(modelFolder); - await this.fs.copyFileAsync(localTempFilePath, localPath); + await this.fs.createDirectory(modelFolder); + await this.fs.copyFile(localTempFilePath, localPath); } catch (err) { this.output.appendLine('failed.'); this.output.appendLine(err); throw new Error(err); } finally { if (localTempFilePath.length > 0) { - await this.fs.deleteFileAsync(localTempFilePath); + await this.fs.deleteFile(localTempFilePath); } } } @@ -153,8 +153,8 @@ export class AnalysisEngineDownloader { let extractedFiles = 0; zip.on('ready', async () => { totalFiles = zip.entriesCount; - if (!await this.fs.directoryExistsAsync(installFolder)) { - await this.fs.createDirectoryAsync(installFolder); + if (!await this.fs.directoryExists(installFolder)) { + await this.fs.createDirectory(installFolder); } zip.extract(null, installFolder, (err, count) => { if (err) { diff --git a/src/client/common/platform/fileSystem.ts b/src/client/common/platform/fileSystem.ts index 7dca14f714c9..ecc9c564f446 100644 --- a/src/client/common/platform/fileSystem.ts +++ b/src/client/common/platform/fileSystem.ts @@ -16,7 +16,7 @@ export class FileSystem implements IFileSystem { return path.sep; } - public objectExistsAsync(filePath: string, statCheck: (s: fs.Stats) => boolean): Promise { + public objectExists(filePath: string, statCheck: (s: fs.Stats) => boolean): Promise { return new Promise(resolve => { fs.stat(filePath, (error, stats) => { if (error) { @@ -27,8 +27,8 @@ export class FileSystem implements IFileSystem { }); } - public fileExistsAsync(filePath: string): Promise { - return this.objectExistsAsync(filePath, (stats) => stats.isFile()); + public fileExists(filePath: string): Promise { + return this.objectExists(filePath, (stats) => stats.isFile()); } public fileExistsSync(filePath: string): boolean { return 
fs.existsSync(filePath); @@ -43,15 +43,15 @@ export class FileSystem implements IFileSystem { return fs.readFile(filePath).then(buffer => buffer.toString()); } - public directoryExistsAsync(filePath: string): Promise { - return this.objectExistsAsync(filePath, (stats) => stats.isDirectory()); + public directoryExists(filePath: string): Promise { + return this.objectExists(filePath, (stats) => stats.isDirectory()); } - public createDirectoryAsync(directoryPath: string): Promise { + public createDirectory(directoryPath: string): Promise { return fs.mkdirp(directoryPath); } - public getSubDirectoriesAsync(rootDir: string): Promise { + public getSubDirectories(rootDir: string): Promise { return new Promise(resolve => { fs.readdir(rootDir, (error, files) => { if (error) { @@ -90,7 +90,7 @@ export class FileSystem implements IFileSystem { return fs.appendFileSync(filename, data, optionsOrEncoding); } - public getRealPathAsync(filePath: string): Promise { + public getRealPath(filePath: string): Promise { return new Promise(resolve => { fs.realpath(filePath, (err, realPath) => { resolve(err ? filePath : realPath); @@ -98,7 +98,7 @@ export class FileSystem implements IFileSystem { }); } - public copyFileAsync(src: string, dest: string): Promise { + public copyFile(src: string, dest: string): Promise { const deferred = createDeferred(); const rs = fs.createReadStream(src).on('error', (err) => { deferred.reject(err); @@ -112,7 +112,7 @@ export class FileSystem implements IFileSystem { return deferred.promise; } - public deleteFileAsync(filename: string): Promise { + public deleteFile(filename: string): Promise { const deferred = createDeferred(); fs.unlink(filename, err => err ? deferred.reject(err) : deferred.resolve()); return deferred.promise; diff --git a/src/client/common/platform/types.ts b/src/client/common/platform/types.ts index df5921b3eb5f..ce3836eb59a2 100644 --- a/src/client/common/platform/types.ts +++ b/src/client/common/platform/types.ts @@ -31,19 +31,19 @@ export interface IPlatformService { export const IFileSystem = Symbol('IFileSystem'); export interface IFileSystem { directorySeparatorChar: string; - objectExistsAsync(path: string, statCheck: (s: fs.Stats) => boolean): Promise; - fileExistsAsync(path: string): Promise; + objectExists(path: string, statCheck: (s: fs.Stats) => boolean): Promise; + fileExists(path: string): Promise; fileExistsSync(path: string): boolean; - directoryExistsAsync(path: string): Promise; - createDirectoryAsync(path: string): Promise; - getSubDirectoriesAsync(rootDir: string): Promise; + directoryExists(path: string): Promise; + createDirectory(path: string): Promise; + getSubDirectories(rootDir: string): Promise; arePathsSame(path1: string, path2: string): boolean; readFile(filePath: string): Promise; appendFileSync(filename: string, data: {}, encoding: string): void; appendFileSync(filename: string, data: {}, options?: { encoding?: string; mode?: number; flag?: string }): void; // tslint:disable-next-line:unified-signatures appendFileSync(filename: string, data: {}, options?: { encoding?: string; mode?: string; flag?: string }): void; - getRealPathAsync(path: string): Promise; - copyFileAsync(src: string, dest: string): Promise; - deleteFileAsync(filename: string): Promise; + getRealPath(path: string): Promise; + copyFile(src: string, dest: string): Promise; + deleteFile(filename: string): Promise; } diff --git a/src/client/common/process/pythonProcess.ts b/src/client/common/process/pythonProcess.ts index 0d6622bed472..5199f030c32f 100644 --- 
a/src/client/common/process/pythonProcess.ts +++ b/src/client/common/process/pythonProcess.ts @@ -31,7 +31,7 @@ export class PythonExecutionService implements IPythonExecutionService { public async getExecutablePath(): Promise { // If we've passed the python file, then return the file. // This is because on mac if using the interpreter /usr/bin/python2.7 we can get a different value for the path - if (await this.fileSystem.fileExistsAsync(this.pythonPath)) { + if (await this.fileSystem.fileExists(this.pythonPath)) { return this.pythonPath; } return this.procService.exec(this.pythonPath, ['-c', 'import sys;print(sys.executable)'], { env: this.envVars, throwOnStdErr: true }) diff --git a/src/client/common/terminal/environmentActivationProviders/baseActivationProvider.ts b/src/client/common/terminal/environmentActivationProviders/baseActivationProvider.ts index 5ba858d624f4..ee45d5948c93 100644 --- a/src/client/common/terminal/environmentActivationProviders/baseActivationProvider.ts +++ b/src/client/common/terminal/environmentActivationProviders/baseActivationProvider.ts @@ -4,12 +4,10 @@ import { injectable } from 'inversify'; import * as path from 'path'; import { Uri } from 'vscode'; -import { PythonInterpreter } from '../../../interpreter/contracts'; import { IServiceContainer } from '../../../ioc/types'; import { IFileSystem } from '../../platform/types'; import { IConfigurationService } from '../../types'; -import { TerminalShellType } from '../types'; -import { ITerminalActivationCommandProvider } from '../types'; +import { ITerminalActivationCommandProvider, TerminalShellType } from '../types'; @injectable() export abstract class BaseActivationCommandProvider implements ITerminalActivationCommandProvider { @@ -25,7 +23,7 @@ export abstract class BaseActivationCommandProvider implements ITerminalActivati for (const scriptFileName of scriptFileNames) { // Generate scripts are found in the same directory as the interpreter. const scriptFile = path.join(path.dirname(pythonPath), scriptFileName); - const found = await fs.fileExistsAsync(scriptFile); + const found = await fs.fileExists(scriptFile); if (found) { return scriptFile; } diff --git a/src/client/debugger/configProviders/configurationProviderUtils.ts b/src/client/debugger/configProviders/configurationProviderUtils.ts index e5ce625517f5..6bc13f0bf53f 100644 --- a/src/client/debugger/configProviders/configurationProviderUtils.ts +++ b/src/client/debugger/configProviders/configurationProviderUtils.ts @@ -27,7 +27,7 @@ export class ConfigurationProviderUtils implements IConfigurationProviderUtils { const executionService = await this.executionFactory.create(resource); const output = await executionService.exec(['-c', 'import pyramid;print(pyramid.__file__)'], { throwOnStdErr: true }); const pserveFilePath = path.join(path.dirname(output.stdout.trim()), 'scripts', PSERVE_SCRIPT_FILE_NAME); - return await this.fs.fileExistsAsync(pserveFilePath) ? pserveFilePath : undefined; + return await this.fs.fileExists(pserveFilePath) ? 
pserveFilePath : undefined; } catch (ex) { const message = 'Unable to locate \'pserve.py\' required for debugging of Pyramid applications.'; console.error(message, ex); diff --git a/src/client/interpreter/display/index.ts b/src/client/interpreter/display/index.ts index 1c19a75f2e70..e419691b31d2 100644 --- a/src/client/interpreter/display/index.ts +++ b/src/client/interpreter/display/index.ts @@ -65,7 +65,7 @@ export class InterpreterDisplay implements IInterpreterDisplay { } else { const defaultDisplayName = `${path.basename(pythonPath)} [Environment]`; await Promise.all([ - this.fileSystem.fileExistsAsync(pythonPath), + this.fileSystem.fileExists(pythonPath), this.versionProvider.getVersion(pythonPath, defaultDisplayName), this.getVirtualEnvironmentName(pythonPath).catch(() => '') ]) diff --git a/src/client/interpreter/locators/services/baseVirtualEnvService.ts b/src/client/interpreter/locators/services/baseVirtualEnvService.ts index 06b57b939436..4499a3a38acf 100644 --- a/src/client/interpreter/locators/services/baseVirtualEnvService.ts +++ b/src/client/interpreter/locators/services/baseVirtualEnvService.ts @@ -34,7 +34,7 @@ export class BaseVirtualEnvService extends CacheableLocatorService { .then(listOfInterpreters => _.flatten(listOfInterpreters)); } private async lookForInterpretersInVenvs(pathToCheck: string) { - return this.fileSystem.getSubDirectoriesAsync(pathToCheck) + return this.fileSystem.getSubDirectories(pathToCheck) .then(subDirs => Promise.all(this.getProspectiveDirectoriesForLookup(subDirs))) .then(dirs => dirs.filter(dir => dir.length > 0)) .then(dirs => Promise.all(dirs.map(lookForInterpretersInDirectory))) @@ -50,7 +50,7 @@ export class BaseVirtualEnvService extends CacheableLocatorService { const platform = this.serviceContainer.get(IPlatformService); const dirToLookFor = platform.virtualEnvBinName; return subDirs.map(subDir => - this.fileSystem.getSubDirectoriesAsync(subDir) + this.fileSystem.getSubDirectories(subDir) .then(dirs => { const scriptOrBinDirs = dirs.filter(dir => { const folderName = path.basename(dir); diff --git a/src/client/interpreter/locators/services/condaEnvFileService.ts b/src/client/interpreter/locators/services/condaEnvFileService.ts index 49cbc2eec935..2f9e681abb3f 100644 --- a/src/client/interpreter/locators/services/condaEnvFileService.ts +++ b/src/client/interpreter/locators/services/condaEnvFileService.ts @@ -15,7 +15,7 @@ import { AnacondaCompanyName, AnacondaCompanyNames, AnacondaDisplayName } from ' @injectable() export class CondaEnvFileService extends CacheableLocatorService { - constructor( @inject(IInterpreterVersionService) private versionService: IInterpreterVersionService, + constructor(@inject(IInterpreterVersionService) private versionService: IInterpreterVersionService, @inject(ICondaService) private condaService: ICondaService, @inject(IFileSystem) private fileSystem: IFileSystem, @inject(IServiceContainer) serviceContainer: IServiceContainer, @@ -31,7 +31,7 @@ export class CondaEnvFileService extends CacheableLocatorService { if (!this.condaService.condaEnvironmentsFile) { return []; } - return this.fileSystem.fileExistsAsync(this.condaService.condaEnvironmentsFile!) + return this.fileSystem.fileExists(this.condaService.condaEnvironmentsFile!) .then(exists => exists ? this.getEnvironmentsFromFile(this.condaService.condaEnvironmentsFile!) 
: Promise.resolve([])); } private async getEnvironmentsFromFile(envFile: string) { @@ -66,7 +66,7 @@ export class CondaEnvFileService extends CacheableLocatorService { } private async getInterpreterDetails(environmentPath: string): Promise { const interpreter = this.condaService.getInterpreterPath(environmentPath); - if (!interpreter || !await this.fileSystem.fileExistsAsync(interpreter)) { + if (!interpreter || !await this.fileSystem.fileExists(interpreter)) { return; } diff --git a/src/client/interpreter/locators/services/condaEnvService.ts b/src/client/interpreter/locators/services/condaEnvService.ts index 7e2d5c616fcc..781f3b286f75 100644 --- a/src/client/interpreter/locators/services/condaEnvService.ts +++ b/src/client/interpreter/locators/services/condaEnvService.ts @@ -14,7 +14,7 @@ import { CondaHelper } from './condaHelper'; @injectable() export class CondaEnvService extends CacheableLocatorService { private readonly condaHelper = new CondaHelper(); - constructor( @inject(ICondaService) private condaService: ICondaService, + constructor(@inject(ICondaService) private condaService: ICondaService, @inject(IInterpreterVersionService) private versionService: IInterpreterVersionService, @inject(ILogger) private logger: ILogger, @inject(IServiceContainer) serviceContainer: IServiceContainer, @@ -37,7 +37,7 @@ export class CondaEnvService extends CacheableLocatorService { .map(async envPath => { const pythonPath = this.condaService.getInterpreterPath(envPath); - const existsPromise = pythonPath ? this.fileSystem.fileExistsAsync(pythonPath) : Promise.resolve(false); + const existsPromise = pythonPath ? this.fileSystem.fileExists(pythonPath) : Promise.resolve(false); const versionPromise = this.versionService.getVersion(pythonPath, ''); const [exists, version] = await Promise.all([existsPromise, versionPromise]); diff --git a/src/client/interpreter/locators/services/condaService.ts b/src/client/interpreter/locators/services/condaService.ts index f73cb277fd35..753ffb8f1ee9 100644 --- a/src/client/interpreter/locators/services/condaService.ts +++ b/src/client/interpreter/locators/services/condaService.ts @@ -88,9 +88,9 @@ export class CondaService implements ICondaService { const dir = path.dirname(interpreterPath); const isWindows = this.serviceContainer.get(IPlatformService).isWindows; const condaMetaDirectory = isWindows ? path.join(dir, 'conda-meta') : path.join(dir, '..', 'conda-meta'); - return fs.directoryExistsAsync(condaMetaDirectory); + return fs.directoryExists(condaMetaDirectory); } - public async getCondaEnvironment(interpreterPath: string): Promise<{ name: string, path: string } | undefined> { + public async getCondaEnvironment(interpreterPath: string): Promise<{ name: string; path: string } | undefined> { const isCondaEnv = await this.isCondaEnvironment(interpreterPath); if (!isCondaEnv) { return; @@ -118,11 +118,11 @@ export class CondaService implements ICondaService { // If still not available, then the user created the env after starting vs code. // The only solution is to get the user to re-start vscode. } - public async getCondaEnvironments(ignoreCache: boolean): Promise<({ name: string, path: string }[]) | undefined> { + public async getCondaEnvironments(ignoreCache: boolean): Promise<({ name: string; path: string }[]) | undefined> { // Global cache. 
const persistentFactory = this.serviceContainer.get(IPersistentStateFactory); // tslint:disable-next-line:no-any - const globalPersistence = persistentFactory.createGlobalPersistentState<{ data: { name: string, path: string }[] | undefined }>('CONDA_ENVIRONMENTS', undefined as any); + const globalPersistence = persistentFactory.createGlobalPersistentState<{ data: { name: string; path: string }[] | undefined }>('CONDA_ENVIRONMENTS', undefined as any); if (!ignoreCache && globalPersistence.value) { return globalPersistence.value.data; } @@ -171,7 +171,7 @@ export class CondaService implements ICondaService { return condaInterpreter ? path.join(path.dirname(condaInterpreter.path), 'conda.exe') : 'conda'; }) .then(async condaPath => { - return this.fileSystem.fileExistsAsync(condaPath).then(exists => exists ? condaPath : 'conda'); + return this.fileSystem.fileExists(condaPath).then(exists => exists ? condaPath : 'conda'); }); } return this.getCondaFileFromKnownLocations(); @@ -179,7 +179,7 @@ export class CondaService implements ICondaService { private async getCondaFileFromKnownLocations(): Promise { const condaFiles = await Promise.all(KNOWN_CONDA_LOCATIONS .map(untildify) - .map(async (condaPath: string) => this.fileSystem.fileExistsAsync(condaPath).then(exists => exists ? condaPath : ''))); + .map(async (condaPath: string) => this.fileSystem.fileExists(condaPath).then(exists => exists ? condaPath : ''))); const validCondaFiles = condaFiles.filter(condaPath => condaPath.length > 0); return validCondaFiles.length === 0 ? 'conda' : validCondaFiles[0]; diff --git a/src/client/interpreter/locators/services/currentPathService.ts b/src/client/interpreter/locators/services/currentPathService.ts index 48444e0dfba9..a3c29ace620d 100644 --- a/src/client/interpreter/locators/services/currentPathService.ts +++ b/src/client/interpreter/locators/services/currentPathService.ts @@ -56,7 +56,7 @@ export class CurrentPathService extends CacheableLocatorService { try { const output = await this.processService.exec(pythonPath, ['-c', 'import sys;print(sys.executable)'], {}); const executablePath = output.stdout.trim(); - if (executablePath.length > 0 && await this.fs.fileExistsAsync(executablePath)) { + if (executablePath.length > 0 && await this.fs.fileExists(executablePath)) { return executablePath; } return defaultValue; diff --git a/src/client/interpreter/locators/services/pipEnvService.ts b/src/client/interpreter/locators/services/pipEnvService.ts index 935e075308e8..c9438a8da55b 100644 --- a/src/client/interpreter/locators/services/pipEnvService.ts +++ b/src/client/interpreter/locators/services/pipEnvService.ts @@ -78,15 +78,15 @@ export class PipEnvService extends CacheableLocatorService { return; } const venvFolder = await this.invokePipenv('--venv', cwd); - return venvFolder && await this.fs.directoryExistsAsync(venvFolder) ? venvFolder : undefined; + return venvFolder && await this.fs.directoryExists(venvFolder) ? 
venvFolder : undefined; } private async checkIfPipFileExists(cwd: string): Promise { const currentProcess = this.serviceContainer.get(ICurrentProcess); const pipFileName = currentProcess.env[pipEnvFileNameVariable]; - if (typeof pipFileName === 'string' && await this.fs.fileExistsAsync(path.join(cwd, pipFileName))) { + if (typeof pipFileName === 'string' && await this.fs.fileExists(path.join(cwd, pipFileName))) { return true; } - if (await this.fs.fileExistsAsync(path.join(cwd, 'Pipfile'))) { + if (await this.fs.fileExists(path.join(cwd, 'Pipfile'))) { return true; } return false; diff --git a/src/client/linters/lintingEngine.ts b/src/client/linters/lintingEngine.ts index d5323c697c43..93b424780278 100644 --- a/src/client/linters/lintingEngine.ts +++ b/src/client/linters/lintingEngine.ts @@ -66,7 +66,7 @@ export class LintingEngine implements ILintingEngine { public async lintOpenPythonFiles(): Promise { this.diagnosticCollection.clear(); - const promises = this.documents.textDocuments.map(async document => await this.lintDocument(document, 'auto')); + const promises = this.documents.textDocuments.map(async document => this.lintDocument(document, 'auto')); await Promise.all(promises); return this.diagnosticCollection; } @@ -197,6 +197,6 @@ export class LintingEngine implements ILintingEngine { if (document.uri.scheme !== 'file' || !document.uri.fsPath) { return false; } - return await this.fileSystem.fileExistsAsync(document.uri.fsPath); + return this.fileSystem.fileExists(document.uri.fsPath); } } diff --git a/src/client/linters/pylint.ts b/src/client/linters/pylint.ts index 1e830283127d..ef015a260c46 100644 --- a/src/client/linters/pylint.ts +++ b/src/client/linters/pylint.ts @@ -99,17 +99,17 @@ export class Pylint extends BaseLinter { return true; } - if (await fs.fileExistsAsync(path.join(folder, pylintrc)) || await fs.fileExistsAsync(path.join(folder, dotPylintrc))) { + if (await fs.fileExists(path.join(folder, pylintrc)) || await fs.fileExists(path.join(folder, dotPylintrc))) { return true; } let current = folder; let above = path.dirname(folder); do { - if (!await fs.fileExistsAsync(path.join(current, '__init__.py'))) { + if (!await fs.fileExists(path.join(current, '__init__.py'))) { break; } - if (await fs.fileExistsAsync(path.join(current, pylintrc)) || await fs.fileExistsAsync(path.join(current, dotPylintrc))) { + if (await fs.fileExists(path.join(current, pylintrc)) || await fs.fileExists(path.join(current, dotPylintrc))) { return true; } current = above; @@ -117,15 +117,15 @@ export class Pylint extends BaseLinter { } while (!fs.arePathsSame(current, above)); const home = os.homedir(); - if (await fs.fileExistsAsync(path.join(home, dotPylintrc))) { + if (await fs.fileExists(path.join(home, dotPylintrc))) { return true; } - if (await fs.fileExistsAsync(path.join(home, '.config', pylintrc))) { + if (await fs.fileExists(path.join(home, '.config', pylintrc))) { return true; } if (!platformService.isWindows) { - if (await fs.fileExistsAsync(path.join('/etc', pylintrc))) { + if (await fs.fileExists(path.join('/etc', pylintrc))) { return true; } } @@ -138,7 +138,7 @@ export class Pylint extends BaseLinter { let current = folder; let above = path.dirname(current); do { - if (await fs.fileExistsAsync(path.join(current, pylintrc)) || await fs.fileExistsAsync(path.join(current, dotPylintrc))) { + if (await fs.fileExists(path.join(current, pylintrc)) || await fs.fileExists(path.join(current, dotPylintrc))) { return true; } current = above; diff --git 
a/src/client/terminals/codeExecution/djangoContext.ts b/src/client/terminals/codeExecution/djangoContext.ts index 00a5dd8e437b..d6f8755d447b 100644 --- a/src/client/terminals/codeExecution/djangoContext.ts +++ b/src/client/terminals/codeExecution/djangoContext.ts @@ -54,7 +54,7 @@ export class DjangoContextInitializer implements Disposable { private async ensureContextStateIsSet(): Promise { const activeWorkspace = this.getActiveWorkspace(); if (!activeWorkspace) { - return await this.isDjangoProject.set(false); + return this.isDjangoProject.set(false); } if (this.lastCheckedWorkspace === activeWorkspace) { return; @@ -62,7 +62,7 @@ export class DjangoContextInitializer implements Disposable { if (this.workspaceContextKeyValues.has(activeWorkspace)) { await this.isDjangoProject.set(this.workspaceContextKeyValues.get(activeWorkspace)!); } else { - const exists = await this.fileSystem.fileExistsAsync(path.join(activeWorkspace, 'manage.py')); + const exists = await this.fileSystem.fileExists(path.join(activeWorkspace, 'manage.py')); await this.isDjangoProject.set(exists); this.workspaceContextKeyValues.set(activeWorkspace, exists); this.lastCheckedWorkspace = activeWorkspace; diff --git a/src/test/common/process/execFactory.test.ts b/src/test/common/process/execFactory.test.ts index 9a8aa3ce4322..83de43009b56 100644 --- a/src/test/common/process/execFactory.test.ts +++ b/src/test/common/process/execFactory.test.ts @@ -22,7 +22,7 @@ suite('PythonExecutableService', () => { procService = TypeMoq.Mock.ofType(); configService = TypeMoq.Mock.ofType(); const fileSystem = TypeMoq.Mock.ofType(); - fileSystem.setup(f => f.fileExistsAsync(TypeMoq.It.isAny())).returns(() => Promise.resolve(false)); + fileSystem.setup(f => f.fileExists(TypeMoq.It.isAny())).returns(() => Promise.resolve(false)); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IFileSystem))).returns(() => fileSystem.object); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IEnvironmentVariablesProvider))).returns(() => envVarsProvider.object); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IProcessService))).returns(() => procService.object); diff --git a/src/test/common/terminals/activation.bash.test.ts b/src/test/common/terminals/activation.bash.test.ts index c321528140ea..88f677f5dc21 100644 --- a/src/test/common/terminals/activation.bash.test.ts +++ b/src/test/common/terminals/activation.bash.test.ts @@ -75,7 +75,7 @@ suite('Terminal Environment Activation (bash)', () => { } const pathToScriptFile = path.join(path.dirname(pythonPath), scriptFileName); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(undefined, shellType.value); if (isScriptFileSupported) { diff --git a/src/test/common/terminals/activation.commandPrompt.test.ts b/src/test/common/terminals/activation.commandPrompt.test.ts index ce1942bc3b88..8f6f743b6063 100644 --- a/src/test/common/terminals/activation.commandPrompt.test.ts +++ b/src/test/common/terminals/activation.commandPrompt.test.ts @@ -85,7 +85,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { const bash = new CommandPromptAndPowerShell(serviceContainer.object); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => 
Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const commands = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); // Ensure the script file is of the following form: @@ -101,7 +101,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => p.isWindows).returns(() => true); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await batch.getActivationCommands(resource, TerminalShellType.powershell); // Executing batch files from powershell requires going back to cmd, then into powershell @@ -116,7 +116,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => p.isWindows).returns(() => true); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); // Executing batch files from powershell requires going back to cmd, then into powershell @@ -131,7 +131,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => p.isWindows).returns(() => false); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); expect(command).to.be.equal(undefined, 'Invalid command'); @@ -142,7 +142,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => p.isWindows).returns(() => false); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.bat'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); expect(command).to.be.equal(undefined, 'Invalid command'); @@ -172,7 +172,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => p.isWindows).returns(() => true); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(resource, TerminalShellType.commandPrompt); expect(command).to.be.deep.equal([], 'Invalid command (running powershell files are not supported on command prompt)'); @@ -183,7 +183,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => 
p.isWindows).returns(() => true); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(resource, TerminalShellType.powershell); expect(command).to.be.deep.equal([`& ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); @@ -194,7 +194,7 @@ suite('Terminal Environment Activation (cmd/powershell)', () => { platform.setup(p => p.isWindows).returns(() => true); const pathToScriptFile = path.join(path.dirname(pythonPath), 'activate.ps1'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pathToScriptFile))).returns(() => Promise.resolve(true)); const command = await bash.getActivationCommands(resource, TerminalShellType.powershellCore); expect(command).to.be.deep.equal([`& ${pathToScriptFile.fileToCommandArgument()}`.trim()], 'Invalid command'); diff --git a/src/test/common/terminals/activation.conda.test.ts b/src/test/common/terminals/activation.conda.test.ts index d1d868dcc44f..1222aa1da1f8 100644 --- a/src/test/common/terminals/activation.conda.test.ts +++ b/src/test/common/terminals/activation.conda.test.ts @@ -147,19 +147,19 @@ suite('Terminal Environment Activation conda', () => { test('If environment is a conda environment, ensure conda activation command is sent (windows)', async () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'enva', 'python.exe'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await expectCondaActivationCommand(true, false, false, pythonPath); }); test('If environment is a conda environment, ensure conda activation command is sent (linux)', async () => { const pythonPath = path.join('users', 'xyz', '.conda', 'envs', 'enva', 'bin', 'python'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await expectCondaActivationCommand(false, false, true, pythonPath); }); test('If environment is a conda environment, ensure conda activation command is sent (osx)', async () => { const pythonPath = path.join('users', 'xyz', '.conda', 'envs', 'enva', 'bin', 'python'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await expectCondaActivationCommand(false, true, false, pythonPath); }); @@ -200,21 +200,21 @@ suite('Terminal Environment Activation conda', () => { test('If environment is a conda environment and environment detection fails, ensure activatino of script is sent (windows)', async () => { const pythonPath 
= path.join('c', 'users', 'xyz', '.conda', 'envs', 'enva', 'python.exe'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await expectActivationCommandIfCondaDetectionFails(true, false, false, pythonPath, condaEnvDir); }); test('If environment is a conda environment and environment detection fails, ensure activatino of script is sent (osx)', async () => { const pythonPath = path.join('users', 'xyz', '.conda', 'envs', 'enva', 'python'); const condaEnvDir = path.join('users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await expectActivationCommandIfCondaDetectionFails(false, true, false, pythonPath, condaEnvDir); }); test('If environment is a conda environment and environment detection fails, ensure activatino of script is sent (linux)', async () => { const pythonPath = path.join('users', 'xyz', '.conda', 'envs', 'enva', 'python'); const condaEnvDir = path.join('users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await expectActivationCommandIfCondaDetectionFails(false, false, true, pythonPath, condaEnvDir); }); diff --git a/src/test/configuration/interpreterSelector.test.ts b/src/test/configuration/interpreterSelector.test.ts index 5dbb7f7bd70d..98ea1fd27cc9 100644 --- a/src/test/configuration/interpreterSelector.test.ts +++ b/src/test/configuration/interpreterSelector.test.ts @@ -54,7 +54,7 @@ suite('Interpreters - selector', () => { .setup(x => x.arePathsSame(TypeMoq.It.isAnyString(), TypeMoq.It.isAnyString())) .returns((a: string, b: string) => a === b); fileSystem - .setup(x => x.getRealPathAsync(TypeMoq.It.isAnyString())) + .setup(x => x.getRealPath(TypeMoq.It.isAnyString())) .returns((a: string) => new Promise(resolve => resolve(a))); serviceManager.addSingletonInstance(IFileSystem, fileSystem.object); diff --git a/src/test/debugger/configProvider/provider.test.ts b/src/test/debugger/configProvider/provider.test.ts index 94ca215e76db..fbc51cd3c762 100644 --- a/src/test/debugger/configProvider/provider.test.ts +++ b/src/test/debugger/configProvider/provider.test.ts @@ -345,7 +345,7 @@ import { IServiceContainer } from '../../../client/ioc/types'; pythonExecutionService.setup(e => e.exec(TypeMoq.It.isValue(args), TypeMoq.It.isAny())) .returns(() => execOutput) .verifiable(TypeMoq.Times.exactly(addPyramidDebugOption ? 1 : 0)); - fileSystem.setup(f => f.fileExistsAsync(TypeMoq.It.isValue(pserveFilePath))) + fileSystem.setup(f => f.fileExists(TypeMoq.It.isValue(pserveFilePath))) .returns(() => Promise.resolve(pyramidExists)) .verifiable(TypeMoq.Times.exactly(pyramidExists && addPyramidDebugOption ? 
1 : 0)); appShell.setup(a => a.showErrorMessage(TypeMoq.It.isAny())) diff --git a/src/test/interpreters/condaEnvFileService.test.ts b/src/test/interpreters/condaEnvFileService.test.ts index 2207be82732a..92783ddf3bc9 100644 --- a/src/test/interpreters/condaEnvFileService.test.ts +++ b/src/test/interpreters/condaEnvFileService.test.ts @@ -44,7 +44,7 @@ suite('Interpreters from Conda Environments Text File', () => { }); test('Must return an empty list for an empty file', async () => { condaService.setup(c => c.condaEnvironmentsFile).returns(() => environmentsFilePath); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.readFile(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve('')); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve('Mock Name')); const interpreters = await condaFileProvider.getInterpreters(); @@ -73,11 +73,11 @@ suite('Interpreters from Conda Environments Text File', () => { }); return Promise.resolve(condaEnvironments); }); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.arePathsSame(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((p1: string, p2: string) => isWindows ? p1 === p2 : p1.toUpperCase() === p2.toUpperCase()); validPaths.forEach(validPath => { const pythonPath = isWindows ? path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); fileSystem.setup(fs => fs.readFile(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(interpreterPaths.join(EOL))); @@ -107,8 +107,8 @@ suite('Interpreters from Conda Environments Text File', () => { const pythonPath = path.join(interpreterPaths[0], 'pythonPath'); condaService.setup(c => c.condaEnvironmentsFile).returns(() => environmentsFilePath); condaService.setup(c => c.getInterpreterPath(TypeMoq.It.isAny())).returns(() => pythonPath); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(true)); fileSystem.setup(fs => fs.readFile(TypeMoq.It.isValue(environmentsFilePath))).returns(() => Promise.resolve(interpreterPaths.join(EOL))); for (const companyName of AnacondaCompanyNames) { diff --git a/src/test/interpreters/condaEnvService.test.ts b/src/test/interpreters/condaEnvService.test.ts index a3c7b93c7acc..82da233fb9e9 100644 --- a/src/test/interpreters/condaEnvService.test.ts +++ b/src/test/interpreters/condaEnvService.test.ts @@ -63,7 +63,7 @@ suite('Interpreters from Conda Environments', () => { }); info.envs.forEach(validPath => { const 
pythonPath = isWindows ? path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); @@ -100,7 +100,7 @@ suite('Interpreters from Conda Environments', () => { }); info.envs.forEach(validPath => { const pythonPath = isWindows ? path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); condaService.setup(c => c.getCondaFile()).returns(() => Promise.resolve('conda')); @@ -145,7 +145,7 @@ suite('Interpreters from Conda Environments', () => { }); info.envs.forEach(validPath => { const pythonPath = isWindows ? path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); @@ -183,7 +183,7 @@ suite('Interpreters from Conda Environments', () => { }); info.envs.forEach(validPath => { const pythonPath = isWindows ? path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); fileSystem.setup(fs => fs.arePathsSame(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((p1: string, p2: string) => isWindows ? p1 === p2 : p1.toUpperCase() === p2.toUpperCase()); @@ -213,7 +213,7 @@ suite('Interpreters from Conda Environments', () => { }); info.envs.forEach(validPath => { const pythonPath = isWindows ? path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); @@ -243,7 +243,7 @@ suite('Interpreters from Conda Environments', () => { }); info.envs.forEach(validPath => { const pythonPath = isWindows ? 
path.join(validPath, 'python.exe') : path.join(validPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); condaService.setup(c => c.getCondaFile()).returns(() => Promise.resolve('conda')); @@ -279,7 +279,7 @@ suite('Interpreters from Conda Environments', () => { return isWindows ? path.join(environmentPath, 'python.exe') : path.join(environmentPath, 'bin', 'python'); }); const pythonPath = isWindows ? path.join(info.default_prefix, 'python.exe') : path.join(info.default_prefix, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); interpreterVersion.setup(i => i.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns((_p, defaultValue) => Promise.resolve(defaultValue)); const interpreters = await condaProvider.parseCondaInfo(info); @@ -314,7 +314,7 @@ suite('Interpreters from Conda Environments', () => { return isWindows ? path.join(environmentPath, 'python.exe') : path.join(environmentPath, 'bin', 'python'); }); const pythonPath = isWindows ? path.join(envPath, 'python.exe') : path.join(envPath, 'bin', 'python'); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); }); const interpreters = await condaProvider.parseCondaInfo(info); diff --git a/src/test/interpreters/condaService.test.ts b/src/test/interpreters/condaService.test.ts index f6a16891e71a..fc4ed1da0e19 100644 --- a/src/test/interpreters/condaService.test.ts +++ b/src/test/interpreters/condaService.test.ts @@ -54,19 +54,19 @@ suite('Interpreters Conda Service', () => { test('Correctly identifies a python path as a conda environment (windows)', async () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'enva', 'python.exe'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await identifyPythonPathAsCondaEnvironment(true, false, false, pythonPath); }); test('Correctly identifies a python path as a conda environment (linux)', async () => { const pythonPath = path.join('users', 'xyz', '.conda', 'envs', 'enva', 'bin', 'python'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await identifyPythonPathAsCondaEnvironment(false, false, true, pythonPath); }); test('Correctly identifies a python path as a conda environment (osx)', async () => { const pythonPath = path.join('users', 'xyz', '.conda', 'envs', 'enva', 'bin', 'python'); - fileSystem.setup(f => 
f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await identifyPythonPathAsCondaEnvironment(false, true, false, pythonPath); }); @@ -75,8 +75,8 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => p.isWindows).returns(() => isWindows); platformService.setup(p => p.isMac).returns(() => isOsx); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(false)); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(false)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(false)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(false)); const isCondaEnv = await condaService.isCondaEnvironment(pythonPath); expect(isCondaEnv).to.be.equal(false, 'Path incorrectly identified as a conda path'); @@ -123,7 +123,7 @@ suite('Interpreters Conda Service', () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'one', 'python.exe'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await checkCondaNameAndPathForCondaEnvironments(true, false, false, pythonPath, condaEnvDir, { name: 'One', path: path.dirname(pythonPath) }); }); @@ -131,7 +131,7 @@ suite('Interpreters Conda Service', () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'eight 8', 'python.exe'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await checkCondaNameAndPathForCondaEnvironments(true, false, false, pythonPath, condaEnvDir, { name: 'Eight', path: path.dirname(pythonPath) }); }); @@ -139,7 +139,7 @@ suite('Interpreters Conda Service', () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'one', 'bin', 'python'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await checkCondaNameAndPathForCondaEnvironments(false, true, false, pythonPath, condaEnvDir, { name: 'One', path: path.join(path.dirname(pythonPath), '..') }); }); @@ -147,7 +147,7 @@ suite('Interpreters Conda Service', () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 
'envs', 'Eight 8', 'bin', 'python'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await checkCondaNameAndPathForCondaEnvironments(false, true, false, pythonPath, condaEnvDir, { name: 'Eight', path: path.join(path.dirname(pythonPath), '..') }); }); @@ -155,7 +155,7 @@ suite('Interpreters Conda Service', () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'one', 'bin', 'python'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await checkCondaNameAndPathForCondaEnvironments(false, false, true, pythonPath, condaEnvDir, { name: 'One', path: path.join(path.dirname(pythonPath), '..') }); }); @@ -163,7 +163,7 @@ suite('Interpreters Conda Service', () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'Eight 8', 'bin', 'python'); const condaEnvDir = path.join('c', 'users', 'xyz', '.conda', 'envs'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), '..', 'conda-meta')))).returns(() => Promise.resolve(true)); await checkCondaNameAndPathForCondaEnvironments(false, false, true, pythonPath, condaEnvDir, { name: 'Eight', path: path.join(path.dirname(pythonPath), '..') }); }); @@ -183,7 +183,7 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => p.isWindows).returns(() => true); platformService.setup(p => p.isMac).returns(() => false); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); const stateFactory = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IPersistentStateFactory))).returns(() => stateFactory.object); const state = new MockState({ data: condaEnvironments }); @@ -224,7 +224,7 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => p.isWindows).returns(() => true); platformService.setup(p => p.isMac).returns(() => false); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); const stateFactory = TypeMoq.Mock.ofType(); serviceContainer.setup(c => c.get(TypeMoq.It.isValue(IPersistentStateFactory))).returns(() => stateFactory.object); const state = new MockState({ data: condaEnvironments }); @@ -261,7 +261,7 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => 
p.isWindows).returns(() => true); processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('Not Found'))); registryInterpreterLocatorService.setup(r => r.getInterpreters(TypeMoq.It.isAny())).returns(() => Promise.resolve(registryInterpreters)); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(file === expectedCodnaPath)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(file === expectedCodnaPath)); const condaExe = await condaService.getCondaFile(); assert.equal(condaExe, expectedCodnaPath, 'Failed to identify conda.exe'); @@ -283,7 +283,7 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => p.isWindows).returns(() => true); processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('Not Found'))); registryInterpreterLocatorService.setup(r => r.getInterpreters(TypeMoq.It.isAny())).returns(() => Promise.resolve(registryInterpreters)); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(file === expectedCodnaPath)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(file === expectedCodnaPath)); const condaExe = await condaService.getCondaFile(); assert.equal(condaExe, expectedCodnaPath, 'Failed to identify conda.exe'); @@ -303,7 +303,7 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => p.isWindows).returns(() => true); processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('Not Found'))); registryInterpreterLocatorService.setup(r => r.getInterpreters(TypeMoq.It.isAny())).returns(() => Promise.resolve(registryInterpreters)); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(false)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(false)); const condaExe = await condaService.getCondaFile(); assert.equal(condaExe, 'conda', 'Failed to identify conda.exe'); @@ -340,7 +340,7 @@ suite('Interpreters Conda Service', () => { const expectedCondaLocation = untildify(knownLocation); platformService.setup(p => p.isWindows).returns(() => false); processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('Not Found'))); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(file === expectedCondaLocation)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(file === expectedCondaLocation)); const condaExe = await condaService.getCondaFile(); assert.equal(condaExe, expectedCondaLocation, 'Failed to identify'); @@ -350,7 +350,7 @@ suite('Interpreters Conda Service', () => { test('Must return \'conda\' if conda could not be found in known locations', async () => { platformService.setup(p => p.isWindows).returns(() => false); processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('Not Found'))); - fileSystem.setup(fs => 
fs.fileExistsAsync(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(false)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns((file: string) => Promise.resolve(false)); const condaExe = await condaService.getCondaFile(); assert.equal(condaExe, 'conda', 'Failed to identify'); @@ -466,7 +466,7 @@ suite('Interpreters Conda Service', () => { platformService.setup(p => p.isWindows).returns(() => true); processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('Not Found'))); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(expectedCodaExe))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(expectedCodaExe))).returns(() => Promise.resolve(true)); registryInterpreterLocatorService.setup(r => r.getInterpreters(TypeMoq.It.isAny())).returns(() => Promise.resolve(registryInterpreters)); const condaExe = await condaService.getCondaFile(); @@ -481,7 +481,7 @@ suite('Interpreters Conda Service', () => { test('isAvailable will return false if conda is not available', async () => { processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('not found'))); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isAny())).returns(() => Promise.resolve(false)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns(() => Promise.resolve(false)); platformService.setup(p => p.isWindows).returns(() => false); const isAvailable = await condaService.isCondaAvailable(); @@ -504,7 +504,7 @@ suite('Interpreters Conda Service', () => { test('isCondaInCurrentPath will return false if conda is not available', async () => { processService.setup(p => p.exec(TypeMoq.It.isValue('conda'), TypeMoq.It.isValue(['--version']), TypeMoq.It.isAny())).returns(() => Promise.reject(new Error('not found'))); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isAny())).returns(() => Promise.resolve(false)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isAny())).returns(() => Promise.resolve(false)); platformService.setup(p => p.isWindows).returns(() => false); const isAvailable = await condaService.isCondaInCurrentPath(); @@ -527,17 +527,17 @@ suite('Interpreters Conda Service', () => { } test('Fails to identify an environment as a conda env (windows)', async () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'one', 'python.exe'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await testFailureOfGettingCondaEnvironments(true, false, false, pythonPath); }); test('Fails to identify an environment as a conda env (linux)', async () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'one', 'python'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await testFailureOfGettingCondaEnvironments(false, false, true, pythonPath); }); test('Fails to identify an environment as a conda 
env (osx)', async () => { const pythonPath = path.join('c', 'users', 'xyz', '.conda', 'envs', 'one', 'python'); - fileSystem.setup(f => f.directoryExistsAsync(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.directoryExists(TypeMoq.It.isValue(path.join(path.dirname(pythonPath), 'conda-meta')))).returns(() => Promise.resolve(true)); await testFailureOfGettingCondaEnvironments(false, true, false, pythonPath); }); }); diff --git a/src/test/interpreters/currentPathService.test.ts b/src/test/interpreters/currentPathService.test.ts index 4d136735edf9..fc65ba18eb2f 100644 --- a/src/test/interpreters/currentPathService.test.ts +++ b/src/test/interpreters/currentPathService.test.ts @@ -63,10 +63,10 @@ suite('Interpreters CurrentPath Service', () => { processService.setup(p => p.exec(TypeMoq.It.isValue('python2'), TypeMoq.It.isValue(execArgs), TypeMoq.It.isAny())).returns(() => Promise.resolve({ stdout: 'c:/python2' })).verifiable(TypeMoq.Times.once()); processService.setup(p => p.exec(TypeMoq.It.isValue('python3'), TypeMoq.It.isValue(execArgs), TypeMoq.It.isAny())).returns(() => Promise.resolve({ stdout: 'c:/python3' })).verifiable(TypeMoq.Times.once()); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue('c:/root:python'))).returns(() => Promise.resolve(true)).verifiable(TypeMoq.Times.once()); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue('c:/python1'))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.once()); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue('c:/python2'))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.once()); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue('c:/python3'))).returns(() => Promise.resolve(true)).verifiable(TypeMoq.Times.once()); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue('c:/root:python'))).returns(() => Promise.resolve(true)).verifiable(TypeMoq.Times.once()); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue('c:/python1'))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.once()); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue('c:/python2'))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.once()); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue('c:/python3'))).returns(() => Promise.resolve(true)).verifiable(TypeMoq.Times.once()); const interpreters = await currentPathService.getInterpreters(); processService.verifyAll(); diff --git a/src/test/interpreters/display.test.ts b/src/test/interpreters/display.test.ts index fad85341c2a4..2d9d212d41a5 100644 --- a/src/test/interpreters/display.test.ts +++ b/src/test/interpreters/display.test.ts @@ -149,7 +149,7 @@ suite('Interpreters Display', () => { interpreterService.setup(i => i.getActiveInterpreter(TypeMoq.It.isValue(workspaceFolder))).returns(() => Promise.resolve(undefined)); configurationService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => pythonSettings.object); pythonSettings.setup(p => p.pythonPath).returns(() => pythonPath); - fileSystem.setup(f => f.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(false)); + fileSystem.setup(f => f.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(false)); const defaultDisplayName = `${path.basename(pythonPath)} [Environment]`; versionProvider.setup(v => v.getVersion(TypeMoq.It.isValue(pythonPath), TypeMoq.It.isAny())).returns(() => 
Promise.resolve(defaultDisplayName)); virtualEnvMgr.setup(v => v.getEnvironmentName(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve('')); @@ -169,7 +169,7 @@ suite('Interpreters Display', () => { interpreterService.setup(i => i.getActiveInterpreter(TypeMoq.It.isValue(workspaceFolder))).returns(() => Promise.resolve(undefined)); configurationService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => pythonSettings.object); pythonSettings.setup(p => p.pythonPath).returns(() => pythonPath); - fileSystem.setup(f => f.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); const defaultDisplayName = `${path.basename(pythonPath)} [Environment]`; versionProvider.setup(v => v.getVersion(TypeMoq.It.isValue(pythonPath), TypeMoq.It.isAny())).returns(() => Promise.resolve(defaultDisplayName)); // tslint:disable-next-line:no-any @@ -190,7 +190,7 @@ suite('Interpreters Display', () => { interpreterService.setup(i => i.getActiveInterpreter(TypeMoq.It.isValue(workspaceFolder))).returns(() => Promise.resolve(undefined)); configurationService.setup(c => c.getSettings(TypeMoq.It.isAny())).returns(() => pythonSettings.object); pythonSettings.setup(p => p.pythonPath).returns(() => pythonPath); - fileSystem.setup(f => f.fileExistsAsync(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); + fileSystem.setup(f => f.fileExists(TypeMoq.It.isValue(pythonPath))).returns(() => Promise.resolve(true)); const displayName = 'Version from Interperter'; versionProvider.setup(v => v.getVersion(TypeMoq.It.isValue(pythonPath), TypeMoq.It.isAny())).returns(() => Promise.resolve(displayName)); // tslint:disable-next-line:no-any diff --git a/src/test/interpreters/pipEnvService.test.ts b/src/test/interpreters/pipEnvService.test.ts index 65545cfc7b53..48584bb7c5cc 100644 --- a/src/test/interpreters/pipEnvService.test.ts +++ b/src/test/interpreters/pipEnvService.test.ts @@ -81,7 +81,7 @@ suite('Interpreters - PipEnv', () => { const env = {}; envVarsProvider.setup(e => e.getEnvironmentVariables(TypeMoq.It.isAny())).returns(() => Promise.resolve({})).verifiable(TypeMoq.Times.once()); currentProcess.setup(c => c.env).returns(() => env); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.once()); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.once()); const environments = await pipEnvService.getInterpreters(resource); expect(environments).to.be.deep.equal([]); @@ -91,7 +91,7 @@ suite('Interpreters - PipEnv', () => { const env = {}; currentProcess.setup(c => c.env).returns(() => env); processService.setup(p => p.exec(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.reject('')); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(true)); appShell.setup(a => a.showWarningMessage(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve('')).verifiable(TypeMoq.Times.once()); const environments = await pipEnvService.getInterpreters(resource); @@ -104,7 +104,7 @@ suite('Interpreters - 
PipEnv', () => { envVarsProvider.setup(e => e.getEnvironmentVariables(TypeMoq.It.isAny())).returns(() => Promise.resolve({})).verifiable(TypeMoq.Times.once()); currentProcess.setup(c => c.env).returns(() => env); processService.setup(p => p.exec(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve({ stderr: 'PipEnv Failed', stdout: '' })); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(true)); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(true)); appShell.setup(a => a.showWarningMessage(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve('')).verifiable(TypeMoq.Times.once()); const environments = await pipEnvService.getInterpreters(resource); @@ -119,8 +119,8 @@ suite('Interpreters - PipEnv', () => { currentProcess.setup(c => c.env).returns(() => env); processService.setup(p => p.exec(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve({ stdout: venvDir })); interpreterVersionService.setup(v => v.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve('xyz')); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(true)).verifiable(); - fileSystem.setup(fs => fs.directoryExistsAsync(TypeMoq.It.isValue(venvDir))).returns(() => Promise.resolve(true)).verifiable(); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(true)).verifiable(); + fileSystem.setup(fs => fs.directoryExists(TypeMoq.It.isValue(venvDir))).returns(() => Promise.resolve(true)).verifiable(); const environments = await pipEnvService.getInterpreters(resource); expect(environments).to.be.lengthOf(1); @@ -137,9 +137,9 @@ suite('Interpreters - PipEnv', () => { currentProcess.setup(c => c.env).returns(() => env); processService.setup(p => p.exec(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve({ stdout: venvDir })); interpreterVersionService.setup(v => v.getVersion(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => Promise.resolve('xyz')); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.never()); - fileSystem.setup(fs => fs.fileExistsAsync(TypeMoq.It.isValue(path.join(rootWorkspace, envPipFile)))).returns(() => Promise.resolve(true)).verifiable(TypeMoq.Times.once()); - fileSystem.setup(fs => fs.directoryExistsAsync(TypeMoq.It.isValue(venvDir))).returns(() => Promise.resolve(true)).verifiable(); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(path.join(rootWorkspace, 'Pipfile')))).returns(() => Promise.resolve(false)).verifiable(TypeMoq.Times.never()); + fileSystem.setup(fs => fs.fileExists(TypeMoq.It.isValue(path.join(rootWorkspace, envPipFile)))).returns(() => Promise.resolve(true)).verifiable(TypeMoq.Times.once()); + fileSystem.setup(fs => fs.directoryExists(TypeMoq.It.isValue(venvDir))).returns(() => Promise.resolve(true)).verifiable(); const environments = await pipEnvService.getInterpreters(resource); expect(environments).to.be.lengthOf(1); diff --git a/src/test/linters/lint.args.test.ts b/src/test/linters/lint.args.test.ts index 780aefb2fe61..4475ef2d94bf 100644 --- a/src/test/linters/lint.args.test.ts +++ 
b/src/test/linters/lint.args.test.ts @@ -52,7 +52,7 @@ suite('Linting - Arguments', () => { outputChannel = TypeMoq.Mock.ofType(); const fs = TypeMoq.Mock.ofType(); - fs.setup(x => x.fileExistsAsync(TypeMoq.It.isAny())).returns(() => new Promise((resolve, reject) => resolve(true))); + fs.setup(x => x.fileExists(TypeMoq.It.isAny())).returns(() => new Promise((resolve, reject) => resolve(true))); fs.setup(x => x.arePathsSame(TypeMoq.It.isAnyString(), TypeMoq.It.isAnyString())).returns(() => true); serviceManager.addSingletonInstance(IFileSystem, fs.object); diff --git a/src/test/linters/lint.provider.test.ts b/src/test/linters/lint.provider.test.ts index 023ee86223be..ee893b73db8f 100644 --- a/src/test/linters/lint.provider.test.ts +++ b/src/test/linters/lint.provider.test.ts @@ -39,7 +39,7 @@ suite('Linting - Provider', () => { context = TypeMoq.Mock.ofType(); fs = TypeMoq.Mock.ofType(); - fs.setup(x => x.fileExistsAsync(TypeMoq.It.isAny())).returns(() => new Promise((resolve, reject) => resolve(true))); + fs.setup(x => x.fileExists(TypeMoq.It.isAny())).returns(() => new Promise((resolve, reject) => resolve(true))); fs.setup(x => x.arePathsSame(TypeMoq.It.isAnyString(), TypeMoq.It.isAnyString())).returns(() => true); serviceManager.addSingletonInstance(IFileSystem, fs.object); diff --git a/src/test/linters/lintengine.test.ts b/src/test/linters/lintengine.test.ts index e11c2dcdc5c4..92bb25d17ba4 100644 --- a/src/test/linters/lintengine.test.ts +++ b/src/test/linters/lintengine.test.ts @@ -106,7 +106,7 @@ suite('Linting - LintingEngine', () => { }); function mockTextDocument(fileName: string, language: string, exists: boolean, ignorePattern: string[] = [], scheme?: string): TextDocument { - fileSystem.setup(x => x.fileExistsAsync(TypeMoq.It.isAnyString())).returns(() => Promise.resolve(exists)); + fileSystem.setup(x => x.fileExists(TypeMoq.It.isAnyString())).returns(() => Promise.resolve(exists)); lintSettings.setup(l => l.ignorePatterns).returns(() => ignorePattern); settings.setup(x => x.linting).returns(() => lintSettings.object); diff --git a/src/test/linters/pylint.test.ts b/src/test/linters/pylint.test.ts index ce08b0ac5c95..6b7479a3cb91 100644 --- a/src/test/linters/pylint.test.ts +++ b/src/test/linters/pylint.test.ts @@ -63,11 +63,11 @@ suite('Linting - Pylintrc search', () => { }); test('pylintrc in the file folder', async () => { - fileSystem.setup(x => x.fileExistsAsync(path.join(basePath, pylintrc))).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(path.join(basePath, pylintrc))).returns(() => Promise.resolve(true)); let result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the file folder.`); - fileSystem.setup(x => x.fileExistsAsync(path.join(basePath, dotPylintrc))).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(path.join(basePath, dotPylintrc))).returns(() => Promise.resolve(true)); result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${dotPylintrc}' not detected in the file folder.`); }); @@ -77,10 +77,10 @@ suite('Linting - Pylintrc search', () => { const module3 = path.join('/user/a/b', '__init__.py'); const rc = path.join('/user/a/b/c', pylintrc); - fileSystem.setup(x => x.fileExistsAsync(module1)).returns(() => Promise.resolve(true)); - fileSystem.setup(x => x.fileExistsAsync(module2)).returns(() => Promise.resolve(true)); - 
fileSystem.setup(x => x.fileExistsAsync(module3)).returns(() => Promise.resolve(true)); - fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(module1)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(module2)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(module3)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(rc)).returns(() => Promise.resolve(true)); const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the module tree.`); @@ -92,10 +92,10 @@ suite('Linting - Pylintrc search', () => { const module3 = path.join('/user/a/b', '__init__.py'); const rc = path.join('/user/a/b/c', pylintrc); - fileSystem.setup(x => x.fileExistsAsync(module1)).returns(() => Promise.resolve(true)); - fileSystem.setup(x => x.fileExistsAsync(module2)).returns(() => Promise.resolve(true)); - fileSystem.setup(x => x.fileExistsAsync(module3)).returns(() => Promise.resolve(true)); - fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(module1)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(module2)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(module3)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(rc)).returns(() => Promise.resolve(true)); const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${dotPylintrc}' not detected in the module tree.`); @@ -103,7 +103,7 @@ suite('Linting - Pylintrc search', () => { test('.pylintrc up the ~ folder', async () => { const home = os.homedir(); const rc = path.join(home, dotPylintrc); - fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(rc)).returns(() => Promise.resolve(true)); const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${dotPylintrc}' not detected in the ~ folder.`); @@ -111,14 +111,14 @@ suite('Linting - Pylintrc search', () => { test('pylintrc up the ~/.config folder', async () => { const home = os.homedir(); const rc = path.join(home, '.config', pylintrc); - fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(rc)).returns(() => Promise.resolve(true)); const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the ~/.config folder.`); }); test('pylintrc in the /etc folder', async () => { const rc = path.join('/etc', pylintrc); - fileSystem.setup(x => x.fileExistsAsync(rc)).returns(() => Promise.resolve(true)); + fileSystem.setup(x => x.fileExists(rc)).returns(() => Promise.resolve(true)); const result = await Pylint.hasConfigurationFile(fileSystem.object, basePath, platformService.object); expect(result).to.be.equal(true, `'${pylintrc}' not detected in the /etc folder.`); @@ -127,7 +127,7 @@ suite('Linting - Pylintrc search', () => { const root = '/user/a'; const midFolder = '/user/a/b'; fileSystem - .setup(x => x.fileExistsAsync(path.join(midFolder, pylintrc))) + .setup(x => x.fileExists(path.join(midFolder, pylintrc))) .returns(() => 
Promise.resolve(true)); const result = await Pylint.hasConfigrationFileInWorkspace(fileSystem.object, basePath, root); @@ -136,7 +136,7 @@ suite('Linting - Pylintrc search', () => { test('minArgs - pylintrc between the file and the workspace root', async () => { fileSystem - .setup(x => x.fileExistsAsync(path.join('/user/a/b', pylintrc))) + .setup(x => x.fileExists(path.join('/user/a/b', pylintrc))) .returns(() => Promise.resolve(true)); await testPylintArguments('/user/a/b/c', '/user/a', false); @@ -149,7 +149,7 @@ suite('Linting - Pylintrc search', () => { test('minArgs - pylintrc next to the file', async () => { const fileFolder = '/user/a/b/c'; fileSystem - .setup(x => x.fileExistsAsync(path.join(fileFolder, pylintrc))) + .setup(x => x.fileExists(path.join(fileFolder, pylintrc))) .returns(() => Promise.resolve(true)); await testPylintArguments(fileFolder, '/user/a', false); @@ -158,7 +158,7 @@ suite('Linting - Pylintrc search', () => { test('minArgs - pylintrc at the workspace root', async () => { const root = '/user/a'; fileSystem - .setup(x => x.fileExistsAsync(path.join(root, pylintrc))) + .setup(x => x.fileExists(path.join(root, pylintrc))) .returns(() => Promise.resolve(true)); await testPylintArguments('/user/a/b/c', root, false); From 84214e108760dca9c3483c3898cc0032828b3083 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 17 May 2018 16:37:40 -0700 Subject: [PATCH 59/83] Remove Pythia --- package.json | 6 ------ src/client/activation/analysis.ts | 11 +---------- src/client/activation/downloader.ts | 8 ++++---- src/client/common/configSettings.ts | 4 ++-- src/client/common/types.ts | 2 +- 5 files changed, 8 insertions(+), 23 deletions(-) diff --git a/package.json b/package.json index ec57caec2beb..c1667e114834 100644 --- a/package.json +++ b/package.json @@ -1232,12 +1232,6 @@ "description": "Whether to install Python modules globally when not using an environment.", "scope": "resource" }, - "python.pythiaEnabled": { - "type": "boolean", - "default": true, - "description": "Enables AI-driven additions to the completion list. 
Does not apply to Jedi.", - "scope": "resource" - }, "python.jediEnabled": { "type": "boolean", "default": true, diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 8418b2c20c77..7d3cba9c9f29 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -108,8 +108,6 @@ export class AnalysisExtensionActivator implements IExtensionActivator { const reporter = getTelemetryReporter(); reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ENABLED); - await this.checkPythiaModel(context, downloader); - if (!await this.fs.fileExists(mscorlib)) { // Depends on .NET Runtime or SDK this.languageClient = this.createSimpleLanguageClient(context, clientOptions); @@ -254,7 +252,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { maxDocumentationTextLength: 0 }, asyncStartup: true, - pythiaEnabled: settings.pythiaEnabled, + intelliCodeEnabled: settings.intelliCodeEnabled, testEnvironment: isTestExecution() } }; @@ -265,11 +263,4 @@ export class AnalysisExtensionActivator implements IExtensionActivator { const result = await ps.exec('dotnet', ['--version']).catch(() => { return { stdout: '' }; }); return result.stdout.trim().startsWith('2.'); } - - private async checkPythiaModel(context: ExtensionContext, downloader: AnalysisEngineDownloader): Promise { - const settings = this.configuration.getSettings(); - if (settings.pythiaEnabled) { - await downloader.downloadPythiaModel(context); - } - } } diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index 52e9136a4951..a1e4b8279aeb 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -21,7 +21,7 @@ const downloadUriPrefix = 'https://pvsc.blob.core.windows.net/python-analysis'; const downloadBaseFileName = 'python-analysis-vscode'; const downloadVersion = '0.1.0'; const downloadFileExtension = '.nupkg'; -const pythiaModelName = 'model-sequence.json.gz'; +const modelName = 'model-sequence.json.gz'; export class AnalysisEngineDownloader { private readonly output: OutputChannel; @@ -56,16 +56,16 @@ export class AnalysisEngineDownloader { } } - public async downloadPythiaModel(context: ExtensionContext): Promise { + public async downloadIntelliCodeModel(context: ExtensionContext): Promise { const modelFolder = path.join(context.extensionPath, 'analysis', 'Pythia', 'model'); - const localPath = path.join(modelFolder, pythiaModelName); + const localPath = path.join(modelFolder, modelName); if (await this.fs.fileExists(localPath)) { return; } let localTempFilePath = ''; try { - localTempFilePath = await this.downloadFile(downloadUriPrefix, pythiaModelName, 'Downloading IntelliSense Model File... '); + localTempFilePath = await this.downloadFile(downloadUriPrefix, modelName, 'Downloading IntelliCode Model File... 
'); await this.fs.createDirectory(modelFolder); await this.fs.copyFile(localTempFilePath, localPath); } catch (err) { diff --git a/src/client/common/configSettings.ts b/src/client/common/configSettings.ts index e659fe1f0a2f..d447fc4523e4 100644 --- a/src/client/common/configSettings.ts +++ b/src/client/common/configSettings.ts @@ -25,7 +25,7 @@ export const IS_WINDOWS = /^win/.test(process.platform); // tslint:disable-next-line:completed-docs export class PythonSettings extends EventEmitter implements IPythonSettings { private static pythonSettings: Map = new Map(); - public pythiaEnabled = true; + public intelliCodeEnabled = true; public jediEnabled = true; public jediPath = ''; public jediMemoryLimit = 1024; @@ -126,7 +126,7 @@ export class PythonSettings extends EventEmitter implements IPythonSettings { } this.jediMemoryLimit = pythonSettings.get('jediMemoryLimit')!; } else { - this.pythiaEnabled = systemVariables.resolveAny(pythonSettings.get('pythiaEnabled', true))!; + this.intelliCodeEnabled = systemVariables.resolveAny(pythonSettings.get('intelliCodeEnabled', true))!; } // tslint:disable-next-line:no-backbone-get-set-outside-model no-non-null-assertion diff --git a/src/client/common/types.ts b/src/client/common/types.ts index 2bec7bb18256..cf300cf6ab0e 100644 --- a/src/client/common/types.ts +++ b/src/client/common/types.ts @@ -99,7 +99,7 @@ export interface IPythonSettings { readonly pythonPath: string; readonly venvPath: string; readonly venvFolders: string[]; - readonly pythiaEnabled: boolean; + readonly intelliCodeEnabled: boolean; readonly jediEnabled: boolean; readonly jediPath: string; readonly jediMemoryLimit: number; From 9c1adb122de38924c29a57bbec2f06fa3b66a4a5 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 18 May 2018 13:52:15 -0700 Subject: [PATCH 60/83] Remove pre-packaged MSIL --- src/client/activation/analysis.ts | 30 ++++++----------------------- src/client/common/configSettings.ts | 2 ++ src/client/common/types.ts | 1 + 3 files changed, 9 insertions(+), 24 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 7d3cba9c9f29..8072e99d32af 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -9,7 +9,6 @@ import { IApplicationShell } from '../common/application/types'; import { isTestExecution, STANDARD_OUTPUT_CHANNEL } from '../common/constants'; import { createDeferred, Deferred } from '../common/helpers'; import { IFileSystem, IPlatformService } from '../common/platform/types'; -import { IProcessServiceFactory } from '../common/process/types'; import { StopWatch } from '../common/stopWatch'; import { IConfigurationService, IOutputChannel, IPythonSettings } from '../common/types'; import { IEnvironmentVariablesProvider } from '../common/variables/types'; @@ -103,30 +102,19 @@ export class AnalysisExtensionActivator implements IExtensionActivator { // Determine if we are running MSIL/Universal via dotnet or self-contained app. const mscorlib = path.join(context.extensionPath, analysisEngineFolder, 'mscorlib.dll'); const downloader = new AnalysisEngineDownloader(this.services, analysisEngineFolder); - let downloadPackage = false; const reporter = getTelemetryReporter(); reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ENABLED); - if (!await this.fs.fileExists(mscorlib)) { - // Depends on .NET Runtime or SDK + const settings = this.configuration.getSettings(); + if (!settings.downloadCodeAnalysis) { + // Depends on .NET Runtime or SDK. Typically development-only case. 
this.languageClient = this.createSimpleLanguageClient(context, clientOptions); - try { - await this.tryStartLanguageClient(context, this.languageClient); - return true; - } catch (ex) { - if (await this.isDotNetInstalled()) { - this.appShell.showErrorMessage(`.NET Runtime appears to be installed but the language server did not start. Error ${ex}`); - reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ERROR, { error: 'Failed to start (MSIL)' }); - return false; - } - // No .NET Runtime, no mscorlib - need to download self-contained package. - downloadPackage = true; - } + await this.tryStartLanguageClient(context, this.languageClient); + return true; } - if (downloadPackage) { - this.appShell.showWarningMessage('.NET Runtime is not found, platform-specific Python Analysis Engine will be downloaded.'); + if (!await this.fs.fileExists(mscorlib)) { await downloader.downloadAnalysisEngine(context); reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_DOWNLOADED); } @@ -257,10 +245,4 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } }; } - - private async isDotNetInstalled(): Promise { - const ps = await this.services.get(IProcessServiceFactory).create(); - const result = await ps.exec('dotnet', ['--version']).catch(() => { return { stdout: '' }; }); - return result.stdout.trim().startsWith('2.'); - } } diff --git a/src/client/common/configSettings.ts b/src/client/common/configSettings.ts index d447fc4523e4..63408c7c5638 100644 --- a/src/client/common/configSettings.ts +++ b/src/client/common/configSettings.ts @@ -26,6 +26,7 @@ export const IS_WINDOWS = /^win/.test(process.platform); export class PythonSettings extends EventEmitter implements IPythonSettings { private static pythonSettings: Map = new Map(); public intelliCodeEnabled = true; + public downloadCodeAnalysis = true; public jediEnabled = true; public jediPath = ''; public jediMemoryLimit = 1024; @@ -115,6 +116,7 @@ export class PythonSettings extends EventEmitter implements IPythonSettings { this.venvPath = systemVariables.resolveAny(pythonSettings.get('venvPath'))!; this.venvFolders = systemVariables.resolveAny(pythonSettings.get('venvFolders'))!; + this.downloadCodeAnalysis = systemVariables.resolveAny(pythonSettings.get('downloadCodeAnalysis', true))!; this.jediEnabled = systemVariables.resolveAny(pythonSettings.get('jediEnabled', true))!; if (this.jediEnabled) { // tslint:disable-next-line:no-backbone-get-set-outside-model no-non-null-assertion diff --git a/src/client/common/types.ts b/src/client/common/types.ts index cf300cf6ab0e..4d8baa55b4ea 100644 --- a/src/client/common/types.ts +++ b/src/client/common/types.ts @@ -100,6 +100,7 @@ export interface IPythonSettings { readonly venvPath: string; readonly venvFolders: string[]; readonly intelliCodeEnabled: boolean; + readonly downloadCodeAnalysis: boolean; readonly jediEnabled: boolean; readonly jediPath: string; readonly jediMemoryLimit: number; From 5a6e546cc158a2bcb0e91a3549e54a93e2ee27f8 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Fri, 18 May 2018 16:28:01 -0700 Subject: [PATCH 61/83] Exe name on Unix --- src/client/activation/platformData.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/activation/platformData.ts b/src/client/activation/platformData.ts index 2a1cb29da461..9fbb10cc8cc9 100644 --- a/src/client/activation/platformData.ts +++ b/src/client/activation/platformData.ts @@ -58,7 +58,7 @@ export class PlatformData { public getEngineExecutableName(): string { return this.platform.isWindows ? 
'Microsoft.PythonTools.VsCode.exe'
-            : 'Microsoft.PythonTools.VsCode';
+            : 'Microsoft.PythonTools.VsCode.VsCode';
     }
     public async getExpectedHash(): Promise {

From 1f2ae09d2011153dd1b2475079be25607ef890bf Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Fri, 18 May 2018 16:34:39 -0700
Subject: [PATCH 62/83] Plain linux

---
 src/client/activation/analysisEngineHashes.ts |  8 +--
 src/client/activation/platformData.ts         | 54 ++-----------------
 2 files changed, 5 insertions(+), 57 deletions(-)

diff --git a/src/client/activation/analysisEngineHashes.ts b/src/client/activation/analysisEngineHashes.ts
index 2f9123a46c59..c4b1c30af6de 100644
--- a/src/client/activation/analysisEngineHashes.ts
+++ b/src/client/activation/analysisEngineHashes.ts
@@ -7,10 +7,4 @@ export const analysis_engine_win_x86_sha512 = 'win-x86';
 export const analysis_engine_win_x64_sha512 = 'win-x64';
 export const analysis_engine_osx_x64_sha512 = 'osx-x64';
-export const analysis_engine_centos_x64_sha512 = 'centos-x64';
-export const analysis_engine_debian_x64_sha512 = 'debian-x64';
-export const analysis_engine_fedora_x64_sha512 = 'fedora-x64';
-export const analysis_engine_ol_x64_sha512 = 'ol-x64';
-export const analysis_engine_opensuse_x64_sha512 = 'opensuse-x64';
-export const analysis_engine_rhel_x64_sha512 = 'rhel-x64';
-export const analysis_engine_ubuntu_x64_sha512 = 'ubuntu-x64';
+export const analysis_engine_linux_x64_sha512 = 'linux-x64';
diff --git a/src/client/activation/platformData.ts b/src/client/activation/platformData.ts
index 9fbb10cc8cc9..466955c496e5 100644
--- a/src/client/activation/platformData.ts
+++ b/src/client/activation/platformData.ts
@@ -3,31 +3,14 @@ import { IFileSystem, IPlatformService } from '../common/platform/types';
 import {
-    analysis_engine_centos_x64_sha512,
-    analysis_engine_debian_x64_sha512,
-    analysis_engine_fedora_x64_sha512,
-    analysis_engine_ol_x64_sha512,
-    analysis_engine_opensuse_x64_sha512,
+    analysis_engine_linux_x64_sha512,
     analysis_engine_osx_x64_sha512,
-    analysis_engine_rhel_x64_sha512,
-    analysis_engine_ubuntu_x64_sha512,
     analysis_engine_win_x64_sha512,
     analysis_engine_win_x86_sha512
 } from './analysisEngineHashes';
-// '/etc/os-release', ID=flavor
-const supportedLinuxFlavors = [
-    'centos',
-    'debian',
-    'fedora',
-    'ol',
-    'opensuse',
-    'rhel',
-    'ubuntu'
-];
-
 export class PlatformData {
-    constructor(private platform: IPlatformService, private fs: IFileSystem) { }
+    constructor(private platform: IPlatformService, fs: IFileSystem) { }
     public async getPlatformName(): Promise {
         if (this.platform.isWindows) {
             return this.platform.is64bit ? 'win-x64' : 'win-x86';
@@ -39,14 +22,7 @@ export class PlatformData {
             if (!this.platform.is64bit) {
                 throw new Error('Python Analysis Engine does not support 32-bit Linux.');
             }
-            const linuxFlavor = await this.getLinuxFlavor();
-            if (linuxFlavor.length === 0) {
-                throw new Error('Unable to determine Linux flavor from /etc/os-release.');
-            }
-            if (supportedLinuxFlavors.indexOf(linuxFlavor) < 0) {
-                throw new Error(`${linuxFlavor} is not supported.`);
-            }
-            return `${linuxFlavor}-x64`;
+            return 'linux-x64';
         }
         throw new Error('Unknown OS platform.');
     }
@@ -69,30 +45,8 @@ export class PlatformData {
             return analysis_engine_osx_x64_sha512;
         }
         if (this.platform.isLinux && this.platform.is64bit) {
-            const linuxFlavor = await this.getLinuxFlavor();
-            // tslint:disable-next-line:switch-default
-            switch (linuxFlavor) {
-                case 'centos': return analysis_engine_centos_x64_sha512;
-                case 'debian': return analysis_engine_debian_x64_sha512;
-                case 'fedora': return analysis_engine_fedora_x64_sha512;
-                case 'ol': return analysis_engine_ol_x64_sha512;
-                case 'opensuse': return analysis_engine_opensuse_x64_sha512;
-                case 'rhel': return analysis_engine_rhel_x64_sha512;
-                case 'ubuntu': return analysis_engine_ubuntu_x64_sha512;
-            }
+            return analysis_engine_linux_x64_sha512;
         }
         throw new Error('Unknown platform.');
     }
-
-    private async getLinuxFlavor(): Promise {
-        const verFile = '/etc/os-release';
-        const data = await this.fs.readFile(verFile);
-        if (data) {
-            const res = /ID=(.*)/.exec(data);
-            if (res && res.length > 1) {
-                return res[1];
-            }
-        }
-        return '';
-    }
 }

From f972614c469dcbaca0ee561859f657d325e6b85a Mon Sep 17 00:00:00 2001
From: Mikhail Arkhipov
Date: Fri, 18 May 2018 22:09:41 -0700
Subject: [PATCH 63/83] Fix casing

---
 src/client/activation/downloader.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts
index 52e9136a4951..b5a4dd4f4856 100644
--- a/src/client/activation/downloader.ts
+++ b/src/client/activation/downloader.ts
@@ -18,7 +18,7 @@ import { PlatformData } from './platformData';
 const StreamZip = require('node-stream-zip');
 const downloadUriPrefix = 'https://pvsc.blob.core.windows.net/python-analysis';
-const downloadBaseFileName = 'python-analysis-vscode';
+const downloadBaseFileName = 'Python-Analysis-VSCode';
 const downloadVersion = '0.1.0';
 const downloadFileExtension = '.nupkg';
 const pythiaModelName = 'model-sequence.json.gz';

From d2721cd9f7fe1c0013a93e109d924893bf1e4e23 Mon Sep 17 00:00:00 2001
From: MikhailArkhipov
Date: Fri, 18 May 2018 22:21:24 -0700
Subject: [PATCH 64/83] Fix message

---
 src/client/activation/downloader.ts | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts
index a1e4b8279aeb..b1c9d3eb9ca9 100644
--- a/src/client/activation/downloader.ts
+++ b/src/client/activation/downloader.ts
@@ -129,11 +129,10 @@ export class AnalysisEngineDownloader {
         if (!await verifier.verifyHash(filePath, platformString, await this.platformData.getExpectedHash())) {
             throw new Error('Hash of the downloaded file does not match.');
         }
-        this.output.append('valid.');
+        this.output.appendLine('valid.');
     }
     private async unpackArchive(extensionPath: string, tempFilePath: string): Promise {
-        this.output.appendLine('');
         this.output.append('Unpacking archive... 
'); const installFolder = path.join(extensionPath, this.engineFolder); @@ -170,12 +169,12 @@ export class AnalysisEngineDownloader { }); return deferred.promise; }); - this.output.append('done.'); // Set file to executable if (!this.platform.isWindows) { const executablePath = path.join(installFolder, this.platformData.getEngineExecutableName()); fileSystem.chmodSync(executablePath, '0764'); // -rwxrw-r-- } + this.output.appendLine('done.'); } } From 56d34f79c6c9a27a7286bbfae1aea7959aef4c35 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Mon, 21 May 2018 11:22:30 -0700 Subject: [PATCH 65/83] Update PTVS engine activation steps --- CONTRIBUTING - PYTHON_ANALYSIS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING - PYTHON_ANALYSIS.md b/CONTRIBUTING - PYTHON_ANALYSIS.md index 03563dac7efd..0e8ce38c8c9a 100644 --- a/CONTRIBUTING - PYTHON_ANALYSIS.md +++ b/CONTRIBUTING - PYTHON_ANALYSIS.md @@ -32,6 +32,7 @@ Visual Studio 2017: 3. Binaries arrive in *Python/BuildOutput/VsCode/raw* 4. Delete contents of the *analysis* folder in the Python Extension folder 5. Copy *.dll, *.pdb, *.json fron *Python/BuildOutput/VsCode/raw* to *analysis* +6. In VS Code set setting *python.downloadCodeAnalysis* to *false* ### Debugging code in Python Extension to VS Code Folow regular TypeScript debugging steps From 981290fe7c53b3701d21939d78fa23f367b2e71f Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 22 May 2018 10:05:57 -0700 Subject: [PATCH 66/83] Type formatter eats space in from . --- src/client/formatters/lineFormatter.ts | 10 ++++++++++ src/test/format/extension.lineFormatter.test.ts | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 28a71f6ff08d..9bd256f50177 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -98,6 +98,8 @@ export class LineFormatter { private handleOperator(index: number): void { const t = this.tokens.getItemAt(index); const prev = index > 0 ? this.tokens.getItemAt(index - 1) : undefined; + const next = index < this.tokens.count - 1 ? this.tokens.getItemAt(index + 1) : undefined; + if (t.length === 1) { const opCode = this.text.charCodeAt(t.start); switch (opCode) { @@ -107,6 +109,14 @@ export class LineFormatter { } break; case Char.Period: + if (prev && this.isKeyword(prev, 'from')) { + this.builder.softAppendSpace(); + } + this.builder.append(this.text[t.start]); + if (next && this.isKeyword(next, 'import')) { + this.builder.softAppendSpace(); + } + return; case Char.At: case Char.ExclamationMark: this.builder.append(this.text[t.start]); diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 46ca5e46f816..656f22c3ffd6 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -112,6 +112,15 @@ suite('Formatting - line formatter', () => { test('Function returning tuple', () => { testFormatLine('x,y=f(a)', 'x, y = f(a)'); }); + test('from. import A', () => { + testFormatLine('from. import A', 'from . import A'); + }); + test('from .. import', () => { + testFormatLine('from ..import', 'from .. 
import'); + }); + test('from..x import', () => { + testFormatLine('from..x import', 'from ..x import'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); From d279e96e0676a28b454d2f68899320dd067f82bb Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 22 May 2018 14:06:48 -0700 Subject: [PATCH 67/83] fIX CASING --- src/client/activation/hashVerifier.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/activation/hashVerifier.ts b/src/client/activation/hashVerifier.ts index c62cb36484f7..61d1177966f9 100644 --- a/src/client/activation/hashVerifier.ts +++ b/src/client/activation/hashVerifier.ts @@ -22,7 +22,7 @@ export class HashVerifier { readStream.pipe(hash); await deferred.promise; - const actual = hash.read(); - return expectedDigest === platformString ? true : actual === expectedDigest; + const actual = hash.read() as string; + return expectedDigest === platformString ? true : actual.toLowerCase() === expectedDigest.toLowerCase(); } } From 6b466a93917b274970fafd9a8dbfa23e5b31cf18 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 23 May 2018 10:39:08 -0700 Subject: [PATCH 68/83] Remove flag --- src/client/activation/analysis.ts | 1 - src/client/activation/downloader.ts | 24 ------------------------ src/client/common/configSettings.ts | 3 --- src/client/common/types.ts | 1 - 4 files changed, 29 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index 8072e99d32af..bf8fd4c9e587 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -240,7 +240,6 @@ export class AnalysisExtensionActivator implements IExtensionActivator { maxDocumentationTextLength: 0 }, asyncStartup: true, - intelliCodeEnabled: settings.intelliCodeEnabled, testEnvironment: isTestExecution() } }; diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index f6d7036ad12a..f28075571579 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -21,7 +21,6 @@ const downloadUriPrefix = 'https://pvsc.blob.core.windows.net/python-analysis'; const downloadBaseFileName = 'Python-Analysis-VSCode'; const downloadVersion = '0.1.0'; const downloadFileExtension = '.nupkg'; -const modelName = 'model-sequence.json.gz'; export class AnalysisEngineDownloader { private readonly output: OutputChannel; @@ -56,29 +55,6 @@ export class AnalysisEngineDownloader { } } - public async downloadIntelliCodeModel(context: ExtensionContext): Promise { - const modelFolder = path.join(context.extensionPath, 'analysis', 'Pythia', 'model'); - const localPath = path.join(modelFolder, modelName); - if (await this.fs.fileExists(localPath)) { - return; - } - - let localTempFilePath = ''; - try { - localTempFilePath = await this.downloadFile(downloadUriPrefix, modelName, 'Downloading IntelliCode Model File... '); - await this.fs.createDirectory(modelFolder); - await this.fs.copyFile(localTempFilePath, localPath); - } catch (err) { - this.output.appendLine('failed.'); - this.output.appendLine(err); - throw new Error(err); - } finally { - if (localTempFilePath.length > 0) { - await this.fs.deleteFile(localTempFilePath); - } - } - } - private async downloadFile(location: string, fileName: string, title: string): Promise { const uri = `${location}/${fileName}`; this.output.append(`Downloading ${uri}... 
`); diff --git a/src/client/common/configSettings.ts b/src/client/common/configSettings.ts index 63408c7c5638..033a33096cab 100644 --- a/src/client/common/configSettings.ts +++ b/src/client/common/configSettings.ts @@ -25,7 +25,6 @@ export const IS_WINDOWS = /^win/.test(process.platform); // tslint:disable-next-line:completed-docs export class PythonSettings extends EventEmitter implements IPythonSettings { private static pythonSettings: Map = new Map(); - public intelliCodeEnabled = true; public downloadCodeAnalysis = true; public jediEnabled = true; public jediPath = ''; @@ -127,8 +126,6 @@ export class PythonSettings extends EventEmitter implements IPythonSettings { this.jediPath = ''; } this.jediMemoryLimit = pythonSettings.get('jediMemoryLimit')!; - } else { - this.intelliCodeEnabled = systemVariables.resolveAny(pythonSettings.get('intelliCodeEnabled', true))!; } // tslint:disable-next-line:no-backbone-get-set-outside-model no-non-null-assertion diff --git a/src/client/common/types.ts b/src/client/common/types.ts index 4d8baa55b4ea..8fcebdef6a03 100644 --- a/src/client/common/types.ts +++ b/src/client/common/types.ts @@ -99,7 +99,6 @@ export interface IPythonSettings { readonly pythonPath: string; readonly venvPath: string; readonly venvFolders: string[]; - readonly intelliCodeEnabled: boolean; readonly downloadCodeAnalysis: boolean; readonly jediEnabled: boolean; readonly jediPath: string; From 2904f3b2d7938969bf96304f67a98676a547caf9 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 23 May 2018 17:06:36 -0700 Subject: [PATCH 69/83] Don't wait for LS --- src/client/activation/analysis.ts | 51 +++---------------------------- 1 file changed, 4 insertions(+), 47 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index bf8fd4c9e587..f1d081e0022a 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -3,11 +3,9 @@ import * as path from 'path'; import { ExtensionContext, OutputChannel } from 'vscode'; -import { Message } from 'vscode-jsonrpc'; -import { CloseAction, Disposable, ErrorAction, ErrorHandler, LanguageClient, LanguageClientOptions, ServerOptions } from 'vscode-languageclient'; +import { Disposable, LanguageClient, LanguageClientOptions, ServerOptions } from 'vscode-languageclient'; import { IApplicationShell } from '../common/application/types'; import { isTestExecution, STANDARD_OUTPUT_CHANNEL } from '../common/constants'; -import { createDeferred, Deferred } from '../common/helpers'; import { IFileSystem, IPlatformService } from '../common/platform/types'; import { StopWatch } from '../common/stopWatch'; import { IConfigurationService, IOutputChannel, IPythonSettings } from '../common/types'; @@ -17,8 +15,7 @@ import { IServiceContainer } from '../ioc/types'; import { PYTHON_ANALYSIS_ENGINE_DOWNLOADED, PYTHON_ANALYSIS_ENGINE_ENABLED, - PYTHON_ANALYSIS_ENGINE_ERROR, - PYTHON_ANALYSIS_ENGINE_STARTUP + PYTHON_ANALYSIS_ENGINE_ERROR } from '../telemetry/constants'; import { getTelemetryReporter } from '../telemetry/telemetry'; import { AnalysisEngineDownloader } from './downloader'; @@ -31,18 +28,6 @@ const dotNetCommand = 'dotnet'; const languageClientName = 'Python Tools'; const analysisEngineFolder = 'analysis'; -class LanguageServerStartupErrorHandler implements ErrorHandler { - constructor(private readonly deferred: Deferred) { } - public error(error: Error, message: Message, count: number): ErrorAction { - this.deferred.reject(error); - return ErrorAction.Continue; - } - public closed(): 
CloseAction { - this.deferred.reject(); - return CloseAction.Restart; - } -} - export class AnalysisExtensionActivator implements IExtensionActivator { private readonly configuration: IConfigurationService; private readonly appShell: IApplicationShell; @@ -110,7 +95,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { if (!settings.downloadCodeAnalysis) { // Depends on .NET Runtime or SDK. Typically development-only case. this.languageClient = this.createSimpleLanguageClient(context, clientOptions); - await this.tryStartLanguageClient(context, this.languageClient); + context.subscriptions.push(this.languageClient.start()); return true; } @@ -123,7 +108,7 @@ export class AnalysisExtensionActivator implements IExtensionActivator { // Now try to start self-contained app this.languageClient = this.createSelfContainedLanguageClient(context, serverModule, clientOptions); try { - await this.tryStartLanguageClient(context, this.languageClient); + context.subscriptions.push(this.languageClient.start()); return true; } catch (ex) { this.appShell.showErrorMessage(`Language server failed to start. Error ${ex}`); @@ -132,34 +117,6 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } } - private async tryStartLanguageClient(context: ExtensionContext, lc: LanguageClient): Promise { - let disposable: Disposable | undefined; - const deferred = createDeferred(); - try { - const sw = new StopWatch(); - lc.clientOptions.errorHandler = new LanguageServerStartupErrorHandler(deferred); - - disposable = lc.start(); - lc.onReady() - .then(() => deferred.resolve()) - .catch((reason) => { - deferred.reject(reason); - }); - await deferred.promise; - - this.output.appendLine(`Language server ready: ${this.sw.elapsedTime} ms`); - context.subscriptions.push(disposable); - - const reporter = getTelemetryReporter(); - reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_STARTUP, {}, { startup_time: sw.elapsedTime }); - } catch (ex) { - if (disposable) { - disposable.dispose(); - } - throw ex; - } - } - private createSimpleLanguageClient(context: ExtensionContext, clientOptions: LanguageClientOptions): LanguageClient { const commandOptions = { stdio: 'pipe' }; const serverModule = path.join(context.extensionPath, analysisEngineFolder, this.platformData.getEngineDllName()); From c7d34c92022c17d3778c6f0ea6c08d1d08479006 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 28 May 2018 13:21:05 -0700 Subject: [PATCH 70/83] Small test fixes --- src/client/activation/analysis.ts | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/client/activation/analysis.ts b/src/client/activation/analysis.ts index f1d081e0022a..69835f4aff22 100644 --- a/src/client/activation/analysis.ts +++ b/src/client/activation/analysis.ts @@ -85,8 +85,6 @@ export class AnalysisExtensionActivator implements IExtensionActivator { private async startLanguageServer(context: ExtensionContext, clientOptions: LanguageClientOptions): Promise { // Determine if we are running MSIL/Universal via dotnet or self-contained app. - const mscorlib = path.join(context.extensionPath, analysisEngineFolder, 'mscorlib.dll'); - const downloader = new AnalysisEngineDownloader(this.services, analysisEngineFolder); const reporter = getTelemetryReporter(); reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_ENABLED); @@ -95,20 +93,21 @@ export class AnalysisExtensionActivator implements IExtensionActivator { if (!settings.downloadCodeAnalysis) { // Depends on .NET Runtime or SDK. 
Typically development-only case. this.languageClient = this.createSimpleLanguageClient(context, clientOptions); - context.subscriptions.push(this.languageClient.start()); + await this.startLanguageClient(context); return true; } + const mscorlib = path.join(context.extensionPath, analysisEngineFolder, 'mscorlib.dll'); if (!await this.fs.fileExists(mscorlib)) { + const downloader = new AnalysisEngineDownloader(this.services, analysisEngineFolder); await downloader.downloadAnalysisEngine(context); reporter.sendTelemetryEvent(PYTHON_ANALYSIS_ENGINE_DOWNLOADED); } const serverModule = path.join(context.extensionPath, analysisEngineFolder, this.platformData.getEngineExecutableName()); - // Now try to start self-contained app this.languageClient = this.createSelfContainedLanguageClient(context, serverModule, clientOptions); try { - context.subscriptions.push(this.languageClient.start()); + await this.startLanguageClient(context); return true; } catch (ex) { this.appShell.showErrorMessage(`Language server failed to start. Error ${ex}`); @@ -117,6 +116,13 @@ export class AnalysisExtensionActivator implements IExtensionActivator { } } + private async startLanguageClient(context: ExtensionContext): Promise { + context.subscriptions.push(this.languageClient!.start()); + if (isTestExecution()) { + await this.languageClient!.onReady(); + } + } + private createSimpleLanguageClient(context: ExtensionContext, clientOptions: LanguageClientOptions): LanguageClient { const commandOptions = { stdio: 'pipe' }; const serverModule = path.join(context.extensionPath, analysisEngineFolder, this.platformData.getEngineDllName()); From 17775bd0b7c3ac5b637113f26b7d994ddeff2ad7 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Mon, 28 May 2018 14:37:25 -0700 Subject: [PATCH 71/83] Update hover baselines --- src/test/definitions/hover.ptvs.test.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/test/definitions/hover.ptvs.test.ts b/src/test/definitions/hover.ptvs.test.ts index 089245836090..cb297e0e9374 100644 --- a/src/test/definitions/hover.ptvs.test.ts +++ b/src/test/definitions/hover.ptvs.test.ts @@ -53,7 +53,7 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'obj.method1:', '```python', - 'method method1 of one.Class1 objects', + 'method method1 of pythonFiles.autocomp.one.Class1 objects', '```', 'This is method1' ]; @@ -70,7 +70,7 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'two.ct().fun:', '```python', - 'method fun of two.ct objects', + 'method fun of pythonFiles.autocomp.two.ct objects', '```', 'This is fun' ]; @@ -86,7 +86,7 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ '```python', - 'four.Foo.bar() -> bool', + 'pythonFiles.autocomp.four.Foo.bar() -> bool', 'declared in Foo', '```', '说明 - keep this line, it works', @@ -105,7 +105,7 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ '```python', - 'four.showMessage()', + 'pythonFiles.autocomp.four.showMessage()', '```', 'Кюм ут жэмпэр пошжим льаборэж, коммюны янтэрэсщэт нам ед, декта игнота ныморэ жят эи.', 'Шэа декам экшырки эи, эи зыд эррэм докэндё, векж факэтэ пэрчыквюэрёж ку.' 
@@ -138,7 +138,7 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ '```python', - 'class misc.Random(_random.Random)', + 'class pythonFiles.autocomp.misc.Random(_random.Random)', '```', 'Random number generator base class used by bound module functions.', 'Used to instantiate instances of Random to get generators that don\'t', @@ -162,7 +162,7 @@ suite('Hover Definition (Analysis Engine)', () => { const expected = [ 'rnd2.randint:', '```python', - 'method randint of misc.Random objects -> int', + 'method randint of pythonFiles.autocomp.misc.Random objects -> int', '```', 'Return random integer in range [a, b], including both end points.' ]; @@ -195,7 +195,7 @@ suite('Hover Definition (Analysis Engine)', () => { const actual = normalizeMarkedString(def[0].contents[0]).splitLines(); const expected = [ '```python', - 'class misc.Thread(_Verbose)', + 'class pythonFiles.autocomp.misc.Thread(_Verbose)', '```', 'A class that represents a thread of control.', 'This class can be safely subclassed in a limited fashion.' From 2fd53877fd1b0cc41380cc7b4b668ed2722a17ba Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 29 May 2018 11:44:53 -0700 Subject: [PATCH 72/83] Rename the engine --- src/client/activation/downloader.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/activation/downloader.ts b/src/client/activation/downloader.ts index f28075571579..3fee93e418f5 100644 --- a/src/client/activation/downloader.ts +++ b/src/client/activation/downloader.ts @@ -41,7 +41,7 @@ export class AnalysisEngineDownloader { let localTempFilePath = ''; try { - localTempFilePath = await this.downloadFile(downloadUriPrefix, enginePackageFileName, 'Downloading Python Analysis Engine... '); + localTempFilePath = await this.downloadFile(downloadUriPrefix, enginePackageFileName, 'Downloading Microsoft Python Language Server... 
'); await this.verifyDownload(localTempFilePath, platformString); await this.unpackArchive(context.extensionPath, localTempFilePath); } catch (err) { From 019007853fe9e22576d3856e8c656046b9c82099 Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Tue, 29 May 2018 16:56:39 -0700 Subject: [PATCH 73/83] Formatting 1 --- src/client/language/tokenizer.ts | 28 ++++++++++++++++++- .../format/extension.lineFormatter.test.ts | 6 ++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 7ceafdccb0e6..e88606f64582 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -350,10 +350,36 @@ export class Tokenizer implements ITokenizer { this.tokens.push(new Token(TokenType.Comment, start, this.cs.position - start)); } + // tslint:disable-next-line:cyclomatic-complexity private getStringPrefixLength(): number { - if (this.cs.currentChar === Char.f && (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote)) { + if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { + switch (this.cs.currentChar) { + case Char.f: + case Char.F: + case Char.r: + case Char.R: + case Char.b: + case Char.B: + case Char.u: + case Char.U: + return 1; + default: + break; + } + } + if ((this.cs.currentChar === Char.f || this.cs.currentChar === Char.F) && ) { return 1; // f-string } + + if ((this.cs.currentChar === Char.r || this.cs.currentChar === Char.R)) { + if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { + return 1; // r-string + } + if ((this.cs.nextChar === Char.f || this.cs.nextChar === Char.F) && (this.cs.lookAhead(2) === Char.SingleQuote || this.cs.lookAhead(2) === Char.DoubleQuote)) { + return 2; // rf-string + } + } + if (this.cs.currentChar === Char.b || this.cs.currentChar === Char.B || this.cs.currentChar === Char.u || this.cs.currentChar === Char.U) { if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { // b-string or u-string diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 656f22c3ffd6..5bf9374c1ca8 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -121,6 +121,12 @@ suite('Formatting - line formatter', () => { test('from..x import', () => { testFormatLine('from..x import', 'from ..x import'); }); + test('Raw strings', () => { + testFormatLine('z=r""', 'z = r""'); + testFormatLine('z=rf""', 'z = rf""'); + testFormatLine('z=R""', 'z = R""'); + testFormatLine('z=RF""', 'z = RF""'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); From 5b93e344b9c9dd131eb0df5c88ca93303056e81f Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 21:33:05 -0700 Subject: [PATCH 74/83] Add support for 'rf' strings --- src/client/language/tokenizer.ts | 39 ++++++++++++-------------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index e88606f64582..15d98346dcca 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -352,6 +352,10 @@ export class Tokenizer implements ITokenizer { // tslint:disable-next-line:cyclomatic-complexity private getStringPrefixLength(): number { + if (this.cs.currentChar === 
Char.SingleQuote || this.cs.currentChar === Char.DoubleQuote) { + return 0; // Simple string, no prefix + } + if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { switch (this.cs.currentChar) { case Char.f: @@ -362,37 +366,24 @@ export class Tokenizer implements ITokenizer { case Char.B: case Char.u: case Char.U: - return 1; + return 1; // single-char prefix like u"" or r"" default: break; } } - if ((this.cs.currentChar === Char.f || this.cs.currentChar === Char.F) && ) { - return 1; // f-string - } - if ((this.cs.currentChar === Char.r || this.cs.currentChar === Char.R)) { - if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { - return 1; // r-string - } - if ((this.cs.nextChar === Char.f || this.cs.nextChar === Char.F) && (this.cs.lookAhead(2) === Char.SingleQuote || this.cs.lookAhead(2) === Char.DoubleQuote)) { - return 2; // rf-string - } - } - - if (this.cs.currentChar === Char.b || this.cs.currentChar === Char.B || this.cs.currentChar === Char.u || this.cs.currentChar === Char.U) { - if (this.cs.nextChar === Char.SingleQuote || this.cs.nextChar === Char.DoubleQuote) { - // b-string or u-string - return 1; - } - if (this.cs.nextChar === Char.r || this.cs.nextChar === Char.R) { - // b-string or u-string with 'r' suffix - if (this.cs.lookAhead(2) === Char.SingleQuote || this.cs.lookAhead(2) === Char.DoubleQuote) { - return 2; - } + if (this.cs.lookAhead(2) === Char.SingleQuote || this.cs.lookAhead(2) === Char.DoubleQuote) { + const prefix = this.cs.getText().substr(this.cs.position, 2).toLowerCase(); + switch (prefix) { + case 'rf': + case 'ur': + case 'br': + return 2; + default: + break; } } - return this.cs.currentChar === Char.SingleQuote || this.cs.currentChar === Char.DoubleQuote ? 0 : -1; + return -1; } private getQuoteType(): QuoteType { From 781e6b18f9ba9ee562ed7f0d55506545bc1ff331 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 21:45:42 -0700 Subject: [PATCH 75/83] Add two spaces before comment per PEP --- src/client/formatters/lineFormatter.ts | 4 ++-- src/client/language/textBuilder.ts | 10 ++++++++-- src/test/format/extension.lineFormatter.test.ts | 2 +- src/test/pythonFiles/formatting/pythonGrammar.py | 4 ++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 9bd256f50177..938f74bf9b86 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -75,9 +75,9 @@ export class LineFormatter { break; case TokenType.Comment: - // Add space before in-line comment. + // Add 2 spaces before in-line comment per PEP guidelines. 
if (prev) { - this.builder.softAppendSpace(); + this.builder.softAppendSpace(2); } this.builder.append(this.text.substring(t.start, t.end)); break; diff --git a/src/client/language/textBuilder.ts b/src/client/language/textBuilder.ts index aebe6187696b..e11f2a1299c4 100644 --- a/src/client/language/textBuilder.ts +++ b/src/client/language/textBuilder.ts @@ -16,8 +16,14 @@ export class TextBuilder { return this.segments.join(''); } - public softAppendSpace(): void { - if (!this.isLastWhiteSpace() && this.segments.length > 0) { + public softAppendSpace(count: number = 1): void { + if (this.segments.length === 0) { + return; + } + if (this.isLastWhiteSpace()) { + count = count - 1; + } + for (let i = 0; i < count; i += 1) { this.segments.push(' '); } } diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 5bf9374c1ca8..7faafbfddce6 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -59,7 +59,7 @@ suite('Formatting - line formatter', () => { testFormatLine('[ 1 :[2: (x,),y]]{1}', '[1:[2:(x,), y]]{1}'); }); test('Trailing comment', () => { - testFormatLine('x=1 # comment', 'x = 1 # comment'); + testFormatLine('x=1 # comment', 'x = 1 # comment'); }); test('Single comment', () => { testFormatLine('# comment', '# comment'); diff --git a/src/test/pythonFiles/formatting/pythonGrammar.py b/src/test/pythonFiles/formatting/pythonGrammar.py index 1a17d94302b5..78272c321e73 100644 --- a/src/test/pythonFiles/formatting/pythonGrammar.py +++ b/src/test/pythonFiles/formatting/pythonGrammar.py @@ -236,7 +236,7 @@ def test_eof_error(self): compile(s, "", "exec") self.assertIn("unexpected EOF", str(cm.exception)) -var_annot_global: int # a global annotated is necessary for test_var_annot +var_annot_global: int # a global annotated is necessary for test_var_annot # custom namespace for testing __annotations__ @@ -643,7 +643,7 @@ def test_lambdef(self): ### lambdef: 'lambda' [varargslist] ':' test l1 = lambda: 0 self.assertEqual(l1(), 0) - l2 = lambda: a[d] # XXX just testing the expression + l2 = lambda: a[d] # XXX just testing the expression l3 = lambda: [2 < x for x in [-1, 3, 0]] self.assertEqual(l3(), [0, 1, 0]) l4 = lambda x=lambda y=lambda z=1: z: y(): x() From e795309d88161bf9c1759b35efeefcd8d12585ac Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 21:52:08 -0700 Subject: [PATCH 76/83] Fix @ operator spacing --- src/client/formatters/lineFormatter.ts | 13 +++++++++++-- src/test/format/extension.lineFormatter.test.ts | 3 +++ src/test/pythonFiles/formatting/pythonGrammar.py | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 938f74bf9b86..30e229f437bd 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -118,8 +118,17 @@ export class LineFormatter { } return; case Char.At: - case Char.ExclamationMark: - this.builder.append(this.text[t.start]); + if (prev) { + // Binary case + this.builder.softAppendSpace(); + this.builder.append('@'); + this.builder.softAppendSpace(); + } else { + this.builder.append('@'); + } + return; + case Char.ExclamationMark: + this.builder.append('!'); return; case Char.Asterisk: if (prev && this.isKeyword(prev, 'lambda')) { diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 7faafbfddce6..c6196fe84501 100644 --- 
a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -127,6 +127,9 @@ suite('Formatting - line formatter', () => { testFormatLine('z=R""', 'z = R""'); testFormatLine('z=RF""', 'z = RF""'); }); + test('binary @', () => { + testFormatLine('a@ b', 'a @ b'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); diff --git a/src/test/pythonFiles/formatting/pythonGrammar.py b/src/test/pythonFiles/formatting/pythonGrammar.py index 78272c321e73..937cba401d3f 100644 --- a/src/test/pythonFiles/formatting/pythonGrammar.py +++ b/src/test/pythonFiles/formatting/pythonGrammar.py @@ -1492,7 +1492,7 @@ def __imatmul__(self, o): self.other = o return self m = M() - self.assertEqual(m@m, 4) + self.assertEqual(m @ m, 4) m @= 42 self.assertEqual(m.other, 42) From d300d0c1c669a4109766ac99a87bb8d705d0957e Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 22:41:01 -0700 Subject: [PATCH 77/83] Handle module and unary ops --- src/client/formatters/lineFormatter.ts | 19 ++++++++++--- src/client/language/tokenizer.ts | 2 ++ .../format/extension.lineFormatter.test.ts | 10 +++++++ src/test/language/tokenizer.test.ts | 27 +++++++++++++++---- 4 files changed, 50 insertions(+), 8 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 30e229f437bd..6b224c2c9f32 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -98,10 +98,10 @@ export class LineFormatter { private handleOperator(index: number): void { const t = this.tokens.getItemAt(index); const prev = index > 0 ? this.tokens.getItemAt(index - 1) : undefined; + const opCode = this.text.charCodeAt(t.start); const next = index < this.tokens.count - 1 ? 
this.tokens.getItemAt(index + 1) : undefined; if (t.length === 1) { - const opCode = this.text.charCodeAt(t.start); switch (opCode) { case Char.Equal: if (this.handleEqual(t, index)) { @@ -112,7 +112,7 @@ export class LineFormatter { if (prev && this.isKeyword(prev, 'from')) { this.builder.softAppendSpace(); } - this.builder.append(this.text[t.start]); + this.builder.append('.'); if (next && this.isKeyword(next, 'import')) { this.builder.softAppendSpace(); } @@ -127,7 +127,7 @@ export class LineFormatter { this.builder.append('@'); } return; - case Char.ExclamationMark: + case Char.ExclamationMark: this.builder.append('!'); return; case Char.Asterisk: @@ -162,6 +162,13 @@ export class LineFormatter { this.builder.softAppendSpace(); this.builder.append(this.text.substring(t.start, t.end)); + + // Check unary case + if (prev && prev.type === TokenType.Operator) { + if (opCode === Char.Hyphen || opCode === Char.Plus || opCode === Char.Tilde) { + return; + } + } this.builder.softAppendSpace(); } @@ -197,6 +204,12 @@ export class LineFormatter { return; } + if (t.type === TokenType.Number && prev && prev.type === TokenType.Operator && prev.length === 1 && this.text.charCodeAt(prev.start) === Char.Tilde) { + // Special case for ~ before numbers + this.builder.append(this.text.substring(t.start, t.end)); + return; + } + if (t.type === TokenType.Unknown) { this.handleUnknown(t); } else { diff --git a/src/client/language/tokenizer.ts b/src/client/language/tokenizer.ts index 15d98346dcca..2574c388aeb1 100644 --- a/src/client/language/tokenizer.ts +++ b/src/client/language/tokenizer.ts @@ -280,6 +280,8 @@ export class Tokenizer implements ITokenizer { case Char.Caret: case Char.Equal: case Char.ExclamationMark: + case Char.Percent: + case Char.Tilde: length = nextChar === Char.Equal ? 
2 : 1; break; diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index c6196fe84501..7941d280e19f 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -90,6 +90,8 @@ suite('Formatting - line formatter', () => { }); test('Dot operator', () => { testFormatLine('x.y', 'x.y'); + testFormatLine('5 .y', '5.y'); + //testFormatLine('- 135 .bit_length()', '-135.bit_length()'); }); test('Unknown tokens no space', () => { testFormatLine('abc\\n\\', 'abc\\n\\'); @@ -130,6 +132,14 @@ suite('Formatting - line formatter', () => { test('binary @', () => { testFormatLine('a@ b', 'a @ b'); }); + test('unary operators', () => { + testFormatLine('x= - y', 'x = -y'); + testFormatLine('x= + y', 'x = +y'); + testFormatLine('x= ~ y', 'x = ~y'); + testFormatLine('x=-1', 'x = -1'); + testFormatLine('x= +1', 'x = +1'); + testFormatLine('x= ~1 ', 'x = ~1'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); diff --git a/src/test/language/tokenizer.test.ts b/src/test/language/tokenizer.test.ts index d7119b7b4f6f..397a1f9f398d 100644 --- a/src/test/language/tokenizer.test.ts +++ b/src/test/language/tokenizer.test.ts @@ -185,7 +185,7 @@ suite('Language.Tokenizer', () => { }); test('Unknown token', () => { const t = new Tokenizer(); - const tokens = t.tokenize('~$'); + const tokens = t.tokenize('`$'); assert.equal(tokens.count, 1); assert.equal(tokens.getItemAt(0).type, TokenType.Unknown); @@ -301,20 +301,37 @@ suite('Language.Tokenizer', () => { assert.equal(tokens.getItemAt(5).type, TokenType.Number); assert.equal(tokens.getItemAt(5).length, 5); }); + test('Simple expression, leading minus', () => { + const t = new Tokenizer(); + const tokens = t.tokenize('x == -y'); + assert.equal(tokens.count, 4); + + assert.equal(tokens.getItemAt(0).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(0).length, 1); + + assert.equal(tokens.getItemAt(1).type, TokenType.Operator); + assert.equal(tokens.getItemAt(1).length, 2); + + assert.equal(tokens.getItemAt(2).type, TokenType.Operator); + assert.equal(tokens.getItemAt(2).length, 1); + + assert.equal(tokens.getItemAt(3).type, TokenType.Identifier); + assert.equal(tokens.getItemAt(3).length, 1); + }); test('Operators', () => { const text = '< <> << <<= ' + '== != > >> >>= >= <=' + - '+ -' + + '+ - ~ %' + '* ** / /= //=' + - '*= += -= **= ' + + '*= += -= ~= %= **= ' + '& &= | |= ^ ^= ->'; const tokens = new Tokenizer().tokenize(text); const lengths = [ 1, 2, 2, 3, 2, 2, 1, 2, 3, 2, 2, - 1, 1, + 1, 1, 1, 1, 1, 2, 1, 2, 3, - 2, 2, 2, 3, + 2, 2, 2, 2, 2, 3, 1, 2, 1, 2, 1, 2, 2]; assert.equal(tokens.count, lengths.length); for (let i = 0; i < tokens.count; i += 1) { From dd09087a24f89b568235346316cc966ad6f81fde Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 22:57:09 -0700 Subject: [PATCH 78/83] Type hints --- src/client/formatters/lineFormatter.ts | 4 ++++ src/test/format/extension.lineFormatter.test.ts | 7 +++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 6b224c2c9f32..5bf13eb557a9 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -246,6 +246,10 @@ export class LineFormatter { return false; } + if (index > 1 && this.tokens.getItemAt(index - 2).type === 
TokenType.Colon) { + return false; // Type hint should have spaces around like foo(x: int = 1) per PEP 8 + } + const first = this.tokens.getItemAt(0); if (first.type === TokenType.Comma) { return true; // Line starts with commma diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 7941d280e19f..2693cb2409ac 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -129,10 +129,10 @@ suite('Formatting - line formatter', () => { testFormatLine('z=R""', 'z = R""'); testFormatLine('z=RF""', 'z = RF""'); }); - test('binary @', () => { + test('Binary @', () => { testFormatLine('a@ b', 'a @ b'); }); - test('unary operators', () => { + test('Unary operators', () => { testFormatLine('x= - y', 'x = -y'); testFormatLine('x= + y', 'x = +y'); testFormatLine('x= ~ y', 'x = ~y'); @@ -140,6 +140,9 @@ suite('Formatting - line formatter', () => { testFormatLine('x= +1', 'x = +1'); testFormatLine('x= ~1 ', 'x = ~1'); }); + test('Equals with type hints', () => { + testFormatLine('def foo(x:int=3,x=100.)', 'def foo(x: int = 3, x=100.)'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); From 46b6dfdb567b43d1c4abd3c0e4fc6dcb7c33d55c Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 22:58:50 -0700 Subject: [PATCH 79/83] Fix typo --- src/test/activation/activationService.unit.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/activation/activationService.unit.test.ts b/src/test/activation/activationService.unit.test.ts index 24c5d2841522..e5390cc2ae23 100644 --- a/src/test/activation/activationService.unit.test.ts +++ b/src/test/activation/activationService.unit.test.ts @@ -16,7 +16,7 @@ import { IServiceContainer } from '../../client/ioc/types'; suite('Activation - ActivationService', () => { [true, false].forEach(jediIsEnabled => { - suite(`Jedi is ${jediIsEnabled ? 'dnabled' : 'disabled'}`, () => { + suite(`Jedi is ${jediIsEnabled ? 
'enabled' : 'disabled'}`, () => { let serviceContainer: TypeMoq.IMock; let pythonSettings: TypeMoq.IMock; let appShell: TypeMoq.IMock; From cf264b87b2e4b0be94730ece961efbeddaa68648 Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 23:10:06 -0700 Subject: [PATCH 80/83] Trailing comma --- src/client/formatters/lineFormatter.ts | 22 ++++++++++++------- .../format/extension.lineFormatter.test.ts | 3 +++ 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index 5bf13eb557a9..bb5203954a9c 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -104,10 +104,8 @@ export class LineFormatter { if (t.length === 1) { switch (opCode) { case Char.Equal: - if (this.handleEqual(t, index)) { - return; - } - break; + this.handleEqual(t, index); + return; case Char.Period: if (prev && this.isKeyword(prev, 'from')) { this.builder.softAppendSpace(); @@ -172,16 +170,24 @@ export class LineFormatter { this.builder.softAppendSpace(); } - private handleEqual(t: IToken, index: number): boolean { + private handleEqual(t: IToken, index: number): void { if (this.isMultipleStatements(index) && !this.braceCounter.isOpened(TokenType.OpenBrace)) { - return false; // x = 1; x, y = y, x + // x = 1; x, y = y, x + this.builder.softAppendSpace(); + this.builder.append('='); + this.builder.softAppendSpace(); + return; } + // Check if this is = in function arguments. If so, do not add spaces around it. if (this.isEqualsInsideArguments(index)) { this.builder.append('='); - return true; + return; } - return false; + + this.builder.softAppendSpace(); + this.builder.append('='); + this.builder.softAppendSpace(); } private handleOther(t: IToken, index: number): void { diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 2693cb2409ac..46896acedb69 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -143,6 +143,9 @@ suite('Formatting - line formatter', () => { test('Equals with type hints', () => { testFormatLine('def foo(x:int=3,x=100.)', 'def foo(x: int = 3, x=100.)'); }); + test('Trailing comma', () => { + testFormatLine('a, =[1]', 'a, = [1]'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); From 3e341e930cfeabbd805dc8db1f6059395e6c41bf Mon Sep 17 00:00:00 2001 From: Mikhail Arkhipov Date: Tue, 29 May 2018 23:14:21 -0700 Subject: [PATCH 81/83] Require space after if --- src/client/formatters/lineFormatter.ts | 2 +- src/test/format/extension.lineFormatter.test.ts | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index bb5203954a9c..ffbe27aaaa23 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -314,7 +314,7 @@ export class LineFormatter { return s === 'in' || s === 'return' || s === 'and' || s === 'or' || s === 'not' || s === 'from' || s === 'import' || s === 'except' || s === 'for' || - s === 'as' || s === 'is'; + s === 'as' || s === 'is' || s === 'if'; } private isKeyword(t: IToken, keyword: string): boolean { return t.type === TokenType.Identifier && t.length === keyword.length && this.text.substr(t.start, t.length) === keyword; diff --git 
a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 46896acedb69..77aacd6f1958 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -146,6 +146,9 @@ suite('Formatting - line formatter', () => { test('Trailing comma', () => { testFormatLine('a, =[1]', 'a, = [1]'); }); + test('if()', () => { + testFormatLine('if(True) :', 'if (True):'); + }); test('Grammar file', () => { const content = fs.readFileSync(grammarFile).toString('utf8'); const lines = content.splitLines({ trim: false, removeEmptyEntries: false }); From 06e71403739b8129852353ff7346c82a1be7768a Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Wed, 30 May 2018 12:44:11 -0700 Subject: [PATCH 82/83] Update list of keywords --- src/client/formatters/lineFormatter.ts | 24 ++++++++++++++----- .../format/extension.lineFormatter.test.ts | 4 ++++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index ffbe27aaaa23..fde8dc7ae385 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -10,6 +10,20 @@ import { TextRangeCollection } from '../language/textRangeCollection'; import { Tokenizer } from '../language/tokenizer'; import { ITextRangeCollection, IToken, TokenType } from '../language/types'; +const keywordsWithSpaceBeforeBrace = [ + 'and', 'as', 'assert', + 'del', + 'except', 'elif', + 'for', 'from', + 'global', + 'if', 'import', 'in', 'is', + 'nonlocal', 'not', + 'or', + 'raise', 'return', + 'while', 'with', + 'yield' +]; + export class LineFormatter { private builder = new TextBuilder(); private tokens: ITextRangeCollection = new TextRangeCollection([]); @@ -59,7 +73,7 @@ export class LineFormatter { } const id = this.text.substring(t.start, t.end); this.builder.append(id); - if (this.keywordWithSpaceAfter(id) && next && this.isOpenBraceType(next.type)) { + if (this.isKeywordWithSpaceBeforeBrace(id) && next && this.isOpenBraceType(next.type)) { // for x in () this.builder.softAppendSpace(); } @@ -310,11 +324,9 @@ export class LineFormatter { } return false; } - private keywordWithSpaceAfter(s: string): boolean { - return s === 'in' || s === 'return' || s === 'and' || - s === 'or' || s === 'not' || s === 'from' || - s === 'import' || s === 'except' || s === 'for' || - s === 'as' || s === 'is' || s === 'if'; + + private isKeywordWithSpaceBeforeBrace(s: string): boolean { + return keywordsWithSpaceBeforeBrace.indexOf(s) >= 0; } private isKeyword(t: IToken, keyword: string): boolean { return t.type === TokenType.Identifier && t.length === keyword.length && this.text.substr(t.start, t.length) === keyword; diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index 77aacd6f1958..a50a7de64f2d 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -87,6 +87,10 @@ suite('Formatting - line formatter', () => { }); test('Brace after keyword', () => { testFormatLine('for x in(1,2,3)', 'for x in (1, 2, 3)'); + testFormatLine('assert(1,2,3)', 'assert (1, 2, 3)'); + testFormatLine('if (True|False)and(False/True)not (! 
x )', 'if (True | False) and (False / True) not (!x)'); + testFormatLine('while (True|False)', 'while (True | False)'); + testFormatLine('yield(a%b)', 'yield (a % b)'); }); test('Dot operator', () => { testFormatLine('x.y', 'x.y'); From c597d82801ea90527b8f53a28f1b13f77258217a Mon Sep 17 00:00:00 2001 From: MikhailArkhipov Date: Thu, 31 May 2018 10:02:20 -0700 Subject: [PATCH 83/83] PR feedback --- src/client/formatters/lineFormatter.ts | 3 ++- src/test/format/extension.lineFormatter.test.ts | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/formatters/lineFormatter.ts b/src/client/formatters/lineFormatter.ts index fde8dc7ae385..b7a6a13aa29b 100644 --- a/src/client/formatters/lineFormatter.ts +++ b/src/client/formatters/lineFormatter.ts @@ -11,12 +11,13 @@ import { Tokenizer } from '../language/tokenizer'; import { ITextRangeCollection, IToken, TokenType } from '../language/types'; const keywordsWithSpaceBeforeBrace = [ - 'and', 'as', 'assert', + 'and', 'as', 'assert', 'await', 'del', 'except', 'elif', 'for', 'from', 'global', 'if', 'import', 'in', 'is', + 'lambda', 'nonlocal', 'not', 'or', 'raise', 'return', diff --git a/src/test/format/extension.lineFormatter.test.ts b/src/test/format/extension.lineFormatter.test.ts index a50a7de64f2d..acb86d2c5715 100644 --- a/src/test/format/extension.lineFormatter.test.ts +++ b/src/test/format/extension.lineFormatter.test.ts @@ -95,7 +95,6 @@ suite('Formatting - line formatter', () => { test('Dot operator', () => { testFormatLine('x.y', 'x.y'); testFormatLine('5 .y', '5.y'); - //testFormatLine('- 135 .bit_length()', '-135.bit_length()'); }); test('Unknown tokens no space', () => { testFormatLine('abc\\n\\', 'abc\\n\\');