diff --git a/lib/options.ts b/lib/options.ts
index ebfe9c56f..241d42fac 100644
--- a/lib/options.ts
+++ b/lib/options.ts
@@ -1,10 +1,8 @@
-import { Omit } from "ast-types/types";
-
 /**
  * All Recast API functions take second parameter with configuration options,
  * documented in options.js
  */
-export interface Options extends DeprecatedOptions {
+export interface Options {
   /**
    * If you want to use a different branch of esprima, or any other module
    * that supports a .parse function, pass that module object to
@@ -159,17 +157,11 @@ export interface Options extends DeprecatedOptions {
   tokens?: boolean;
 }
 
-interface DeprecatedOptions {
-  /** @deprecated */
-  esprima?: any;
-}
-
 const defaults: Options = {
-  parser: require("../parsers/esprima"),
   tabWidth: 4,
   useTabs: false,
   reuseWhitespace: true,
-  lineTerminator: require("os").EOL || "\n",
+  lineTerminator: "\n",
   wrapColumn: 74, // Aspirational for now.
   sourceFileName: null,
   sourceMapName: null,
@@ -187,9 +179,7 @@ const defaults: Options = {
 };
 
 const hasOwn = defaults.hasOwnProperty;
-export type NormalizedOptions = Required<
-  Omit<Options, keyof DeprecatedOptions>
->;
+export type NormalizedOptions = Required<Options>;
 
 // Copy options and fill in default values.
 export function normalize(opts?: Options): NormalizedOptions {
@@ -209,7 +199,7 @@ export function normalize(opts?: Options): NormalizedOptions {
     sourceMapName: get("sourceMapName"),
     sourceRoot: get("sourceRoot"),
     inputSourceMap: get("inputSourceMap"),
-    parser: get("esprima") || get("parser"),
+    parser: get("parser"),
     range: get("range"),
     tolerant: get("tolerant"),
     quote: get("quote"),
diff --git a/lib/parser.ts b/lib/parser.ts
index ca48b56bd..722eba98d 100644
--- a/lib/parser.ts
+++ b/lib/parser.ts
@@ -1,295 +1,28 @@
-import assert from "assert";
-import * as types from "ast-types";
-const b = types.builders;
-const isObject = types.builtInTypes.object;
-const isArray = types.builtInTypes.array;
-import { normalize as normalizeOptions } from "./options";
-import { fromString } from "./lines";
-import { attach as attachComments } from "./comments";
-import * as util from "./util";
 import { Options } from "./options";
+import { parse as pureParse } from "./pure-parser";
+import { normalize as normalizeOptions } from "./options";
 
 export function parse(source: string, options?: Partial<Options>) {
-  options = normalizeOptions(options);
-
-  const lines = fromString(source, options);
-
-  const sourceWithoutTabs = lines.toString({
-    tabWidth: options.tabWidth,
-    reuseWhitespace: false,
-    useTabs: false,
-  });
-
-  let comments: any[] = [];
-  const ast = options.parser.parse(sourceWithoutTabs, {
-    jsx: true,
-    loc: true,
-    locations: true,
-    range: options.range,
-    comment: true,
-    onComment: comments,
-    tolerant: util.getOption(options, "tolerant", true),
-    ecmaVersion: 6,
-    sourceType: util.getOption(options, "sourceType", "module"),
+  options = normalizeOptions({
+    parser: require("../parsers/esprima"),
+    ...options,
   });
-
-  // Use ast.tokens if possible, and otherwise fall back to the Esprima
-  // tokenizer. All the preconfigured ../parsers/* expose ast.tokens
-  // automatically, but custom parsers might need additional configuration
-  // to avoid this fallback.
-  const tokens: any[] = Array.isArray(ast.tokens)
-    ? ast.tokens
-    : require("esprima").tokenize(sourceWithoutTabs, {
-        loc: true,
-      });
-
-  // We will reattach the tokens array to the file object below.
-  delete ast.tokens;
-
-  // Make sure every token has a token.value string.
-  tokens.forEach(function (token) {
-    if (typeof token.value !== "string") {
-      token.value = lines.sliceString(token.loc.start, token.loc.end);
-    }
-  });
-
-  if (Array.isArray(ast.comments)) {
-    comments = ast.comments;
-    delete ast.comments;
-  }
-
-  if (ast.loc) {
-    // If the source was empty, some parsers give loc.{start,end}.line
-    // values of 0, instead of the minimum of 1.
-    util.fixFaultyLocations(ast, lines);
-  } else {
-    ast.loc = {
-      start: lines.firstPos(),
-      end: lines.lastPos(),
-    };
-  }
-
-  ast.loc.lines = lines;
-  ast.loc.indent = 0;
-
-  let file;
-  let program;
-  if (ast.type === "Program") {
-    program = ast;
-    // In order to ensure we reprint leading and trailing program
-    // comments, wrap the original Program node with a File node. Only
-    // ESTree parsers (Acorn and Esprima) return a Program as the root AST
-    // node. Most other (Babylon-like) parsers return a File.
-    file = b.file(ast, options.sourceFileName || null);
-    file.loc = {
-      start: lines.firstPos(),
-      end: lines.lastPos(),
-      lines: lines,
-      indent: 0,
-    } as any;
-  } else if (ast.type === "File") {
-    file = ast;
-    program = file.program;
-  }
-
-  // Expose file.tokens unless the caller passed false for options.tokens.
-  if (options.tokens) {
-    file.tokens = tokens;
-  }
-
-  // Expand the Program's .loc to include all comments (not just those
-  // attached to the Program node, as its children may have comments as
-  // well), since sometimes program.loc.{start,end} will coincide with the
-  // .loc.{start,end} of the first and last *statements*, mistakenly
-  // excluding comments that fall outside that region.
-  const trueProgramLoc: any = util.getTrueLoc(
-    {
-      type: program.type,
-      loc: program.loc,
-      body: [],
-      comments,
-    },
-    lines,
-  );
-  program.loc.start = trueProgramLoc.start;
-  program.loc.end = trueProgramLoc.end;
-
-  // Passing file.program here instead of just file means that initial
-  // comments will be attached to program.body[0] instead of program.
-  attachComments(comments, program.body.length ? file.program : file, lines);
-
-  // Return a copy of the original AST so that any changes made may be
-  // compared to the original.
-  return new TreeCopier(lines, tokens).copy(file);
+  let original = options.parser.parse.bind(options.parser);
+  options.parser.parse = (source: string, options: any) => {
+    const ast = original(source, options);
+    // Use ast.tokens if possible, and otherwise fall back to the Esprima
+    // tokenizer. All the preconfigured ../parsers/* expose ast.tokens
+    // automatically, but custom parsers might need additional configuration
+    // to avoid this fallback.
+    const tokens: any[] = Array.isArray(ast.tokens)
+      ? ast.tokens
+      : require("esprima").tokenize(source, {
+          loc: true,
+        });
+
+    ast.tokens = tokens;
+
+    return ast;
+  };
+  return pureParse(source, options);
 }
-
-interface TreeCopierType {
-  lines: any;
-  tokens: any[];
-  startTokenIndex: number;
-  endTokenIndex: number;
-  indent: number;
-  seen: Map<any, any>;
-  copy(node: any): any;
-  findTokenRange(loc: any): any;
-}
-
-interface TreeCopierConstructor {
-  new (lines: any, tokens: any): TreeCopierType;
-}
-
-const TreeCopier = function TreeCopier(
-  this: TreeCopierType,
-  lines: any,
-  tokens: any,
-) {
-  assert.ok(this instanceof TreeCopier);
-  this.lines = lines;
-  this.tokens = tokens;
-  this.startTokenIndex = 0;
-  this.endTokenIndex = tokens.length;
-  this.indent = 0;
-  this.seen = new Map();
-} as any as TreeCopierConstructor;
-
-const TCp: TreeCopierType = TreeCopier.prototype;
-
-TCp.copy = function (node) {
-  if (this.seen.has(node)) {
-    return this.seen.get(node);
-  }
-
-  if (isArray.check(node)) {
-    const copy: any = new Array(node.length);
-    this.seen.set(node, copy);
-    node.forEach(function (this: any, item: any, i: any) {
-      copy[i] = this.copy(item);
-    }, this);
-    return copy;
-  }
-
-  if (!isObject.check(node)) {
-    return node;
-  }
-
-  util.fixFaultyLocations(node, this.lines);
-
-  const copy: any = Object.create(Object.getPrototypeOf(node), {
-    original: {
-      // Provide a link from the copy to the original.
-      value: node,
-      configurable: false,
-      enumerable: false,
-      writable: true,
-    },
-  });
-
-  this.seen.set(node, copy);
-
-  const loc = node.loc;
-  const oldIndent = this.indent;
-  let newIndent = oldIndent;
-
-  const oldStartTokenIndex = this.startTokenIndex;
-  const oldEndTokenIndex = this.endTokenIndex;
-
-  if (loc) {
-    // When node is a comment, we set node.loc.indent to
-    // node.loc.start.column so that, when/if we print the comment by
-    // itself, we can strip that much whitespace from the left margin of
-    // the comment. This only really matters for multiline Block comments,
-    // but it doesn't hurt for Line comments.
-    if (
-      node.type === "Block" ||
-      node.type === "Line" ||
-      node.type === "CommentBlock" ||
-      node.type === "CommentLine" ||
-      this.lines.isPrecededOnlyByWhitespace(loc.start)
-    ) {
-      newIndent = this.indent = loc.start.column;
-    }
-
-    // Every node.loc has a reference to the original source lines as well
-    // as a complete list of source tokens.
-    loc.lines = this.lines;
-    loc.tokens = this.tokens;
-    loc.indent = newIndent;
-
-    // Set loc.start.token and loc.end.token such that
-    // loc.tokens.slice(loc.start.token, loc.end.token) returns a list of
-    // all the tokens that make up this node.
-    this.findTokenRange(loc);
-  }
-
-  const keys = Object.keys(node);
-  const keyCount = keys.length;
-  for (let i = 0; i < keyCount; ++i) {
-    const key = keys[i];
-    if (key === "loc") {
-      copy[key] = node[key];
-    } else if (key === "tokens" && node.type === "File") {
-      // Preserve file.tokens (uncopied) in case client code cares about
-      // it, even though Recast ignores it when reprinting.
-      copy[key] = node[key];
-    } else {
-      copy[key] = this.copy(node[key]);
-    }
-  }
-
-  this.indent = oldIndent;
-  this.startTokenIndex = oldStartTokenIndex;
-  this.endTokenIndex = oldEndTokenIndex;
-
-  return copy;
-};
-
-// If we didn't have any idea where in loc.tokens to look for tokens
-// contained by this loc, a binary search would be appropriate, but
-// because we maintain this.startTokenIndex and this.endTokenIndex as we
-// traverse the AST, we only need to make small (linear) adjustments to
-// those indexes with each recursive iteration.
-TCp.findTokenRange = function (loc) {
-  // In the unlikely event that loc.tokens[this.startTokenIndex] starts
-  // *after* loc.start, we need to rewind this.startTokenIndex first.
-  while (this.startTokenIndex > 0) {
-    const token = loc.tokens[this.startTokenIndex];
-    if (util.comparePos(loc.start, token.loc.start) < 0) {
-      --this.startTokenIndex;
-    } else break;
-  }
-
-  // In the unlikely event that loc.tokens[this.endTokenIndex - 1] ends
-  // *before* loc.end, we need to fast-forward this.endTokenIndex first.
-  while (this.endTokenIndex < loc.tokens.length) {
-    const token = loc.tokens[this.endTokenIndex];
-    if (util.comparePos(token.loc.end, loc.end) < 0) {
-      ++this.endTokenIndex;
-    } else break;
-  }
-
-  // Increment this.startTokenIndex until we've found the first token
-  // contained by this node.
-  while (this.startTokenIndex < this.endTokenIndex) {
-    const token = loc.tokens[this.startTokenIndex];
-    if (util.comparePos(token.loc.start, loc.start) < 0) {
-      ++this.startTokenIndex;
-    } else break;
-  }
-
-  // Index into loc.tokens of the first token within this node.
-  loc.start.token = this.startTokenIndex;
-
-  // Decrement this.endTokenIndex until we've found the first token after
-  // this node (not contained by the node).
-  while (this.endTokenIndex > this.startTokenIndex) {
-    const token = loc.tokens[this.endTokenIndex - 1];
-    if (util.comparePos(loc.end, token.loc.end) < 0) {
-      --this.endTokenIndex;
-    } else break;
-  }
-
-  // Index into loc.tokens of the first token *after* this node.
-  // If loc.start.token === loc.end.token, the node contains no tokens,
-  // and the index is that of the next token following this node.
-  loc.end.token = this.endTokenIndex;
-};
diff --git a/lib/pure-parser.ts b/lib/pure-parser.ts
new file mode 100644
index 000000000..59bbc3e95
--- /dev/null
+++ b/lib/pure-parser.ts
@@ -0,0 +1,287 @@
+import assert from "assert";
+import * as types from "ast-types";
+const b = types.builders;
+const isObject = types.builtInTypes.object;
+const isArray = types.builtInTypes.array;
+import { normalize as normalizeOptions } from "./options";
+import { fromString } from "./lines";
+import { attach as attachComments } from "./comments";
+import * as util from "./util";
+import { Options } from "./options";
+
+export function parse(source: string, options?: Partial<Options>) {
+  options = normalizeOptions(options);
+
+  const lines = fromString(source, options);
+
+  const sourceWithoutTabs = lines.toString({
+    tabWidth: options.tabWidth,
+    reuseWhitespace: false,
+    useTabs: false,
+  });
+
+  let comments: any[] = [];
+  const ast = options.parser.parse(sourceWithoutTabs, {
+    jsx: true,
+    loc: true,
+    locations: true,
+    range: options.range,
+    comment: true,
+    onComment: comments,
+    tolerant: util.getOption(options, "tolerant", true),
+    ecmaVersion: 6,
+    sourceType: util.getOption(options, "sourceType", "module"),
+  });
+
+  const tokens: any[] = ast.tokens;
+
+  // We will reattach the tokens array to the file object below.
+  delete ast.tokens;
+
+  // Make sure every token has a token.value string.
+  tokens.forEach(function (token) {
+    if (typeof token.value !== "string") {
+      token.value = lines.sliceString(token.loc.start, token.loc.end);
+    }
+  });
+
+  if (Array.isArray(ast.comments)) {
+    comments = ast.comments;
+    delete ast.comments;
+  }
+
+  if (ast.loc) {
+    // If the source was empty, some parsers give loc.{start,end}.line
+    // values of 0, instead of the minimum of 1.
+    util.fixFaultyLocations(ast, lines);
+  } else {
+    ast.loc = {
+      start: lines.firstPos(),
+      end: lines.lastPos(),
+    };
+  }
+
+  ast.loc.lines = lines;
+  ast.loc.indent = 0;
+
+  let file;
+  let program;
+  if (ast.type === "Program") {
+    program = ast;
+    // In order to ensure we reprint leading and trailing program
+    // comments, wrap the original Program node with a File node. Only
+    // ESTree parsers (Acorn and Esprima) return a Program as the root AST
+    // node. Most other (Babylon-like) parsers return a File.
+    file = b.file(ast, options.sourceFileName || null);
+    file.loc = {
+      start: lines.firstPos(),
+      end: lines.lastPos(),
+      lines: lines,
+      indent: 0,
+    } as any;
+  } else if (ast.type === "File") {
+    file = ast;
+    program = file.program;
+  }
+
+  // Expose file.tokens unless the caller passed false for options.tokens.
+  if (options.tokens) {
+    file.tokens = tokens;
+  }
+
+  // Expand the Program's .loc to include all comments (not just those
+  // attached to the Program node, as its children may have comments as
+  // well), since sometimes program.loc.{start,end} will coincide with the
+  // .loc.{start,end} of the first and last *statements*, mistakenly
+  // excluding comments that fall outside that region.
+  const trueProgramLoc: any = util.getTrueLoc(
+    {
+      type: program.type,
+      loc: program.loc,
+      body: [],
+      comments,
+    },
+    lines,
+  );
+  program.loc.start = trueProgramLoc.start;
+  program.loc.end = trueProgramLoc.end;
+
+  // Passing file.program here instead of just file means that initial
+  // comments will be attached to program.body[0] instead of program.
+  attachComments(comments, program.body.length ? file.program : file, lines);
+
+  // Return a copy of the original AST so that any changes made may be
+  // compared to the original.
+  return new TreeCopier(lines, tokens).copy(file);
+}
+
+interface TreeCopierType {
+  lines: any;
+  tokens: any[];
+  startTokenIndex: number;
+  endTokenIndex: number;
+  indent: number;
+  seen: Map<any, any>;
+  copy(node: any): any;
+  findTokenRange(loc: any): any;
+}
+
+interface TreeCopierConstructor {
+  new (lines: any, tokens: any): TreeCopierType;
+}
+
+const TreeCopier = function TreeCopier(
+  this: TreeCopierType,
+  lines: any,
+  tokens: any,
+) {
+  assert.ok(this instanceof TreeCopier);
+  this.lines = lines;
+  this.tokens = tokens;
+  this.startTokenIndex = 0;
+  this.endTokenIndex = tokens.length;
+  this.indent = 0;
+  this.seen = new Map();
+} as any as TreeCopierConstructor;
+
+const TCp: TreeCopierType = TreeCopier.prototype;
+
+TCp.copy = function (node) {
+  if (this.seen.has(node)) {
+    return this.seen.get(node);
+  }
+
+  if (isArray.check(node)) {
+    const copy: any = new Array(node.length);
+    this.seen.set(node, copy);
+    node.forEach(function (this: any, item: any, i: any) {
+      copy[i] = this.copy(item);
+    }, this);
+    return copy;
+  }
+
+  if (!isObject.check(node)) {
+    return node;
+  }
+
+  util.fixFaultyLocations(node, this.lines);
+
+  const copy: any = Object.create(Object.getPrototypeOf(node), {
+    original: {
+      // Provide a link from the copy to the original.
+      value: node,
+      configurable: false,
+      enumerable: false,
+      writable: true,
+    },
+  });
+
+  this.seen.set(node, copy);
+
+  const loc = node.loc;
+  const oldIndent = this.indent;
+  let newIndent = oldIndent;
+
+  const oldStartTokenIndex = this.startTokenIndex;
+  const oldEndTokenIndex = this.endTokenIndex;
+
+  if (loc) {
+    // When node is a comment, we set node.loc.indent to
+    // node.loc.start.column so that, when/if we print the comment by
+    // itself, we can strip that much whitespace from the left margin of
+    // the comment. This only really matters for multiline Block comments,
+    // but it doesn't hurt for Line comments.
+    if (
+      node.type === "Block" ||
+      node.type === "Line" ||
+      node.type === "CommentBlock" ||
+      node.type === "CommentLine" ||
+      this.lines.isPrecededOnlyByWhitespace(loc.start)
+    ) {
+      newIndent = this.indent = loc.start.column;
+    }
+
+    // Every node.loc has a reference to the original source lines as well
+    // as a complete list of source tokens.
+    loc.lines = this.lines;
+    loc.tokens = this.tokens;
+    loc.indent = newIndent;
+
+    // Set loc.start.token and loc.end.token such that
+    // loc.tokens.slice(loc.start.token, loc.end.token) returns a list of
+    // all the tokens that make up this node.
+    this.findTokenRange(loc);
+  }
+
+  const keys = Object.keys(node);
+  const keyCount = keys.length;
+  for (let i = 0; i < keyCount; ++i) {
+    const key = keys[i];
+    if (key === "loc") {
+      copy[key] = node[key];
+    } else if (key === "tokens" && node.type === "File") {
+      // Preserve file.tokens (uncopied) in case client code cares about
+      // it, even though Recast ignores it when reprinting.
+      copy[key] = node[key];
+    } else {
+      copy[key] = this.copy(node[key]);
+    }
+  }
+
+  this.indent = oldIndent;
+  this.startTokenIndex = oldStartTokenIndex;
+  this.endTokenIndex = oldEndTokenIndex;
+
+  return copy;
+};
+
+// If we didn't have any idea where in loc.tokens to look for tokens
+// contained by this loc, a binary search would be appropriate, but
+// because we maintain this.startTokenIndex and this.endTokenIndex as we
+// traverse the AST, we only need to make small (linear) adjustments to
+// those indexes with each recursive iteration.
+TCp.findTokenRange = function (loc) {
+  // In the unlikely event that loc.tokens[this.startTokenIndex] starts
+  // *after* loc.start, we need to rewind this.startTokenIndex first.
+  while (this.startTokenIndex > 0) {
+    const token = loc.tokens[this.startTokenIndex];
+    if (util.comparePos(loc.start, token.loc.start) < 0) {
+      --this.startTokenIndex;
+    } else break;
+  }
+
+  // In the unlikely event that loc.tokens[this.endTokenIndex - 1] ends
+  // *before* loc.end, we need to fast-forward this.endTokenIndex first.
+  while (this.endTokenIndex < loc.tokens.length) {
+    const token = loc.tokens[this.endTokenIndex];
+    if (util.comparePos(token.loc.end, loc.end) < 0) {
+      ++this.endTokenIndex;
+    } else break;
+  }
+
+  // Increment this.startTokenIndex until we've found the first token
+  // contained by this node.
+  while (this.startTokenIndex < this.endTokenIndex) {
+    const token = loc.tokens[this.startTokenIndex];
+    if (util.comparePos(token.loc.start, loc.start) < 0) {
+      ++this.startTokenIndex;
+    } else break;
+  }
+
+  // Index into loc.tokens of the first token within this node.
+  loc.start.token = this.startTokenIndex;
+
+  // Decrement this.endTokenIndex until we've found the first token after
+  // this node (not contained by the node).
+  while (this.endTokenIndex > this.startTokenIndex) {
+    const token = loc.tokens[this.endTokenIndex - 1];
+    if (util.comparePos(loc.end, token.loc.end) < 0) {
+      --this.endTokenIndex;
+    } else break;
+  }
+
+  // Index into loc.tokens of the first token *after* this node.
+  // If loc.start.token === loc.end.token, the node contains no tokens,
+  // and the index is that of the next token following this node.
+  loc.end.token = this.endTokenIndex;
+};
diff --git a/main.ts b/main.ts
index 4d4f7e925..5a339cc3f 100644
--- a/main.ts
+++ b/main.ts
@@ -1,93 +1,2 @@
-import fs from "fs";
-import * as types from "ast-types";
-import { parse } from "./lib/parser";
-import { Printer } from "./lib/printer";
-import { Options } from "./lib/options";
-
-export {
-  /**
-   * Parse a string of code into an augmented syntax tree suitable for
-   * arbitrary modification and reprinting.
-   */
-  parse,
-  /**
-   * Convenient shorthand for the ast-types package.
-   */
-  types,
-};
-
-/**
- * Traverse and potentially modify an abstract syntax tree using a
- * convenient visitor syntax:
- *
- *   recast.visit(ast, {
- *     names: [],
- *     visitIdentifier: function(path) {
- *       var node = path.value;
- *       this.visitor.names.push(node.name);
- *       this.traverse(path);
- *     }
- *   });
- */
-export { visit } from "ast-types";
-
-/**
- * Options shared between parsing and printing.
- */
-export { Options } from "./lib/options";
-
-/**
- * Reprint a modified syntax tree using as much of the original source
- * code as possible.
- */
-export function print(node: types.ASTNode, options?: Options) {
-  return new Printer(options).print(node);
-}
-
-/**
- * Print without attempting to reuse any original source code.
- */
-export function prettyPrint(node: types.ASTNode, options?: Options) {
-  return new Printer(options).printGenerically(node);
-}
-
-/**
- * Convenient command-line interface (see e.g. example/add-braces).
- */
-export function run(transformer: Transformer, options?: RunOptions) {
-  return runFile(process.argv[2], transformer, options);
-}
-
-export interface Transformer {
-  (ast: types.ASTNode, callback: (ast: types.ASTNode) => void): void;
-}
-
-export interface RunOptions extends Options {
-  writeback?(code: string): void;
-}
-
-function runFile(path: any, transformer: Transformer, options?: RunOptions) {
-  fs.readFile(path, "utf-8", function (err, code) {
-    if (err) {
-      console.error(err);
-      return;
-    }
-
-    runString(code, transformer, options);
-  });
-}
-
-function defaultWriteback(output: string) {
-  process.stdout.write(output);
-}
-
-function runString(
-  code: string,
-  transformer: Transformer,
-  options?: RunOptions,
-) {
-  const writeback = (options && options.writeback) || defaultWriteback;
-  transformer(parse(code, options), function (node: any) {
-    writeback(print(node, options).code);
-  });
-}
+export * from "./pure";
+export { parse } from "./lib/parser";
diff --git a/parsers/babel-ts.ts b/parsers/babel-ts.ts
index 34c22c4c2..1a38091ca 100644
--- a/parsers/babel-ts.ts
+++ b/parsers/babel-ts.ts
@@ -1,10 +1,8 @@
-import { parser } from "./babel";
+import { parse as babelParse } from "@babel/parser";
 import getBabelOptions, { Overrides } from "./_babel_options";
 
-export { parser };
-
 export function parse(source: string, options?: Overrides) {
   const babelOptions = getBabelOptions(options);
   babelOptions.plugins.push("jsx", "typescript");
-  return parser.parse(source, babelOptions);
+  return babelParse(source, babelOptions);
 }
diff --git a/parsers/babel.ts b/parsers/babel.ts
index 25b134ea0..e2685b0a4 100644
--- a/parsers/babel.ts
+++ b/parsers/babel.ts
@@ -1,18 +1,6 @@
 import { parse as babelParse } from "@babel/parser";
 import getBabelOptions, { Overrides } from "./_babel_options";
 
-type BabelParser = { parse: typeof babelParse };
-
-// Prefer the new @babel/parser package, but fall back to babylon if
-// that's what's available.
-export const parser = (function (): BabelParser {
-  try {
-    return require("@babel/parser");
-  } catch (e) {
-    return require("babylon");
-  }
-})();
-
 // This module is suitable for passing as options.parser when calling
 // recast.parse to process JavaScript code with Babel:
 //
@@ -23,5 +11,5 @@ export const parser = (function (): BabelParser {
 export function parse(source: string, options?: Overrides) {
   const babelOptions = getBabelOptions(options);
   babelOptions.plugins.push("jsx", "flow");
-  return parser.parse(source, babelOptions);
+  return babelParse(source, babelOptions);
 }
diff --git a/parsers/babylon.ts b/parsers/babylon.ts
index 60fce87ff..30e282e67 100644
--- a/parsers/babylon.ts
+++ b/parsers/babylon.ts
@@ -1 +1,14 @@
-export * from "./babel";
+import getBabelOptions, { Overrides } from "./_babel_options";
+
+// This module is suitable for passing as options.parser when calling
+// recast.parse to process JavaScript code with Babel:
+//
+//   const ast = recast.parse(source, {
+//     parser: require("recast/parsers/babel")
+//   });
+//
+export function parse(source: string, options?: Overrides) {
+  const babelOptions = getBabelOptions(options);
+  babelOptions.plugins.push("jsx", "flow");
+  return require("babylon").parse(source, babelOptions);
+}
diff --git a/parsers/flow.ts b/parsers/flow.ts
index 771c6cedf..520df4b68 100644
--- a/parsers/flow.ts
+++ b/parsers/flow.ts
@@ -1,4 +1,4 @@
-import { parser } from "./babel";
+import { parse as babelParse } from "@babel/parser";
 import getBabelOptions, { Overrides } from "./_babel_options";
 
 // This module is suitable for passing as options.parser when calling
@@ -11,5 +11,5 @@ import getBabelOptions, { Overrides } from "./_babel_options";
 export function parse(source: string, options?: Overrides) {
   const babelOptions = getBabelOptions(options);
   babelOptions.plugins.push("jsx", "flow");
-  return parser.parse(source, babelOptions);
+  return babelParse(source, babelOptions);
 }
diff --git a/parsers/typescript.ts b/parsers/typescript.ts
index c258ce93e..a7b073e5f 100644
--- a/parsers/typescript.ts
+++ b/parsers/typescript.ts
@@ -1,4 +1,4 @@
-import { parser } from "./babel";
+import { parse as babelParse } from "@babel/parser";
 import getBabelOptions, { Overrides } from "./_babel_options";
 
 // This module is suitable for passing as options.parser when calling
@@ -11,5 +11,5 @@ import getBabelOptions, { Overrides } from "./_babel_options";
 export function parse(source: string, options?: Overrides) {
   const babelOptions = getBabelOptions(options);
   babelOptions.plugins.push("typescript");
-  return parser.parse(source, babelOptions);
+  return babelParse(source, babelOptions);
 }
diff --git a/pure.ts b/pure.ts
new file mode 100644
index 000000000..da93511c6
--- /dev/null
+++ b/pure.ts
@@ -0,0 +1,93 @@
+import fs from "fs";
+import * as types from "ast-types";
+import { parse } from "./lib/pure-parser";
+import { Printer } from "./lib/printer";
+import { Options } from "./lib/options";
+
+export {
+  /**
+   * Parse a string of code into an augmented syntax tree suitable for
+   * arbitrary modification and reprinting.
+   */
+  parse,
+  /**
+   * Convenient shorthand for the ast-types package.
+   */
+  types,
+};
+
+/**
+ * Traverse and potentially modify an abstract syntax tree using a
+ * convenient visitor syntax:
+ *
+ *   recast.visit(ast, {
+ *     names: [],
+ *     visitIdentifier: function(path) {
+ *       var node = path.value;
+ *       this.visitor.names.push(node.name);
+ *       this.traverse(path);
+ *     }
+ *   });
+ */
+export { visit } from "ast-types";
+
+/**
+ * Options shared between parsing and printing.
+ */
+export { Options } from "./lib/options";
+
+/**
+ * Reprint a modified syntax tree using as much of the original source
+ * code as possible.
+ */
+export function print(node: types.ASTNode, options?: Options) {
+  return new Printer(options).print(node);
+}
+
+/**
+ * Print without attempting to reuse any original source code.
+ */
+export function prettyPrint(node: types.ASTNode, options?: Options) {
+  return new Printer(options).printGenerically(node);
+}
+
+/**
+ * Convenient command-line interface (see e.g. example/add-braces).
+ */
+export function run(transformer: Transformer, options?: RunOptions) {
+  return runFile(process.argv[2], transformer, options);
+}
+
+export interface Transformer {
+  (ast: types.ASTNode, callback: (ast: types.ASTNode) => void): void;
+}
+
+export interface RunOptions extends Options {
+  writeback?(code: string): void;
+}
+
+function runFile(path: any, transformer: Transformer, options?: RunOptions) {
+  fs.readFile(path, "utf-8", function (err, code) {
+    if (err) {
+      console.error(err);
+      return;
+    }
+
+    runString(code, transformer, options);
+  });
+}
+
+function defaultWriteback(output: string) {
+  process.stdout.write(output);
+}
+
+function runString(
+  code: string,
+  transformer: Transformer,
+  options?: RunOptions,
+) {
+  const writeback = (options && options.writeback) || defaultWriteback;
+  transformer(parse(code, options), function (node: any) {
+    writeback(print(node, options).code);
+  });
+}
diff --git a/test/printer.ts b/test/printer.ts
index a2934dde4..fd7d21dc4 100644
--- a/test/printer.ts
+++ b/test/printer.ts
@@ -1341,7 +1341,7 @@ describe("printer", function () {
     const lines = fromString(code);
 
     const ast = parse(code, {
-      esprima: {
+      parser: {
        parse: function (source: string, options?: any) {
          const program = require("esprima").parse(source, options);
          n.Program.assert(program);
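
For reference, a minimal sketch of how the two entry points are expected to be used after this change. It is not part of the patch, and the "recast" / "recast/pure" import specifiers are illustrative assumptions about how main.ts and pure.ts end up exposed; only "recast/parsers/babel" is quoted in the diff itself.

    // Assumed specifiers: "recast" -> main.ts, "recast/pure" -> pure.ts.
    const recast = require("recast");
    const pure = require("recast/pure");

    const source = "let x = 1;";

    // Main entry (lib/parser.ts): still defaults to the bundled esprima
    // parser and still falls back to esprima's tokenizer when the parser
    // returns no ast.tokens.
    const defaultAst = recast.parse(source);

    // The deprecated `esprima` option is gone; custom parsers are passed
    // via `parser`, as in the updated test/printer.ts hunk above.
    const babelAst = recast.parse(source, {
      parser: require("recast/parsers/babel"),
    });

    // Pure entry (lib/pure-parser.ts): no default parser and no tokenizer
    // fallback, so the caller must supply a parser that exposes ast.tokens
    // (the preconfigured recast/parsers/* do).
    const pureAst = pure.parse(source, {
      parser: require("recast/parsers/babel"),
    });

    console.log(recast.print(defaultAst).code);
    console.log(recast.print(babelAst).code);
    console.log(pure.print(pureAst).code);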