Thought about bringing some more commands over but too complex.

main
Zed A. Shaw 10 months ago
parent 59843f5dd2
commit c8d47a4be2
  commands/codedocs.js (345 lines removed)
  commands/coverage.js (105 lines removed)

@@ -1,345 +0,0 @@
// you may not need all of these but they come up a lot
import fs from "fs";
import assert from "assert";
import logging from '../lib/logging.js';
import { mkdir_to, glob } from "../lib/builderator.js";
import { create_renderer } from "../lib/docgen.js";
import path from "path";
import slugify from "slugify";
import * as acorn from "acorn";
import * as acorn_walk from "acorn-walk";
const log = logging.create(import.meta.url);
export const description = "Generate JSON code documentation from exported declarations and their comments."
// your command uses the npm package commander's options format
export const options = [
  ["--quiet", "Don't output the stats at the end."],
  ["--readme", "README file to use as the initial page", "README.md"]
]
// example of a positional argument, it's the 1st argument to main
export const argument = ["<source...>", "source input globs"];
// put required options in the required variable
export const required = [
  ["--output <string>", "Save to file rather than stdout."],
]
const RENDERER = create_renderer();
const CAPS_WORDS = ["BUG", "TODO", "WARNING", "FOOTGUN", "DEPRECATED"];
const STATS = {total: 0, docs: 0, undoc: 0};
const slug = (instring) => slugify(instring, { lower: true, strict: true, trim: true});
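// For example, slug("My Class Name") yields "my-class-name"; strict mode
// should also strip most punctuation, so slug("Weird!Name") comes out
// roughly as "weirdname".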
/*
  Strips one leading space from each comment line, or the \s\* prefix used
  in traditional documentation comments. Stripping more would break markdown
  constructs that depend on indentation. Remarkable handles leading spaces
  well enough that removing a single space, or the \s\* combination, is all
  that's needed.
*/
const render_comment = (comment) => {
  const lines = comment.split(/\n/).map(l => l.replace(/^(\s*\*\s?|\s)/, ''));
  return RENDERER.render(lines.join("\n"));
}
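// A minimal sketch of render_comment in action, assuming RENDERER renders
// markdown to HTML (Remarkable-style):
//   render_comment(" * First line\n * Second line")
//   // => roughly "<p>First line\nSecond line</p>"; exact HTML depends on the renderer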
/* Handy function for checking things are good and aborting. */
const check = (test, fail_message) => {
  if(!test) {
    log.error(fail_message);
    process.exit(1);
  }
}
const dump = (obj) => {
  return JSON.stringify(obj, null, 4);
}
class ParseWalker {
  constructor(comments, code) {
    this.comments = comments;
    this.exported = [];
    this.code = code;
  }

  handle_class(root) {
    const new_class = {
      isa: "class",
      slug: slug(root.declaration.id.name),
      name: root.declaration.id.name,
      line_start: root.loc.start.line,
      methods: [],
    }

    acorn_walk.simple(root, {
      ClassDeclaration: (cls_node) => {
        assert(cls_node.id.name === new_class.name, "Name of class changed!");
        new_class.range = [cls_node.start, cls_node.body.start];
        this.add_export(cls_node.id, new_class);
      },
      MethodDefinition: (meth_node) => {
        const new_method = {
          isa: "method",
          static: meth_node.static,
          async: meth_node.value.async,
          generator: meth_node.value.generator,
          slug: slug(`${new_class.name}-${meth_node.key.name}`),
          name: meth_node.key.name,
          line_start: meth_node.loc.start.line,
          range: [meth_node.start, meth_node.value.body.start],
          params: this.handle_params(meth_node.value.params),
          comment: this.find_comment(meth_node.loc.start.line),
        }

        this.has_CAPS(new_method);
        new_method.code = this.slice_code(new_method.range);
        this.update_stats(new_method); // methods can't go through add_export
        new_class.methods.push(new_method);
      }
    });
  }
  handle_params(param_list) {
    const result = [];

    for(let param of param_list) {
      acorn_walk.simple(param, {
        Identifier: (_node) => {
          result.push({isa: "identifier", name: _node.name});
        },
        AssignmentPattern: (_node) => {
          result.push({
            isa: "assignment",
            name: _node.left.name,
            right: {
              type: _node.right.type.toLowerCase(),
              raw: _node.right.raw,
              value: _node.right.value,
              name: _node.right.name,
            },
          });
        }
      });
    }

    return result;
  }
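  /*
  Rough sketch of what handle_params produces for (name, count = 1):
    [
      {isa: "identifier", name: "name"},
      {isa: "identifier", name: "count"},
      {isa: "assignment", name: "count",
       right: {type: "literal", raw: "1", value: 1, name: undefined}},
    ]
  acorn-walk's simple walk fires callbacks post-order, so a default
  parameter appears both as an identifier and as an assignment entry.
  */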
  /*
  Flags an export when its comment mentions one of the CAPS_WORDS,
  like BUG or TODO.
  */
  has_CAPS(exp) {
    if(exp.comment) {
      exp.caps = CAPS_WORDS.filter(phrase => exp.comment.includes(phrase));
    } else {
      exp.caps = [];
    }
  }
  update_stats(exp) {
    STATS.total += 1;

    if(exp.comment) {
      STATS.docs += 1;
    } else {
      STATS.undoc += 1;
    }
  }

  add_export(id, exp) {
    exp.name = id.name;
    exp.slug = exp.slug ? exp.slug : slug(id.name);
    exp.line_start = id.loc.start.line;
    exp.comment = this.find_comment(exp.line_start);
    this.has_CAPS(exp);
    exp.code = this.slice_code(exp.range);
    this.exported.push(exp);
    this.update_stats(exp);
  }

  slice_code(range) {
    return this.code.slice(range[0], range[1]);
  }
  handle_arrow_func(id, arrow) {
    this.add_export(id, {
      isa: "function",
      async: arrow.async,
      generator: arrow.generator,
      expression: arrow.expression,
      range: [id.start, arrow.body.start],
      params: this.handle_params(arrow.params),
    });
  }

  handle_variable(root) {
    const declare = root.declaration.declarations[0];
    const id = declare.id;
    const _node = declare.init;
    const init_is = declare.init.type;

    if(init_is === "ArrowFunctionExpression") {
      this.handle_arrow_func(id, declare.init);
    } else {
      this.add_export(id, {
        isa: _node.type.toLowerCase(),
        value: _node.value,
        range: declare.range,
        raw: _node.raw
      });
    }
  }
  /*
  Finds the comment that ends on the line immediately above the given
  line. Each comment is claimed at most once, via its found flag.
  */
  find_comment(line) {
    for(let c of this.comments) {
      const distance = c.end - line;

      if(!c.found && distance === -1) {
        c.found = true;
        return render_comment(c.value);
      }
    }

    return undefined;
  }
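  // Example: a block comment ending on line 10 attaches to a declaration
  // starting on line 11 (distance === -1); a comment two or more lines
  // above is left unclaimed and ends up in orphan_comments.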
  /*
  Returns the first comment as the file's main doc comment, or undefined
  if there isn't one.
  */
  file_comment() {
    const comment = this.comments[0];

    if(comment && comment.start === 1) {
      // kind of a hack, but find_comment will find this now
      return this.find_comment(comment.end + 1);
    } else {
      return undefined;
    }
  }
  handle_export(_node) {
    switch(_node.declaration.type) {
      case "ClassDeclaration":
        this.handle_class(_node);
        break;
      case "VariableDeclaration": {
        this.handle_variable(_node);
        break;
      }
      default:
        console.log(">>>", _node.declaration.type);
    }
  }
}
const parse_source = (source) => {
  const code = fs.readFileSync(source);
  let comments = [];
  const acorn_opts = {
    sourceType: "module",
    ecmaVersion: "2023",
    locations: true,
    sourceFile: source,
    ranges: true,
    onComment: comments
  }

  const parsed = acorn.parse(code, acorn_opts);

  comments = comments.filter(c => c.type === "Block").map(c => {
    return {
      start: c.loc.start.line,
      end: c.loc.end.line,
      value: c.value,
      type: "comment",
      found: false,
    }
  });

  const walker = new ParseWalker(comments, code.toString());

  // acorn-walk grabs a bare reference to the handler function, which detaches
  // `this`, so wrap the call in an arrow rather than passing walker.handle_export
  acorn_walk.simple(parsed, {
    ExportNamedDeclaration: (_node) => walker.handle_export(_node),
  });

  let comment = walker.file_comment();

  return {
    // normalize to / even on windows
    source: source.replaceAll("\\", "/"),
    // the first comment in the file becomes the file's doc comment
    comment,
    exports: walker.exported,
    orphan_comments: walker.comments.filter(c => !c.found)
  };
}
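// A sketch of the record parse_source returns for one file (field values
// are illustrative, not from a real run):
//   {
//     source: "commands/example.js",
//     comment: "<p>file level docs...</p>",
//     exports: [{isa: "function", name: "main", slug: "main", ...}],
//     orphan_comments: []
//   }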
const normalize_name = (fname) => {
  const no_slash = fname.replaceAll("\\", "/");

  // test no_slash, not fname, so windows-style prefixes are handled too
  if(no_slash.startsWith("./")) {
    return no_slash.slice(2);
  } else if(no_slash.startsWith("/")) {
    return no_slash.slice(1);
  } else {
    return no_slash;
  }
}
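// Examples: "./commands/foo.js" -> "commands/foo.js",
// "/commands/foo.js" -> "commands/foo.js", and on windows
// "commands\\foo.js" -> "commands/foo.js".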
export const main = async (source_globs, opts) => {
  const index = {};
  mkdir_to(opts.output);

  for(let source of source_globs) {
    const source_list = glob(source);

    for(let fname of source_list) {
      const result = parse_source(fname);
      const target = `${path.join(opts.output, fname)}.json`;
      mkdir_to(target);
      fs.writeFileSync(target, dump(result));

      const name = normalize_name(fname);
      index[name] = result.exports.map(e => {
        return {isa: e.isa, name: e.name};
      });
    }
  }

  // now write the grand index
  const index_name = path.join(opts.output, "index.json");
  fs.writeFileSync(index_name, dump(index));

  // render the README.md to the initial docs page
  const readme_name = path.join(opts.output, "index.html");
  const md_out = RENDERER.render(fs.readFileSync(opts.readme).toString());
  fs.writeFileSync(readme_name, md_out);

  const percent = Math.floor(100 * STATS.docs / STATS.total);

  if(!opts.quiet) {
    console.log(`Total ${STATS.total}, ${percent}% documented (${STATS.docs} docs vs. ${STATS.undoc} no docs).`);
  }

  process.exit(0);
}
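// Hypothetical invocation, assuming these command modules run through the
// project's commander-based runner (the runner name is a placeholder, not
// part of this commit):
//
//   node runner.js codedocs --output docs/api 'lib/**/*.js'
//
// That writes one .json per source file under docs/api, an index.json,
// and an index.html rendered from README.md.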

@@ -1,105 +0,0 @@
import libCoverage from 'istanbul-lib-coverage';
import libReport from 'istanbul-lib-report';
import reports from 'istanbul-reports';
import { glob } from "../lib/builderator.js";
import fs from "fs";
import v8toIstanbul from 'v8-to-istanbul';
import assert from "assert";
import url from "url";
import normalize from "normalize-path";
export const description = "Takes the output of a node V8 coverage directory (NODE_V8_COVERAGE) and generates a report.";
export const argument = ["<coverage_dir>", "coverage directory"];
export const main = async (coverage_dir) => {
  const covdir = normalize(coverage_dir);
  const covpattern = `${covdir}/**/*.json`;
  console.log(`Searching ${covpattern} for coverage files...`);
  const covfiles = glob(covpattern);
  console.log(`Found ${covfiles.length} .json files in ${covdir}`);

  const coverage = {};
  const excludes = [
    "node_modules",
    "secrets",
    "/net"
  ];

  for(const fname of covfiles) {
    const data = JSON.parse(fs.readFileSync(fname));

    // drop entries for excluded paths and files that no longer exist
    data.result = data.result.filter(x => {
      if(x.url) {
        // we need to do surgery on the URL because node is bad at them
        let pathname = url.parse(x.url).pathname;

        // fix the URL and turn it into a file name
        if(!pathname) {
          return false;
        } else if(pathname.startsWith("/C:")) {
          // url doesn't parse windows paths right, so remove the
          // leading / to get a usable file name
          x.url = pathname.slice(1);
        } else {
          x.url = pathname;
        }

        const excluded = excludes.filter(e => x.url.includes(e));
        return excluded.length === 0 && fs.existsSync(x.url);
      } else {
        return false;
      }
    });

    // looks good, save it
    if(data.result.length > 0) {
      coverage[fname] = data;
    }
  }
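  // For reference, each raw coverage .json is V8's format, roughly:
  //   {"result": [{"scriptId": "99", "url": "file:///app/lib/foo.js",
  //                "functions": [{"functionName": "bar", "ranges": [...], ...}]}]}
  // (abbreviated sketch, not output from a real run)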
  const coverageMap = libCoverage.createCoverageMap();
  console.log("After filtering, found count is:", Object.entries(coverage).length);

  for(const [fname, data] of Object.entries(coverage)) {
    for(const entry of data.result) {
      let converter;
      const pathname = url.parse(entry.url).pathname;
      assert(fs.existsSync(pathname), `coverage entry in ${fname} contains ${entry.url} that doesn't exist but should`);

      converter = v8toIstanbul(pathname, 0, {source: entry.source}, path => {
        const excluded = excludes.filter(e => path.includes(e));
        return excluded.length > 0;
      });

      try {
        await converter.load();
        converter.applyCoverage(entry.functions);
        coverageMap.merge(converter.toIstanbul());
      } catch(error) {
        console.error(error, "load", entry.url);
      }
    }
  }
  const watermarks = undefined; // watermarks matter for coverage checks; ignored here

  const context = libReport.createContext({
    dir: "coverage",
    watermarks,
    coverageMap
  });

  ["text", "html"].forEach(format => {
    reports.create(format, {
      skipEmpty: false,
      skipFull: true,
      maxCols: 100
    }).execute(context);
  });

  process.exit(0);
}
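// Hypothetical usage, assuming tests were run with V8 coverage enabled and
// these commands go through the project's commander-based runner (runner
// name is a placeholder, not from this commit):
//
//   NODE_V8_COVERAGE=.coverage node runner.js test
//   node runner.js coverage .coverage
//
// This prints the text report to stdout and writes the HTML report under coverage/.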