docco.coffee | |
Docco is a quick-and-dirty, hundred-line-long, literate-programming-style documentation generator. It produces HTML that displays your comments alongside your code. Comments are passed through Markdown, and code is passed through Pygments syntax highlighting. This page is the result of running Docco against its own source file. If you install Docco, you can run it from the command line against one or more source files; it will generate an HTML documentation page for each of the named source files, with a menu linking to the other pages, saving everything into a docs folder.
The source for Docco is available on GitHub, and released under the MIT license. To install Docco, first make sure you have Node.js, Pygments (install the latest development version of Pygments from its Mercurial repository), and CoffeeScript. Then install it with NPM.
Docco can be used to process CoffeeScript, JavaScript, Ruby, Python, or Java files. Single-line comments are processed for every supported language; /* ... */ block comments are also handled for JavaScript and Java, where they are parsed with Dox. Partners in Crime:
| |
Main Documentation Generation Functions | |
Generate the documentation for a source file by reading it in, splitting it up into comment/code sections, highlighting them for the appropriate language, and merging them into an HTML template. | generate_documentation = (source, context, callback) ->
fs.readFile source, "utf-8", (error, code) ->
throw error if error
sections = parse source, code
highlight source, sections, ->
generate_html source, context, sections
callback() |
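As a usage sketch (the file path, project name, and context object below are hypothetical, not taken from this project), documenting a single file looks like this: | # A minimal sketch: example_context stands in for the context built at the bottom of this file.
example_context = sources: ['src/docco.coffee'], project_name: 'Docco'
generate_documentation 'src/docco.coffee', example_context, ->
  console.log 'Documented src/docco.coffee' |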
Given a string of source code, parse out each comment and the code that follows it, and create an individual section for it. Each section takes the form { docs_text: ..., code_text: ... }.
| parse = (source, code) ->
lines = code.split '\n'
sections = []
language = get_language source
has_code = docs_text = code_text = ''
in_multi = false
multi_accum = ""
save = (docs, code) ->
sections.push docs_text: docs, code_text: code
for line in lines
if line.match(language.multi_start_matcher) or in_multi
if has_code
save docs_text, code_text
has_code = docs_text = code_text = '' |
We've found the start of a multiline comment: set in_multi to true and begin accumulating lines until we reach a line that closes the comment. | in_multi = true
multi_accum += line + '\n' |
If we've reached the end of the multiline comment, run the accumulated text through the Dox template, set in_multi back to false, and reset multi_accum. | if line.match(language.multi_end_matcher)
in_multi = false
try
parsed = dox.parseComments( multi_accum )[0]
docs_text += dox_template(parsed)
catch error
console.log "Error parsing comments with Dox: #{error}"
docs_text = multi_accum
multi_accum = ''
else if line.match(language.comment_matcher) and not line.match(language.comment_filter)
if has_code
save docs_text, code_text
has_code = docs_text = code_text = ''
docs_text += line.replace(language.comment_matcher, '') + '\n'
else
has_code = yes
code_text += line + '\n'
save docs_text, code_text
sections |
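For illustration, here is a hypothetical two-line CoffeeScript snippet and the single section that parse would produce from it: | # A minimal sketch; the input string is an invented example.
example = "# Say hello\nconsole.log 'hello'"
parse 'example.coffee', example
# yields [ { docs_text: 'Say hello\n', code_text: "console.log 'hello'\n" } ] |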
Highlights a single chunk of CoffeeScript code, using Pygments over stdio, and runs the text of its corresponding comment through Markdown, using Showdown.js. We process the entire file in a single call to Pygments by inserting little marker comments between each section and then splitting the result string wherever our markers occur. | highlight = (source, sections, callback) ->
language = get_language source
pygments = spawn 'pygmentize', ['-l', language.name, '-f', 'html', '-O', 'encoding=utf-8,tabsize=2']
output = ''
pygments.stderr.addListener 'data', (error) ->
console.error error.toString() if error
pygments.stdin.addListener 'error', (error) ->
console.error "Could not use Pygments to highlight the source."
process.exit 1
pygments.stdout.addListener 'data', (result) ->
output += result if result
pygments.addListener 'exit', ->
output = output.replace(highlight_start, '').replace(highlight_end, '')
fragments = output.split language.divider_html
for section, i in sections
section.code_html = highlight_start + fragments[i] + highlight_end
section.docs_html = showdown.makeHtml section.docs_text
callback()
if pygments.stdin.writable
pygments.stdin.write((section.code_text for section in sections).join(language.divider_text))
pygments.stdin.end()
|
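To make the single-call batching concrete, this sketch (with invented section contents) shows what gets written to Pygments' stdin and how the highlighted output is recovered: | # A minimal sketch: two fake sections for a CoffeeScript file.
demo_divider = '\n#DIVIDER\n'   # the divider_text defined below for '.coffee'
demo_sections = [{code_text: 'a = 1\n'}, {code_text: 'b = 2\n'}]
joined = (section.code_text for section in demo_sections).join demo_divider
# joined is "a = 1\n\n#DIVIDER\nb = 2\n"; Pygments highlights it in one pass, and
# divider_html matches the highlighted marker, so output.split(divider_html)
# returns one HTML fragment per section. |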
Once all of the code has finished highlighting, we can generate the HTML file and write out the documentation, passing the completed sections into the template found in resources/docco.jade. | generate_html = (source, context, sections) ->
title = path.basename source
dest = destination source, context
html = docco_template {
title: title, file_path: source, sections: sections, context: context, path: path, relative_base: relative_base
} |
Generate the file's base dir as required | target_dir = path.dirname(dest)
write_func = ->
console.log "docco: #{source} -> #{dest}"
fs.writeFile dest, html, (err) -> throw err if err
fs.stat target_dir, (err, stats) ->
throw err if err and err.code != 'ENOENT'
return write_func() unless err
if err
exec "mkdir -p #{target_dir}", (err) ->
throw err if err
write_func()
generate_readme = (context, sources) ->
title = "README"
dest = "docs/readme.html"
source = "README.md" |
The README.md template used to generate the main README page. | readme_template = jade.compile fs.readFileSync(__dirname + '/../resources/readme.jade').toString(), { filename: __dirname + '/../resources/readme.jade' }
readme_path = process.cwd() + '/README.md'
readme_markdown = if file_exists(readme_path) then fs.readFileSync(readme_path).toString() else "There is no README.md for this project yet :( "
package_path = process.cwd() + '/package.json'
package_json = if file_exists(package_path) then JSON.parse(fs.readFileSync(package_path).toString()) else {}
content = showdown.makeHtml readme_markdown
cloc sources.join(" "), (code_stats) ->
html = readme_template {
title: title, context: context, content: content, file_path: source, path: path, relative_base: relative_base, package_json: package_json, code_stats: code_stats, gravatar: gravatar
} |
Generate the file's base dir as required | target_dir = path.dirname(dest)
write_func = ->
console.log "docco: #{source} -> #{dest}"
fs.writeFile dest, html, (err) -> throw err if err
fs.stat target_dir, (err, stats) ->
throw err if err and err.code != 'ENOENT'
return write_func() unless err
if err
exec "mkdir -p #{target_dir}", (err) ->
throw err if err
write_func()
cloc = (paths, callback) ->
exec "#{__dirname}/../vendor/cloc.pl --quiet --read-lang-def=#{__dirname}/../resources/cloc_definitions.txt #{paths}", (err, stdout) ->
console.log "Calculating project stats failed #{err}" if err
callback stdout |
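A brief usage sketch of cloc (the paths here are hypothetical): the callback receives cloc's plain-text summary, which the README template embeds as code_stats. | # A minimal sketch; 'src lib' is an invented space-separated path list.
cloc 'src lib', (code_stats) ->
  console.log code_stats |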
Helpers & Setup | |
Require our external dependencies, including Showdown.js (the JavaScript implementation of Markdown). | fs = require 'fs'
path = require 'path'
showdown = require('./../vendor/showdown').Showdown
jade = require 'jade'
dox = require 'dox'
gravatar = require 'gravatar'
{spawn, exec} = require 'child_process' |
A list of the languages that Docco supports, mapping the file extension to the name of the Pygments lexer and the symbol that indicates a comment. To add another language to Docco's repertoire, add it here. | languages =
'.coffee':
name: 'coffee-script', symbol: '#'
'.js':
name: 'javascript', symbol: '//', multi_start: "/*", multi_end: "*/"
'.rb':
name: 'ruby', symbol: '#'
'.py':
name: 'python', symbol: '#'
'.java':
name: 'java', symbol: '//', multi_start: "/*", multi_end: "*/" |
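For example (a hypothetical entry, not part of the table above), another language is registered by mapping its extension to a Pygments lexer name and comment symbol: | # A minimal sketch: teach Docco about shell scripts via Pygments' 'bash' lexer.
languages['.sh'] =
  name: 'bash', symbol: '#' |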
Build out the appropriate matchers and delimiters for each language. | for ext, l of languages |
Does the line begin with a comment? | l.comment_matcher = new RegExp('^\\s*' + l.symbol + '\\s?') |
Ignore hashbangs and interpolations... | l.comment_filter = new RegExp('(^#![/]|^\\s*#\\{)') |
The dividing token we feed into Pygments, to delimit the boundaries between sections. | l.divider_text = '\n' + l.symbol + 'DIVIDER\n' |
The mirror of divider_text that we expect Pygments to return, so we can split the highlighted output back into the original sections. | l.divider_html = new RegExp('\\n*<span class="c1?">' + l.symbol + 'DIVIDER<\\/span>\\n*') |
Since we only handle /* ... */ multiline comments for now, test for them explicitly. Otherwise, set the multi matchers to a regex that can never match. | if l.multi_start == "/*"
l.multi_start_matcher = new RegExp(/^[\s]*\/\*[.]*/)
else
l.multi_start_matcher = new RegExp(/a^/)
if l.multi_end == "*/"
l.multi_end_matcher = new RegExp(/.*\*\/.*/)
else
l.multi_end_matcher = new RegExp(/a^/) |
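To illustrate the generated patterns (the sample lines are invented), the JavaScript matchers behave like this: | # A minimal sketch exercising the '.js' matchers built above.
js = languages['.js']
js.comment_matcher.test '  // a doc comment'      # true
js.comment_matcher.test "x = 1 // trailing note"  # false: the comment must start the line
js.multi_start_matcher.test '/* block comment'    # true
js.multi_end_matcher.test 'end of a block */'     # true |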
Get the current language we're documenting, based on the extension. | get_language = (source) -> languages[path.extname(source)] |
Compute the directory portion of a source file's path, which is mirrored under the docs folder. | relative_base = (filepath, context) ->
result = path.dirname(filepath) + '/'
if result == '/' then '' else result |
Compute the destination HTML path for an input source file path. If the source is lib/example.coffee, the HTML will be written to docs/lib/example.html. | destination = (filepath, context) ->
base_path = relative_base filepath, context
'docs/' + base_path + path.basename(filepath, path.extname(filepath)) + '.html' |
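As a small illustration (the file name is invented), a nested source file keeps its directory structure under the docs folder: | destination 'lib/parser.coffee', {}   # 'docs/lib/parser.html' |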
Ensure that the destination directory exists. | ensure_directory = (dir, callback) ->
exec "mkdir -p #{dir}", -> callback()
file_exists = (path) ->
try
return fs.lstatSync(path).isFile()
catch ex
return false |
Create the template that we will use to generate the Docco HTML page. | docco_template = jade.compile fs.readFileSync(__dirname + '/../resources/docco.jade').toString(), { filename: __dirname + '/../resources/docco.jade' }
dox_template = jade.compile fs.readFileSync(__dirname + '/../resources/dox.jade').toString(), { filename: __dirname + '/../resources/dox.jade' } |
The CSS styles we'd like to apply to the documentation. | docco_styles = fs.readFileSync(__dirname + '/../resources/docco.css').toString() |
The start of each Pygments highlight block. | highlight_start = '<div class="highlight"><pre>' |
The end of each Pygments highlight block. | highlight_end = '</pre></div>' |
Process our arguments, passing back an array of sources to generate docs for, along with an optional project name (via the -name flag). | parse_args = (callback) ->
args = process.ARGV
project_name = "" |
Optional Project name following -name option | if args[0] == "-name"
args.shift()
project_name = args.shift() |
Sort the list of files and directories | args = args.sort() |
Preserving past behavior: if no args are given, we do nothing (eventually display help?) | return unless args.length |
Collect all of the directories or file paths to then pass onto the 'find' command | roots = (a.replace(/\/+$/, '') for a in args)
roots = roots.join(" ")
|
Only include files that we know how to handle | lang_filter = for ext of languages
" -name '*#{ext}' "
lang_filter = lang_filter.join ' -o ' |
Rather than deal with building a recursive tree walker via the fs module, let's save ourselves typing and testing and drop to the shell | exec "find #{roots} -type f \\( #{lang_filter} \\)", (err, stdout) ->
throw err if err |
Don't include hidden files, either | sources = stdout.split("\n").filter (file) -> file != '' and path.basename(file)[0] != '.'
console.log "docco: Recursively generating docs underneath #{roots}/"
callback(sources, project_name, args)
parse_args (sources, project_name, raw_paths) -> |
Rather than relying on globals, let's pass around a context w/ misc info that we require down the line. | context = sources: sources, project_name: project_name
ensure_directory 'docs', ->
fs.writeFile 'docs/docco.css', docco_styles
files = sources[0..sources.length]
next_file = -> generate_documentation files.shift(), context, next_file if files.length
next_file()
generate_readme(context, raw_paths)
|