## Extending Marked

To champion the single-responsibility and open/closed principles, we have tried to make it relatively painless to extend marked. If you are looking to add custom functionality, this is the place to start.

<h2 id="renderer">The renderer</h2>

The renderer defines the output of the parser.

**Example:** Overriding default heading token by adding an embedded anchor tag like on GitHub.
```js
// Create reference instance
const marked = require('marked');

// Get reference
const renderer = new marked.Renderer();

// Override function
renderer.heading = function(text, level) {
  const escapedText = text.toLowerCase().replace(/[^\w]+/g, '-');

  return `
    <h${level}>
      <a name="${escapedText}" class="anchor" href="#${escapedText}">
        <span class="header-link"></span>
      </a>
      ${text}
    </h${level}>`;
};

// Run marked
console.log(marked('# heading+', { renderer }));
```
**Output:**

```html
<h1>
  <a name="heading-" class="anchor" href="#heading-">
    <span class="header-link"></span>
  </a>
  heading+
</h1>
```
### Block level renderer methods

- code(*string* code, *string* infostring, *boolean* escaped)
- blockquote(*string* quote)
- html(*string* html)
- heading(*string* text, *number* level, *string* raw, *Slugger* slugger)
- hr()
- list(*string* body, *boolean* ordered, *number* start)
- listitem(*string* text, *boolean* task, *boolean* checked)
- checkbox(*boolean* checked)
- paragraph(*string* text)
- table(*string* header, *string* body)
- tablerow(*string* content)
- tablecell(*string* content, *object* flags)

`slugger` has the `slug` method to create a unique id from a value:
```js
slugger.slug('foo') // foo
slugger.slug('foo') // foo-1
slugger.slug('foo') // foo-2
slugger.slug('foo 1') // foo-1-1
slugger.slug('foo-1') // foo-1-2
...
```
`flags` has the following properties:

```js
{
  header: true || false,
  align: 'center' || 'left' || 'right'
}
```

### Inline level renderer methods
- strong(*string* text)
- em(*string* text)
- codespan(*string* code)
- br()
- del(*string* text)
- link(*string* href, *string* title, *string* text)
- image(*string* href, *string* title, *string* text)
- text(*string* text)
<h2 id="tokenizer">The tokenizer</h2>
The tokenizer defines how to turn markdown text into tokens.
**Example:** Overriding default `codespan` tokenizer to include latex.
```js
// Create reference instance
const marked = require('marked');

// Get reference
const tokenizer = new marked.Tokenizer();
const originalCodespan = tokenizer.codespan;

// Override function
tokenizer.codespan = function(src) {
  const match = src.match(/\$+([^\$\n]+?)\$+/);
  if (match) {
    return {
      type: 'codespan',
      raw: match[0],
      text: match[1].trim()
    };
  }
  return originalCodespan.apply(this, arguments);
};

// Run marked
console.log(marked('$ latex code $', { tokenizer }));
```

**Output:**

```html
<p><code>latex code</code></p>
```
### Block level tokenizer methods
- space(*string* src)
- code(*string* src, *array* tokens)
- fences(*string* src)
- heading(*string* src)
- nptable(*string* src)
- hr(*string* src)
- blockquote(*string* src)
- list(*string* src)
- html(*string* src)
- def(*string* src)
- table(*string* src)
- lheading(*string* src)
- paragraph(*string* src)
- text(*string* src)
### Inline level tokenizer methods
- escape(*string* src)
- tag(*string* src, *bool* inLink, *bool* inRawBlock)
- link(*string* src)
- reflink(*string* src, *object* links)
- strong(*string* src)
- em(*string* src)
- codespan(*string* src)
- br(*string* src)
- del(*string* src)
- autolink(*string* src)
- url(*string* src)
- inlineText(*string* src, *bool* inRawBlock)
### Other tokenizer methods
- smartypants(*string* text)
- mangle(*string* text)
<h2 id="lexer">The lexer</h2>

The lexer takes a markdown string and calls the tokenizer functions.

<h2 id="parser">The parser</h2>

The parser takes tokens as input and calls the renderer functions.

***

<h2 id="extend">Access to lexer and parser</h2>

You also have direct access to the lexer and parser if you so desire.
``` js
const tokens = marked.lexer(markdown, options);
console.log(marked.parser(tokens, options));
```
``` js
const lexer = new marked.Lexer(options);
const tokens = lexer.lex(markdown);
console.log(tokens);
console.log(lexer.rules.block); // block level rules
console.log(lexer.rules.inline); // inline level rules
```
``` bash
$ node
> require('marked').lexer('> I am using marked.')
[
{
type: "blockquote",
raw: "> I am using marked.",
tokens: [
{
type: "paragraph",
raw: "I am using marked.",
text: "I am using marked.",
tokens: [
{
type: "text",
raw: "I am using marked.",
text: "I am using marked."
}
]
}
]
},
links: {}
]
```
The Lexer builds an array of tokens, which will be passed to the Parser.
The Parser processes each token in the token array:
``` js
const marked = require('marked');

const md = `
  # heading

  [link][1]

  [1]: #heading "heading"
`;

const tokens = marked.lexer(md);
console.log(tokens);

const html = marked.parser(tokens);
console.log(html);
```
``` bash
[
{
type: "heading",
raw: " # heading\n\n",
depth: 1,
text: "heading",
tokens: [
{
type: "text",
raw: "heading",
text: "heading"
}
]
},
{
type: "paragraph",
raw: " [link][1]",
text: " [link][1]",
tokens: [
{
type: "text",
raw: " ",
text: " "
},
{
type: "link",
raw: "[link][1]",
text: "link",
href: "#heading",
title: "heading",
tokens: [
{
type: "text",
raw: "link",
text: "link"
}
]
}
]
},
{
type: "space",
raw: "\n\n"
},
links: {
"1": {
href: "#heading",
title: "heading"
}
}
]
<h1 id="heading">heading</h1>
<p> <a href="#heading" title="heading">link</a></p>
```