2019-11-07 12:49:10 -06:00
/**
 * marked v7.0.3 - a markdown parser
 * Copyright (c) 2011-2023, Christopher Jeffrey. (MIT Licensed)
 * https://github.com/markedjs/marked
 */

/**
 * DO NOT EDIT THIS FILE
 * The code in this file is generated from files in ./src/
 */
2023-08-07 16:50:43 -06:00
/**
 * Gets the original marked default options.
 * @returns A fresh object with every option set to its factory default,
 *   so callers can mutate the result without affecting other consumers.
 */
function _getDefaults() {
    const factoryDefaults = {
        async: false,
        baseUrl: null,
        breaks: false,
        extensions: null,
        gfm: true,
        headerIds: false,
        headerPrefix: '',
        highlight: null,
        hooks: null,
        langPrefix: 'language-',
        mangle: false,
        pedantic: false,
        renderer: null,
        sanitize: false,
        sanitizer: null,
        silent: false,
        smartypants: false,
        tokenizer: null,
        walkTokens: null,
        xhtml: false
    };
    return factoryDefaults;
}
2023-08-07 16:50:43 -06:00
// Module-level shared options; starts as a fresh copy of the factory defaults.
let _defaults = _getDefaults();
// Replace the shared defaults wholesale. NOTE(review): the new object is
// stored as-is (not merged/copied) — callers own the object they pass in.
function changeDefaults(newDefaults) {
    _defaults = newDefaults;
}
2019-11-07 12:49:10 -06:00
2023-08-07 16:50:43 -06:00
/**
 * Helpers
 */
const escapeTest = /[&<>"']/;
const escapeReplace = new RegExp(escapeTest.source, 'g');
const escapeTestNoEncode = /[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/;
const escapeReplaceNoEncode = new RegExp(escapeTestNoEncode.source, 'g');
// Map each HTML-significant character to its entity. (The previous version
// of this table had the entity values collapsed to the raw characters
// themselves, which made escape() an identity mapping and let raw markup
// pass through unescaped.)
const escapeReplacements = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#39;'
};
const getEscapeReplacement = (ch) => escapeReplacements[ch];
/**
 * Escape HTML-significant characters in a string.
 * @param html Text to escape.
 * @param encode When truthy, escape every `&` as well; otherwise leave
 *   already-encoded entities (e.g. `&amp;`) untouched.
 * @returns The escaped string, or the input unchanged when nothing matches.
 */
function escape(html, encode) {
    if (encode) {
        if (escapeTest.test(html)) {
            return html.replace(escapeReplace, getEscapeReplacement);
        }
    }
    else {
        if (escapeTestNoEncode.test(html)) {
            return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
        }
    }
    return html;
}
2023-08-07 16:50:43 -06:00
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;

/**
 * Decode the HTML entities this parser cares about: decimal (&#58;) and
 * hex (&#x3A;) character references, plus the named entity `&colon;`.
 * Any other named entity is removed from the output entirely.
 */
function unescape(html) {
    // explicitly match decimal, hex, and named HTML entities
    return html.replace(unescapeTest, (_, captured) => {
        const entity = captured.toLowerCase();
        if (entity === 'colon') {
            return ':';
        }
        if (entity.charAt(0) === '#') {
            const isHex = entity.charAt(1) === 'x';
            const codePoint = isHex
                ? parseInt(entity.substring(2), 16)
                : +entity.substring(1);
            return String.fromCharCode(codePoint);
        }
        // Unknown named entity: drop it.
        return '';
    });
}
2023-08-07 16:50:43 -06:00
const caret = /(^|[^\[])\^/g;

/**
 * Builder for composing regexes from templates. `replace(name, val)`
 * substitutes the first occurrence of `name` in the accumulated source with
 * `val` (carets are stripped from `val` so anchored fragments can be
 * embedded); `getRegex()` compiles the result with the given flags.
 */
function edit(regex, opt) {
    let source = typeof regex === 'string' ? regex : regex.source;
    const flags = opt || '';
    const obj = {
        replace: (name, val) => {
            let fragment = typeof val === 'object' && 'source' in val ? val.source : val;
            // Remove anchoring carets so the fragment nests safely.
            fragment = fragment.replace(caret, '$1');
            source = source.replace(name, fragment);
            return obj;
        },
        getRegex: () => new RegExp(source, flags)
    };
    return obj;
}
2023-08-07 16:50:43 -06:00
const nonWordAndColonTest = /[^\w:]/g;
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;

/**
 * Sanitize and normalize a link href.
 * @param sanitize When truthy, reject javascript:/vbscript:/data: URLs.
 * @param base Optional base URL used to resolve relative hrefs.
 * @param href Raw href from the markdown source.
 * @returns The URI-encoded href, or null when rejected or unencodable.
 */
function cleanUrl(sanitize, base, href) {
    if (sanitize) {
        let prot;
        try {
            // Decode entities/escapes and strip noise characters so tricks
            // like `java&#58;script:` cannot slip past the protocol check.
            prot = decodeURIComponent(unescape(href))
                .replace(nonWordAndColonTest, '')
                .toLowerCase();
        }
        catch (e) {
            return null;
        }
        if (prot.startsWith('javascript:') || prot.startsWith('vbscript:') || prot.startsWith('data:')) {
            return null;
        }
    }
    if (base && !originIndependentUrl.test(href)) {
        href = resolveUrl(base, href);
    }
    try {
        // encodeURI doubles existing percent signs; undo that.
        href = encodeURI(href).replace(/%25/g, '%');
    }
    catch (e) {
        return null;
    }
    return href;
}
2023-08-07 16:50:43 -06:00
// Cache of normalized base URLs. Keys are prefixed with a space so arbitrary
// base strings cannot collide with Object.prototype property names.
const baseUrls = {};
const justDomain = /^[^:]+:\/*[^/]*$/;
const protocol = /^([^:]+:)[\s\S]*$/;
const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;

/**
 * Resolve `href` against `base`, RFC 3986-style but simplified.
 */
function resolveUrl(base, href) {
    const cacheKey = ' ' + base;
    if (!baseUrls[cacheKey]) {
        // Everything after the last slash of the base's path is irrelevant,
        // but a bare domain needs a trailing slash added.
        // https://tools.ietf.org/html/rfc3986#section-3
        baseUrls[cacheKey] = justDomain.test(base)
            ? base + '/'
            : rtrim(base, '/', true);
    }
    base = baseUrls[cacheKey];
    const relativeBase = !base.includes(':');
    if (href.substring(0, 2) === '//') {
        // Protocol-relative href: reuse only the base's scheme.
        return relativeBase ? href : base.replace(protocol, '$1') + href;
    }
    if (href.charAt(0) === '/') {
        // Root-relative href: reuse only the base's origin.
        return relativeBase ? href : base.replace(domain, '$1') + href;
    }
    // Path-relative href: append to the normalized base.
    return base + href;
}
2023-08-07 16:50:43 -06:00
// A regex-shaped object that never matches; used as a rule placeholder.
const noopTest = { exec: () => null };

/**
 * Split one table row into its cell strings.
 * @param tableRow Raw row text.
 * @param count Optional expected cell count: extra cells are dropped,
 *   missing ones padded with ''.
 */
function splitCells(tableRow, count) {
    // Ensure every cell-delimiting pipe has a space before it so it can be
    // distinguished from an escaped pipe when splitting below.
    const row = tableRow.replace(/\|/g, (match, offset, str) => {
        let escaped = false;
        let pos = offset;
        while (--pos >= 0 && str[pos] === '\\') {
            escaped = !escaped;
        }
        // An odd number of preceding backslashes means the pipe is escaped,
        // so leave it alone; otherwise mark it as a delimiter.
        return escaped ? '|' : ' |';
    });
    const cells = row.split(/ \|/);
    // First/last cell in a row cannot be empty if it has no leading/trailing pipe.
    if (!cells[0].trim()) {
        cells.shift();
    }
    if (cells.length > 0 && !cells[cells.length - 1].trim()) {
        cells.pop();
    }
    if (count) {
        if (cells.length > count) {
            cells.splice(count);
        }
        else {
            while (cells.length < count) {
                cells.push('');
            }
        }
    }
    for (let i = 0; i < cells.length; i++) {
        // Leading/trailing whitespace is ignored per the GFM spec.
        cells[i] = cells[i].trim().replace(/\\\|/g, '|');
    }
    return cells;
}
2023-08-07 16:50:43 -06:00
/**
 * Remove trailing 'c's. Equivalent to str.replace(/c*$/, ''), but written
 * as a scan because /c*$/ is vulnerable to ReDoS.
 *
 * @param str String to trim.
 * @param c Single character forming the suffix to strip.
 * @param invert Remove suffix of non-c chars instead. Default falsey.
 */
function rtrim(str, c, invert) {
    const len = str.length;
    if (len === 0) {
        return '';
    }
    // Length of the suffix matching the (possibly inverted) condition.
    let suffLen = 0;
    // Walk left from the end until a character fails the condition.
    while (suffLen < len) {
        const ch = str.charAt(len - suffLen - 1);
        const inSuffix = invert ? ch !== c : ch === c;
        if (!inSuffix) {
            break;
        }
        suffLen++;
    }
    return str.slice(0, len - suffLen);
}
2021-11-02 07:32:17 -07:00
/**
 * Find the index of the closing bracket `b[1]` that balances an implicitly
 * already-open `b[0]`, honoring backslash escapes.
 * @param str Text to scan.
 * @param b Two-character string: opening bracket then closing bracket.
 * @returns Index of the balancing close bracket, or -1 if none is found.
 */
function findClosingBracket(str, b) {
    const [open, close] = b;
    if (!str.includes(close)) {
        return -1;
    }
    let depth = 0;
    for (let i = 0; i < str.length; i++) {
        const ch = str[i];
        if (ch === '\\') {
            i++; // skip the escaped character
        }
        else if (ch === open) {
            depth++;
        }
        else if (ch === close) {
            depth--;
            if (depth < 0) {
                return i;
            }
        }
    }
    return -1;
}
2023-05-02 04:35:05 +00:00
/**
 * Emit console warnings for options/arguments deprecated in marked 5.x.
 * Does nothing when no options are given or when `silent` is set.
 */
function checkDeprecations(opt, callback) {
    if (!opt || opt.silent) {
        return;
    }
    if (callback) {
        console.warn('marked(): callback is deprecated since version 5.0.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/using_pro#async');
    }
    // NOTE(review): the langPrefix check fires whenever langPrefix differs
    // from 'language-', including when it is simply absent from opt —
    // preserved from the original behavior.
    const deprecations = [
        [opt.sanitize || opt.sanitizer, 'marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options'],
        [opt.highlight || opt.langPrefix !== 'language-', 'marked(): highlight and langPrefix parameters are deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-highlight.'],
        [opt.mangle, 'marked(): mangle parameter is enabled by default, but is deprecated since version 5.0.0, and will be removed in the future. To clear this warning, install https://www.npmjs.com/package/marked-mangle, or disable by setting `{mangle: false}`.'],
        [opt.baseUrl, 'marked(): baseUrl parameter is deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-base-url.'],
        [opt.smartypants, 'marked(): smartypants parameter is deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-smartypants.'],
        [opt.xhtml, 'marked(): xhtml parameter is deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-xhtml.'],
        [opt.headerIds || opt.headerPrefix, 'marked(): headerIds and headerPrefix parameters enabled by default, but are deprecated since version 5.0.0, and will be removed in the future. To clear this warning, install https://www.npmjs.com/package/marked-gfm-heading-id, or disable by setting `{headerIds: false}`.']
    ];
    for (const [triggered, message] of deprecations) {
        if (triggered) {
            console.warn(message);
        }
    }
}
2023-08-07 16:50:43 -06:00
/**
 * Build a `link` or `image` token from a link capture and its definition.
 * @param cap Regex capture: cap[0] is the full match (a leading '!' marks
 *   an image), cap[1] the bracketed text.
 * @param link The resolved { href, title } for this link.
 * @param raw Raw source consumed by the token.
 * @param lexer Lexer used to inline-tokenize the link text.
 */
function outputLink(cap, link, raw, lexer) {
    const href = link.href;
    const title = link.title ? escape(link.title) : null;
    // Unescape `\[` and `\]` inside the bracketed text.
    const text = cap[1].replace(/\\([\[\]])/g, '$1');
    const isImage = cap[0].charAt(0) === '!';
    if (isImage) {
        return {
            type: 'image',
            raw,
            href,
            title,
            text: escape(text)
        };
    }
    // Tokenize the text with inLink set so nested links are rejected.
    lexer.state.inLink = true;
    const token = {
        type: 'link',
        raw,
        href,
        title,
        text,
        tokens: lexer.inlineTokens(text)
    };
    lexer.state.inLink = false;
    return token;
}
2020-05-02 01:31:47 +00:00
/**
 * When a fenced code block's opening fence is indented, strip the same
 * amount of leading whitespace from each sufficiently indented line of its
 * contents; lines indented less than the fence are left untouched.
 */
function indentCodeCompensation(raw, text) {
    const fenceIndentMatch = raw.match(/^(\s+)(?:```)/);
    if (fenceIndentMatch === null) {
        return text;
    }
    const fenceIndent = fenceIndentMatch[1];
    return text
        .split('\n')
        .map((line) => {
            const lineIndentMatch = line.match(/^\s+/);
            if (lineIndentMatch === null) {
                return line;
            }
            const [lineIndent] = lineIndentMatch;
            // Only dedent lines indented at least as far as the fence.
            if (lineIndent.length >= fenceIndent.length) {
                return line.slice(fenceIndent.length);
            }
            return line;
        })
        .join('\n');
}
2023-08-07 16:50:43 -06:00
/**
 * Tokenizer
 *
 * Each method tries one grammar rule against the head of `src` and, on a
 * match, returns a token object; on no match it returns undefined so the
 * lexer can try the next rule.
 */
class _Tokenizer {
    options;
    // TODO: Fix this rules type
    rules;
    lexer;
    constructor(options) {
        this.options = options || _defaults;
    }
    // One or more blank lines -> a single `space` token.
    space(src) {
        const cap = this.rules.block.newline.exec(src);
        if (cap && cap[0].length > 0) {
            return {
                type: 'space',
                raw: cap[0]
            };
        }
    }
    // Indented (4-space) code block.
    code(src) {
        const cap = this.rules.block.code.exec(src);
        if (cap) {
            // Strip the 4-space indent from every line.
            const text = cap[0].replace(/^ {1,4}/gm, '');
            return {
                type: 'code',
                raw: cap[0],
                codeBlockStyle: 'indented',
                // Pedantic mode keeps trailing newlines.
                text: !this.options.pedantic
                    ? rtrim(text, '\n')
                    : text
            };
        }
    }
    // Fenced (``` / ~~~) code block.
    fences(src) {
        const cap = this.rules.block.fences.exec(src);
        if (cap) {
            const raw = cap[0];
            // Dedent contents to match an indented opening fence.
            const text = indentCodeCompensation(raw, cap[3] || '');
            return {
                type: 'code',
                raw,
                lang: cap[2] ? cap[2].trim().replace(this.rules.inline._escapes, '$1') : cap[2],
                text
            };
        }
    }
    // ATX heading (# ... ######).
    heading(src) {
        const cap = this.rules.block.heading.exec(src);
        if (cap) {
            let text = cap[2].trim();
            // remove trailing #s
            if (/#$/.test(text)) {
                const trimmed = rtrim(text, '#');
                if (this.options.pedantic) {
                    text = trimmed.trim();
                }
                else if (!trimmed || / $/.test(trimmed)) {
                    // CommonMark requires space before trailing #s
                    text = trimmed.trim();
                }
            }
            return {
                type: 'heading',
                raw: cap[0],
                depth: cap[1].length,
                text,
                tokens: this.lexer.inline(text)
            };
        }
    }
    // Thematic break (---, ___, ***).
    hr(src) {
        const cap = this.rules.block.hr.exec(src);
        if (cap) {
            return {
                type: 'hr',
                raw: cap[0]
            };
        }
    }
    // Blockquote: strip the `>` markers and re-tokenize the interior as
    // top-level block content.
    blockquote(src) {
        const cap = this.rules.block.blockquote.exec(src);
        if (cap) {
            const text = cap[0].replace(/^ *>[ \t]?/gm, '');
            const top = this.lexer.state.top;
            this.lexer.state.top = true;
            const tokens = this.lexer.blockTokens(text);
            this.lexer.state.top = top;
            return {
                type: 'blockquote',
                raw: cap[0],
                tokens,
                text
            };
        }
    }
    // Ordered/unordered list. Consumes items one at a time, tracking
    // indentation, blank lines (looseness) and block-level interrupters.
    list(src) {
        let cap = this.rules.block.list.exec(src);
        if (cap) {
            let bull = cap[1].trim();
            // A multi-char bullet (e.g. "1.") means an ordered list.
            const isordered = bull.length > 1;
            const list = {
                type: 'list',
                raw: '',
                ordered: isordered,
                start: isordered ? +bull.slice(0, -1) : '',
                loose: false,
                items: []
            };
            // Turn the captured bullet into a pattern matching further
            // bullets of the same kind.
            bull = isordered ? `\\d{1,9}\\${bull.slice(-1)}` : `\\${bull}`;
            if (this.options.pedantic) {
                bull = isordered ? bull : '[*+-]';
            }
            // Get next list item
            const itemRegex = new RegExp(`^( {0,3}${bull})((?:[ \t][^\\n]*)?(?:\\n|$))`);
            let raw = '';
            let itemContents = '';
            let endsWithBlankLine = false;
            // Check if current bullet point can start a new List Item
            while (src) {
                let endEarly = false;
                if (!(cap = itemRegex.exec(src))) {
                    break;
                }
                if (this.rules.block.hr.test(src)) { // End list if bullet was actually HR (possibly move into itemRegex?)
                    break;
                }
                raw = cap[0];
                src = src.substring(raw.length);
                // Expand leading tabs to 3 spaces for indent accounting.
                let line = cap[2].split('\n', 1)[0].replace(/^\t+/, (t) => ' '.repeat(3 * t.length));
                let nextLine = src.split('\n', 1)[0];
                let indent = 0;
                if (this.options.pedantic) {
                    indent = 2;
                    itemContents = line.trimLeft();
                }
                else {
                    indent = cap[2].search(/[^ ]/); // Find first non-space char
                    indent = indent > 4 ? 1 : indent; // Treat indented code blocks (> 4 spaces) as having only 1 indent
                    itemContents = line.slice(indent);
                    indent += cap[1].length;
                }
                let blankLine = false;
                if (!line && /^ *$/.test(nextLine)) { // Items begin with at most one blank line
                    raw += nextLine + '\n';
                    src = src.substring(nextLine.length + 1);
                    endEarly = true;
                }
                if (!endEarly) {
                    const nextBulletRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`);
                    const hrRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`);
                    const fencesBeginRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:\`\`\`|~~~)`);
                    const headingBeginRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}#`);
                    // Check if following lines should be included in List Item
                    while (src) {
                        const rawLine = src.split('\n', 1)[0];
                        nextLine = rawLine;
                        // Re-align to follow commonmark nesting rules
                        if (this.options.pedantic) {
                            nextLine = nextLine.replace(/^ {1,4}(?=( {4})*[^ ])/g, '  ');
                        }
                        // End list item if found code fences
                        if (fencesBeginRegex.test(nextLine)) {
                            break;
                        }
                        // End list item if found start of new heading
                        if (headingBeginRegex.test(nextLine)) {
                            break;
                        }
                        // End list item if found start of new bullet
                        if (nextBulletRegex.test(nextLine)) {
                            break;
                        }
                        // Horizontal rule found
                        if (hrRegex.test(src)) {
                            break;
                        }
                        if (nextLine.search(/[^ ]/) >= indent || !nextLine.trim()) { // Dedent if possible
                            itemContents += '\n' + nextLine.slice(indent);
                        }
                        else {
                            // not enough indentation
                            if (blankLine) {
                                break;
                            }
                            // paragraph continuation unless last line was a different block level element
                            if (line.search(/[^ ]/) >= 4) { // indented code block
                                break;
                            }
                            if (fencesBeginRegex.test(line)) {
                                break;
                            }
                            if (headingBeginRegex.test(line)) {
                                break;
                            }
                            if (hrRegex.test(line)) {
                                break;
                            }
                            itemContents += '\n' + nextLine;
                        }
                        if (!blankLine && !nextLine.trim()) { // Check if current line is blank
                            blankLine = true;
                        }
                        raw += rawLine + '\n';
                        src = src.substring(rawLine.length + 1);
                        line = nextLine.slice(indent);
                    }
                }
                if (!list.loose) {
                    // If the previous item ended with a blank line, the list is loose
                    if (endsWithBlankLine) {
                        list.loose = true;
                    }
                    else if (/\n *\n *$/.test(raw)) {
                        endsWithBlankLine = true;
                    }
                }
                let istask = null;
                let ischecked;
                // Check for task list items
                if (this.options.gfm) {
                    istask = /^\[[ xX]\] /.exec(itemContents);
                    if (istask) {
                        ischecked = istask[0] !== '[ ] ';
                        itemContents = itemContents.replace(/^\[[ xX]\] +/, '');
                    }
                }
                list.items.push({
                    type: 'list_item',
                    raw,
                    task: !!istask,
                    checked: ischecked,
                    loose: false,
                    text: itemContents,
                    tokens: []
                });
                list.raw += raw;
            }
            // Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic
            list.items[list.items.length - 1].raw = raw.trimRight();
            list.items[list.items.length - 1].text = itemContents.trimRight();
            list.raw = list.raw.trimRight();
            // Item child tokens handled here at end because we needed to have the final item to trim it first
            for (let i = 0; i < list.items.length; i++) {
                this.lexer.state.top = false;
                list.items[i].tokens = this.lexer.blockTokens(list.items[i].text, []);
                if (!list.loose) {
                    // Check if list should be loose
                    const spacers = list.items[i].tokens.filter(t => t.type === 'space');
                    const hasMultipleLineBreaks = spacers.length > 0 && spacers.some(t => /\n.*\n/.test(t.raw));
                    list.loose = hasMultipleLineBreaks;
                }
            }
            // Set all items to loose if list is loose
            if (list.loose) {
                for (let i = 0; i < list.items.length; i++) {
                    list.items[i].loose = true;
                }
            }
            return list;
        }
    }
    // Raw HTML block. With the deprecated sanitize option the token is
    // downgraded to an (escaped) paragraph.
    html(src) {
        const cap = this.rules.block.html.exec(src);
        if (cap) {
            const token = {
                type: 'html',
                block: true,
                raw: cap[0],
                pre: !this.options.sanitizer
                    && (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'),
                text: cap[0]
            };
            if (this.options.sanitize) {
                const text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
                const paragraph = token;
                paragraph.type = 'paragraph';
                paragraph.text = text;
                paragraph.tokens = this.lexer.inline(text);
            }
            return token;
        }
    }
    // Link reference definition: `[label]: href "title"`.
    def(src) {
        const cap = this.rules.block.def.exec(src);
        if (cap) {
            const tag = cap[1].toLowerCase().replace(/\s+/g, ' ');
            const href = cap[2] ? cap[2].replace(/^<(.*)>$/, '$1').replace(this.rules.inline._escapes, '$1') : '';
            const title = cap[3] ? cap[3].substring(1, cap[3].length - 1).replace(this.rules.inline._escapes, '$1') : cap[3];
            return {
                type: 'def',
                tag,
                raw: cap[0],
                href,
                title
            };
        }
    }
    // GFM table: header row, alignment row, then data rows. Only emits a
    // token when header and alignment column counts agree.
    table(src) {
        const cap = this.rules.block.table.exec(src);
        if (cap) {
            const item = {
                type: 'table',
                raw: cap[0],
                header: splitCells(cap[1]).map(c => {
                    return { text: c, tokens: [] };
                }),
                align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
                rows: cap[3] && cap[3].trim() ? cap[3].replace(/\n[ \t]*$/, '').split('\n') : []
            };
            if (item.header.length === item.align.length) {
                let l = item.align.length;
                let i, j, k, row;
                // Normalize the alignment row into 'right'/'center'/'left'/null.
                for (i = 0; i < l; i++) {
                    const align = item.align[i];
                    if (align) {
                        if (/^ *-+: *$/.test(align)) {
                            item.align[i] = 'right';
                        }
                        else if (/^ *:-+: *$/.test(align)) {
                            item.align[i] = 'center';
                        }
                        else if (/^ *:-+ *$/.test(align)) {
                            item.align[i] = 'left';
                        }
                        else {
                            item.align[i] = null;
                        }
                    }
                }
                l = item.rows.length;
                for (i = 0; i < l; i++) {
                    item.rows[i] = splitCells(item.rows[i], item.header.length).map(c => {
                        return { text: c, tokens: [] };
                    });
                }
                // parse child tokens inside headers and cells
                // header child tokens
                l = item.header.length;
                for (j = 0; j < l; j++) {
                    item.header[j].tokens = this.lexer.inline(item.header[j].text);
                }
                // cell child tokens
                l = item.rows.length;
                for (j = 0; j < l; j++) {
                    row = item.rows[j];
                    for (k = 0; k < row.length; k++) {
                        row[k].tokens = this.lexer.inline(row[k].text);
                    }
                }
                return item;
            }
        }
    }
    // Setext heading: text underlined with = (depth 1) or - (depth 2).
    lheading(src) {
        const cap = this.rules.block.lheading.exec(src);
        if (cap) {
            return {
                type: 'heading',
                raw: cap[0],
                depth: cap[2].charAt(0) === '=' ? 1 : 2,
                text: cap[1],
                tokens: this.lexer.inline(cap[1])
            };
        }
    }
    // Paragraph; a single trailing newline in the capture is dropped.
    paragraph(src) {
        const cap = this.rules.block.paragraph.exec(src);
        if (cap) {
            const text = cap[1].charAt(cap[1].length - 1) === '\n'
                ? cap[1].slice(0, -1)
                : cap[1];
            return {
                type: 'paragraph',
                raw: cap[0],
                text,
                tokens: this.lexer.inline(text)
            };
        }
    }
    // Fallback block-level text line.
    text(src) {
        const cap = this.rules.block.text.exec(src);
        if (cap) {
            return {
                type: 'text',
                raw: cap[0],
                text: cap[0],
                tokens: this.lexer.inline(cap[0])
            };
        }
    }
    // Inline backslash escape, e.g. `\*`.
    escape(src) {
        const cap = this.rules.inline.escape.exec(src);
        if (cap) {
            return {
                type: 'escape',
                raw: cap[0],
                text: escape(cap[1])
            };
        }
    }
    // Inline raw HTML tag; tracks inLink/inRawBlock lexer state.
    tag(src) {
        const cap = this.rules.inline.tag.exec(src);
        if (cap) {
            if (!this.lexer.state.inLink && /^<a /i.test(cap[0])) {
                this.lexer.state.inLink = true;
            }
            else if (this.lexer.state.inLink && /^<\/a>/i.test(cap[0])) {
                this.lexer.state.inLink = false;
            }
            if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
                this.lexer.state.inRawBlock = true;
            }
            else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
                this.lexer.state.inRawBlock = false;
            }
            return {
                type: this.options.sanitize
                    ? 'text'
                    : 'html',
                raw: cap[0],
                inLink: this.lexer.state.inLink,
                inRawBlock: this.lexer.state.inRawBlock,
                block: false,
                text: this.options.sanitize
                    ? (this.options.sanitizer
                        ? this.options.sanitizer(cap[0])
                        : escape(cap[0]))
                    : cap[0]
            };
        }
    }
    // Inline link `[text](href "title")` or image `![alt](href)`.
    link(src) {
        const cap = this.rules.inline.link.exec(src);
        if (cap) {
            const trimmedUrl = cap[2].trim();
            if (!this.options.pedantic && /^</.test(trimmedUrl)) {
                // commonmark requires matching angle brackets
                if (!(/>$/.test(trimmedUrl))) {
                    return;
                }
                // ending angle bracket cannot be escaped
                const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\');
                if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
                    return;
                }
            }
            else {
                // find closing parenthesis
                const lastParenIndex = findClosingBracket(cap[2], '()');
                if (lastParenIndex > -1) {
                    // Re-clip the capture so it ends at the balanced paren.
                    const start = cap[0].indexOf('!') === 0 ? 5 : 4;
                    const linkLen = start + cap[1].length + lastParenIndex;
                    cap[2] = cap[2].substring(0, lastParenIndex);
                    cap[0] = cap[0].substring(0, linkLen).trim();
                    cap[3] = '';
                }
            }
            let href = cap[2];
            let title = '';
            if (this.options.pedantic) {
                // split pedantic href and title
                const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href);
                if (link) {
                    href = link[1];
                    title = link[3];
                }
            }
            else {
                title = cap[3] ? cap[3].slice(1, -1) : '';
            }
            href = href.trim();
            if (/^</.test(href)) {
                if (this.options.pedantic && !(/>$/.test(trimmedUrl))) {
                    // pedantic allows starting angle bracket without ending angle bracket
                    href = href.slice(1);
                }
                else {
                    href = href.slice(1, -1);
                }
            }
            return outputLink(cap, {
                href: href ? href.replace(this.rules.inline._escapes, '$1') : href,
                title: title ? title.replace(this.rules.inline._escapes, '$1') : title
            }, cap[0], this.lexer);
        }
    }
    // Reference-style link `[text][label]` / shortcut `[label]`; unresolved
    // labels degrade to a single-character text token.
    reflink(src, links) {
        let cap;
        if ((cap = this.rules.inline.reflink.exec(src))
            || (cap = this.rules.inline.nolink.exec(src))) {
            let link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
            link = links[link.toLowerCase()];
            if (!link) {
                const text = cap[0].charAt(0);
                return {
                    type: 'text',
                    raw: text,
                    text
                };
            }
            return outputLink(cap, link, cap[0], this.lexer);
        }
    }
    // Emphasis/strong per CommonMark delimiter-run rules.
    emStrong(src, maskedSrc, prevChar = '') {
        let match = this.rules.inline.emStrong.lDelim.exec(src);
        if (!match)
            return;
        // _ can't be between two alphanumerics. \p{L}\p{N} includes non-english alphabet/numbers as well
        if (match[3] && prevChar.match(/[\p{L}\p{N}]/u))
            return;
        const nextChar = match[1] || match[2] || '';
        if (!nextChar || !prevChar || this.rules.inline.punctuation.exec(prevChar)) {
            // unicode Regex counts emoji as 1 char; spread into array for proper count (used multiple times below)
            const lLength = [...match[0]].length - 1;
            let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;
            const endReg = match[0][0] === '*' ? this.rules.inline.emStrong.rDelimAst : this.rules.inline.emStrong.rDelimUnd;
            // NOTE: endReg is a stateful /g regex shared across calls; reset it.
            endReg.lastIndex = 0;
            // Clip maskedSrc to same section of string as src (move to lexer?)
            maskedSrc = maskedSrc.slice(-1 * src.length + lLength);
            while ((match = endReg.exec(maskedSrc)) != null) {
                rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];
                if (!rDelim)
                    continue; // skip single * in __abc*abc__
                rLength = [...rDelim].length;
                if (match[3] || match[4]) { // found another Left Delim
                    delimTotal += rLength;
                    continue;
                }
                else if (match[5] || match[6]) { // either Left or Right Delim
                    if (lLength % 3 && !((lLength + rLength) % 3)) {
                        midDelimTotal += rLength;
                        continue; // CommonMark Emphasis Rules 9-10
                    }
                }
                delimTotal -= rLength;
                if (delimTotal > 0)
                    continue; // Haven't found enough closing delimiters
                // Remove extra characters. *a*** -> *a*
                rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal);
                const raw = [...src].slice(0, lLength + match.index + rLength + 1).join('');
                // Create `em` if smallest delimiter has odd char count. *a***
                if (Math.min(lLength, rLength) % 2) {
                    const text = raw.slice(1, -1);
                    return {
                        type: 'em',
                        raw,
                        text,
                        tokens: this.lexer.inlineTokens(text)
                    };
                }
                // Create 'strong' if smallest delimiter has even char count. **a***
                const text = raw.slice(2, -2);
                return {
                    type: 'strong',
                    raw,
                    text,
                    tokens: this.lexer.inlineTokens(text)
                };
            }
        }
    }
    // Inline code span; collapses newlines and trims one balanced space.
    codespan(src) {
        const cap = this.rules.inline.code.exec(src);
        if (cap) {
            let text = cap[2].replace(/\n/g, ' ');
            const hasNonSpaceChars = /[^ ]/.test(text);
            const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text);
            if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
                text = text.substring(1, text.length - 1);
            }
            text = escape(text, true);
            return {
                type: 'codespan',
                raw: cap[0],
                text
            };
        }
    }
    // Hard line break.
    br(src) {
        const cap = this.rules.inline.br.exec(src);
        if (cap) {
            return {
                type: 'br',
                raw: cap[0]
            };
        }
    }
    // GFM strikethrough (~~text~~).
    del(src) {
        const cap = this.rules.inline.del.exec(src);
        if (cap) {
            return {
                type: 'del',
                raw: cap[0],
                text: cap[2],
                tokens: this.lexer.inlineTokens(cap[2])
            };
        }
    }
    // Autolink in angle brackets: <url> or <email>.
    autolink(src, mangle) {
        const cap = this.rules.inline.autolink.exec(src);
        if (cap) {
            let text, href;
            if (cap[2] === '@') {
                text = escape(this.options.mangle ? mangle(cap[1]) : cap[1]);
                href = 'mailto:' + text;
            }
            else {
                text = escape(cap[1]);
                href = text;
            }
            return {
                type: 'link',
                raw: cap[0],
                text,
                href,
                tokens: [
                    {
                        type: 'text',
                        raw: text,
                        text
                    }
                ]
            };
        }
    }
    // GFM bare URL / www. / email autolink (no angle brackets).
    url(src, mangle) {
        let cap;
        if (cap = this.rules.inline.url.exec(src)) {
            let text, href;
            if (cap[2] === '@') {
                text = escape(this.options.mangle ? mangle(cap[0]) : cap[0]);
                href = 'mailto:' + text;
            }
            else {
                // do extended autolink path validation
                // (backpedal trailing punctuation until the match stabilizes)
                let prevCapZero;
                do {
                    prevCapZero = cap[0];
                    cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
                } while (prevCapZero !== cap[0]);
                text = escape(cap[0]);
                if (cap[1] === 'www.') {
                    href = 'http://' + cap[0];
                }
                else {
                    href = cap[0];
                }
            }
            return {
                type: 'link',
                raw: cap[0],
                text,
                href,
                tokens: [
                    {
                        type: 'text',
                        raw: text,
                        text
                    }
                ]
            };
        }
    }
    // Fallback inline text; escaped unless inside a raw block.
    inlineText(src, smartypants) {
        const cap = this.rules.inline.text.exec(src);
        if (cap) {
            let text;
            if (this.lexer.state.inRawBlock) {
                text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0])) : cap[0];
            }
            else {
                text = escape(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
            }
            return {
                type: 'text',
                raw: cap[0],
                text
            };
        }
    }
}
2020-04-06 23:25:33 -05:00
2023-08-07 16:50:43 -06:00
/**
 * Block-Level Grammar
 */
// Not all rules are defined in the object literal
// @ts-expect-error
const block = {
    newline: /^(?: *(?:\n|$))+/,
    code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
    fences: /^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
    hr: /^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,
    heading: /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,
    blockquote: /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,
    // `bull` placeholder is substituted below with the bullet pattern.
    list: /^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/,
    html: '^ {0,3}(?:' // optional indentation
        + '<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
        + '|comment[^\\n]*(\\n+|$)' // (2)
        + '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
        + '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
        + '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
        + '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
        + '|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
        + '|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
        + ')',
    // `label`/`title` placeholders are substituted below.
    def: /^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/,
    table: noopTest,
    lheading: /^((?:(?!^bull ).|\n(?!\n|bull ))+?)\n {0,3}(=+|-+) *(?:\n+|$)/,
    // regex template, placeholders will be replaced according to different paragraph
    // interruption rules of commonmark and the original markdown spec:
    _paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,
    text: /^[^\n]+/
};
// Compose the final block regexes by substituting the placeholder words in
// the templates above. The sub-patterns (prefixed with `_`) are kept on the
// object so derived flavors can reuse them.
block._label = /(?!\s*\])(?:\\.|[^\[\]\\])+/;
block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
block.def = edit(block.def)
    .replace('label', block._label)
    .replace('title', block._title)
    .getRegex();
block.bullet = /(?:[*+-]|\d{1,9}[.)])/;
block.listItemStart = edit(/^( *)(bull) */)
    .replace('bull', block.bullet)
    .getRegex();
block.list = edit(block.list)
    .replace(/bull/g, block.bullet)
    .replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))')
    .replace('def', '\\n+(?=' + block.def.source + ')')
    .getRegex();
block._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
    + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
    + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
    + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
    + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
    + '|track|ul';
block._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
block.html = edit(block.html, 'i')
    .replace('comment', block._comment)
    .replace('tag', block._tag)
    .replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
    .getRegex();
block.lheading = edit(block.lheading)
    .replace(/bull/g, block.bullet) // lists can interrupt
    .getRegex();
block.paragraph = edit(block._paragraph)
    .replace('hr', block.hr)
    .replace('heading', ' {0,3}#{1,6} ')
    .replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs
    .replace('|table', '')
    .replace('blockquote', ' {0,3}>')
    .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
    .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
    .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
    .replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
    .getRegex();
block.blockquote = edit(block.blockquote)
    .replace('paragraph', block.paragraph)
    .getRegex();
/**
 * Normal Block Grammar
 */
block.normal = { ...block };
/**
 * GFM Block Grammar
 */
block.gfm = {
    ...block.normal,
    table: '^ *([^\\n ].*\\|.*)\\n' // Header
        + ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?' // Align
        + '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
};
block.gfm.table = edit(block.gfm.table)
    .replace('hr', block.hr)
    .replace('heading', ' {0,3}#{1,6} ')
    .replace('blockquote', ' {0,3}>')
    .replace('code', ' {4}[^\\n]')
    .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
    .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
    .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
    .replace('tag', block._tag) // tables can be interrupted by type (6) html blocks
    .getRegex();
block.gfm.paragraph = edit(block._paragraph)
    .replace('hr', block.hr)
    .replace('heading', ' {0,3}#{1,6} ')
    .replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs
    .replace('table', block.gfm.table) // interrupt paragraphs with table
    .replace('blockquote', ' {0,3}>')
    .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
    .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
    .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
    .replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
    .getRegex();
/**
 * Pedantic grammar (original John Gruber's loose markdown specification)
 */
block.pedantic = {
    ...block.normal,
    html: edit('^ *(?:comment *(?:\\n|\\s*$)'
        + '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
        + '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))')
        .replace('comment', block._comment)
        .replace(/tag/g, '(?!(?:'
            + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
            + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
            + '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b')
        .getRegex(),
    def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,
    heading: /^(#{1,6})(.*)(?:\n+|$)/,
    fences: noopTest,
    lheading: /^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,
    paragraph: edit(block.normal._paragraph)
        .replace('hr', block.hr)
        .replace('heading', ' *#{1,6} *[^\n]')
        .replace('lheading', block.lheading)
        .replace('blockquote', ' {0,3}>')
        .replace('|fences', '')
        .replace('|list', '')
        .replace('|html', '')
        .getRegex()
};
/**
 * Inline-Level Grammar
 *
 * Base rule set for span-level tokens; flavor variants are derived below.
 * Placeholder words (punct, scheme, email, label, …) are substituted later.
 */
// Not all rules are defined in the object literal
// @ts-expect-error
const inline = {
    escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,
    autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/,
    url: noopTest,
    tag: '^comment'
        + '|^</[a-zA-Z][\\w:-]*\\s*>' // closing tag
        + '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
        + '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
        + '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
        + '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>', // CDATA section
    link: /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,
    reflink: /^!?\[(label)\]\[(ref)\]/,
    nolink: /^!?\[(ref)\](?:\[\])?/,
    reflinkSearch: 'reflink|nolink(?!\\()',
    emStrong: {
        lDelim: /^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,
        // (1) and (2) can only be a Right Delimiter. (3) and (4) can only be Left. (5) and (6) can be either Left or Right.
        // | Skip orphan inside strong | Consume to delim | (1) #*** | (2) a***#, a*** | (3) #***a, ***a | (4) ***# | (5) #***# | (6) a***a
        rDelimAst: /^[^_*]*?__[^_*]*?\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\*)[punct](\*+)(?=[\s]|$)|[^punct\s](\*+)(?!\*)(?=[punct\s]|$)|(?!\*)[punct\s](\*+)(?=[^punct\s])|[\s](\*+)(?!\*)(?=[punct])|(?!\*)[punct](\*+)(?!\*)(?=[punct])|[^punct\s](\*+)(?=[^punct\s])/,
        rDelimUnd: /^[^_*]*?\*\*[^_*]*?_[^_*]*?(?=\*\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\s]|$)|[^punct\s](_+)(?!_)(?=[punct\s]|$)|(?!_)[punct\s](_+)(?=[^punct\s])|[\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])/ // ^- Not allowed for _
    },
    code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,
    br: /^( {2,}|\\)\n(?!\s*$)/,
    del: noopTest,
    text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,
    punctuation: /^((?![*_])[\spunctuation])/
};
// list of unicode punctuation marks, plus any missing characters from CommonMark spec
inline._punctuation = '\\p{P}$+<=>`^|~';
inline.punctuation = edit(inline.punctuation, 'u').replace(/punctuation/g, inline._punctuation).getRegex();
// sequences em should skip over [title](link), `code`, <html>
inline.blockSkip = /\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g;
inline.anyPunctuation = /\\[punct]/g;
inline._escapes = /\\([punct])/g;
// unlike block comments, inline comments must be terminated (the `|$` branch is dropped)
inline._comment = edit(block._comment).replace('(?:-->|$)', '-->').getRegex();
inline.emStrong.lDelim = edit(inline.emStrong.lDelim, 'u')
    .replace(/punct/g, inline._punctuation)
    .getRegex();
inline.emStrong.rDelimAst = edit(inline.emStrong.rDelimAst, 'gu')
    .replace(/punct/g, inline._punctuation)
    .getRegex();
inline.emStrong.rDelimUnd = edit(inline.emStrong.rDelimUnd, 'gu')
    .replace(/punct/g, inline._punctuation)
    .getRegex();
inline.anyPunctuation = edit(inline.anyPunctuation, 'gu')
    .replace(/punct/g, inline._punctuation)
    .getRegex();
inline._escapes = edit(inline._escapes, 'gu')
    .replace(/punct/g, inline._punctuation)
    .getRegex();
inline._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/;
inline._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/;
inline.autolink = edit(inline.autolink)
    .replace('scheme', inline._scheme)
    .replace('email', inline._email)
    .getRegex();
inline._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/;
inline.tag = edit(inline.tag)
    .replace('comment', inline._comment)
    .replace('attribute', inline._attribute)
    .getRegex();
inline._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;
inline._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;
inline._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/;
inline.link = edit(inline.link)
    .replace('label', inline._label)
    .replace('href', inline._href)
    .replace('title', inline._title)
    .getRegex();
inline.reflink = edit(inline.reflink)
    .replace('label', inline._label)
    .replace('ref', block._label)
    .getRegex();
inline.nolink = edit(inline.nolink)
    .replace('ref', block._label)
    .getRegex();
inline.reflinkSearch = edit(inline.reflinkSearch, 'g')
    .replace('reflink', inline.reflink)
    .replace('nolink', inline.nolink)
    .getRegex();
/**
 * Normal Inline Grammar
 */
inline.normal = { ...inline };
/**
 * Pedantic Inline Grammar
 */
inline.pedantic = {
    ...inline.normal,
    strong: {
        start: /^__|\*\*/,
        middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,
        endAst: /\*\*(?!\*)/g,
        endUnd: /__(?!_)/g
    },
    em: {
        start: /^_|\*/,
        middle: /^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/,
        endAst: /\*(?!\*)/g,
        endUnd: /_(?!_)/g
    },
    link: edit(/^!?\[(label)\]\((.*?)\)/)
        .replace('label', inline._label)
        .getRegex(),
    reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/)
        .replace('label', inline._label)
        .getRegex()
};
/**
 * GFM Inline Grammar
 */
inline.gfm = {
    ...inline.normal,
    escape: edit(inline.escape).replace('])', '~|])').getRegex(),
    _extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
    url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,
    _backpedal: /(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/,
    del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,
    text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/
};
inline.gfm.url = edit(inline.gfm.url, 'i')
    .replace('email', inline.gfm._extended_email)
    .getRegex();
/**
 * GFM + Line Breaks Inline Grammar
 */
inline.breaks = {
    ...inline.gfm,
    br: edit(inline.br).replace('{2,}', '*').getRegex(),
    text: edit(inline.gfm.text)
        .replace('\\b_', '\\b_| {2,}\\n')
        .replace(/\{2,\}/g, '*')
        .getRegex()
};
2020-04-14 16:41:10 -05:00
2023-08-07 16:50:43 -06:00
/**
 * smartypants text replacement
 *
 * Converts ASCII punctuation to typographic equivalents: em/en dashes,
 * curly single and double quotes, and the ellipsis character.
 * Substitutions are applied in order; em-dashes must run before en-dashes
 * and opening quotes before closing ones.
 */
function smartypants(text) {
    const substitutions = [
        // em-dashes
        [/---/g, '\u2014'],
        // en-dashes
        [/--/g, '\u2013'],
        // opening singles
        [/(^|[-\u2014/(\[{"\s])'/g, '$1\u2018'],
        // closing singles & apostrophes
        [/'/g, '\u2019'],
        // opening doubles
        [/(^|[-\u2014/(\[{\u2018\s])"/g, '$1\u201c'],
        // closing doubles
        [/"/g, '\u201d'],
        // ellipses
        [/\.{3}/g, '\u2026']
    ];
    return substitutions.reduce((result, [pattern, replacement]) => result.replace(pattern, replacement), text);
}
2023-08-07 16:50:43 -06:00
/**
 * mangle email addresses
 *
 * Obfuscates every UTF-16 code unit of `text` as an HTML numeric character
 * reference, randomly choosing hexadecimal ("&#x61;") or decimal ("&#97;")
 * form per character. Output is nondeterministic but always decodes back
 * to the input.
 */
function mangle(text) {
    // split('') iterates UTF-16 code units (matching charCodeAt), not code points.
    return text
        .split('')
        .map((char) => {
            const code = char.charCodeAt(0);
            const encoded = Math.random() > 0.5
                ? 'x' + code.toString(16)
                : code.toString();
            return '&#' + encoded + ';';
        })
        .join('');
}
/**
 * Block Lexer
 *
 * Turns a markdown string into a token tree. Block-level tokens are produced
 * first; inline lexing is deferred through `inlineQueue` and runs after the
 * block pass completes, so that all link definitions are known (see lex()).
 */
class _Lexer {
    tokens;
    options;
    state;
    tokenizer;
    inlineQueue;
    constructor(options) {
        // TokenList cannot be created in one go
        // @ts-expect-error
        this.tokens = [];
        // Null-prototype map of link-definition label -> { href, title };
        // avoids collisions with Object.prototype keys in labels.
        this.tokens.links = Object.create(null);
        this.options = options || _defaults;
        this.options.tokenizer = this.options.tokenizer || new _Tokenizer();
        this.tokenizer = this.options.tokenizer;
        this.tokenizer.options = this.options;
        this.tokenizer.lexer = this;
        // Queue of { src, tokens } pairs whose inline lexing is deferred.
        this.inlineQueue = [];
        this.state = {
            inLink: false,
            inRawBlock: false,
            top: true
        };
        // Select the rule set for the requested flavor; pedantic wins over gfm.
        const rules = {
            block: block.normal,
            inline: inline.normal
        };
        if (this.options.pedantic) {
            rules.block = block.pedantic;
            rules.inline = inline.pedantic;
        }
        else if (this.options.gfm) {
            rules.block = block.gfm;
            if (this.options.breaks) {
                rules.inline = inline.breaks;
            }
            else {
                rules.inline = inline.gfm;
            }
        }
        this.tokenizer.rules = rules;
    }
    /**
     * Expose Rules
     */
    static get rules() {
        return {
            block,
            inline
        };
    }
    /**
     * Static Lex Method
     */
    static lex(src, options) {
        const lexer = new _Lexer(options);
        return lexer.lex(src);
    }
    /**
     * Static Lex Inline Method
     */
    static lexInline(src, options) {
        const lexer = new _Lexer(options);
        return lexer.inlineTokens(src);
    }
    /**
     * Preprocessing
     *
     * Normalizes line endings, runs the block pass, then drains the deferred
     * inline queue. Returns the completed token list.
     */
    lex(src) {
        src = src
            .replace(/\r\n|\r/g, '\n');
        this.blockTokens(src, this.tokens);
        let next;
        while (next = this.inlineQueue.shift()) {
            this.inlineTokens(next.src, next.tokens);
        }
        return this.tokens;
    }
    /**
     * Tokenize block-level markdown, appending to (and returning) `tokens`.
     * Tokenizer order below is significant — earlier rules take precedence.
     */
    blockTokens(src, tokens = []) {
        if (this.options.pedantic) {
            src = src.replace(/\t/g, '    ').replace(/^ +$/gm, '');
        }
        else {
            // Expand leading tabs to 4 spaces so column-based rules (e.g. indented
            // code) behave consistently.
            src = src.replace(/^( *)(\t+)/gm, (_, leading, tabs) => {
                return leading + '    '.repeat(tabs.length);
            });
        }
        let token;
        let lastToken;
        let cutSrc;
        let lastParagraphClipped;
        while (src) {
            // Custom block-level extensions get the first chance at the source.
            if (this.options.extensions
                && this.options.extensions.block
                && this.options.extensions.block.some((extTokenizer) => {
                    if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
                        src = src.substring(token.raw.length);
                        tokens.push(token);
                        return true;
                    }
                    return false;
                })) {
                continue;
            }
            // newline
            if (token = this.tokenizer.space(src)) {
                src = src.substring(token.raw.length);
                if (token.raw.length === 1 && tokens.length > 0) {
                    // if there's a single \n as a spacer, it's terminating the last line,
                    // so move it there so that we don't get unecessary paragraph tags
                    tokens[tokens.length - 1].raw += '\n';
                }
                else {
                    tokens.push(token);
                }
                continue;
            }
            // code
            if (token = this.tokenizer.code(src)) {
                src = src.substring(token.raw.length);
                lastToken = tokens[tokens.length - 1];
                // An indented code block cannot interrupt a paragraph.
                if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
                    lastToken.raw += '\n' + token.raw;
                    lastToken.text += '\n' + token.text;
                    // Keep the pending inline job in sync with the merged text.
                    this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
                }
                else {
                    tokens.push(token);
                }
                continue;
            }
            // fences
            if (token = this.tokenizer.fences(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // heading
            if (token = this.tokenizer.heading(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // hr
            if (token = this.tokenizer.hr(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // blockquote
            if (token = this.tokenizer.blockquote(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // list
            if (token = this.tokenizer.list(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // html
            if (token = this.tokenizer.html(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // def
            if (token = this.tokenizer.def(src)) {
                src = src.substring(token.raw.length);
                lastToken = tokens[tokens.length - 1];
                // A link definition cannot interrupt a paragraph; fold it in as text.
                if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
                    lastToken.raw += '\n' + token.raw;
                    lastToken.text += '\n' + token.raw;
                    this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
                }
                else if (!this.tokens.links[token.tag]) {
                    // First definition of a label wins; later duplicates are ignored.
                    this.tokens.links[token.tag] = {
                        href: token.href,
                        title: token.title
                    };
                }
                continue;
            }
            // table (gfm)
            if (token = this.tokenizer.table(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // lheading
            if (token = this.tokenizer.lheading(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // top-level paragraph
            // prevent paragraph consuming extensions by clipping 'src' to extension start
            cutSrc = src;
            if (this.options.extensions && this.options.extensions.startBlock) {
                let startIndex = Infinity;
                const tempSrc = src.slice(1);
                let tempStart;
                this.options.extensions.startBlock.forEach((getStartIndex) => {
                    tempStart = getStartIndex.call({ lexer: this }, tempSrc);
                    if (typeof tempStart === 'number' && tempStart >= 0) {
                        startIndex = Math.min(startIndex, tempStart);
                    }
                });
                if (startIndex < Infinity && startIndex >= 0) {
                    cutSrc = src.substring(0, startIndex + 1);
                }
            }
            if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) {
                lastToken = tokens[tokens.length - 1];
                // If the previous paragraph was clipped for an extension that did not
                // match, merge this continuation into it instead of starting anew.
                if (lastParagraphClipped && lastToken.type === 'paragraph') {
                    lastToken.raw += '\n' + token.raw;
                    lastToken.text += '\n' + token.text;
                    this.inlineQueue.pop();
                    this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
                }
                else {
                    tokens.push(token);
                }
                lastParagraphClipped = (cutSrc.length !== src.length);
                src = src.substring(token.raw.length);
                continue;
            }
            // text
            if (token = this.tokenizer.text(src)) {
                src = src.substring(token.raw.length);
                lastToken = tokens[tokens.length - 1];
                // Consecutive text tokens are merged into one.
                if (lastToken && lastToken.type === 'text') {
                    lastToken.raw += '\n' + token.raw;
                    lastToken.text += '\n' + token.text;
                    this.inlineQueue.pop();
                    this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
                }
                else {
                    tokens.push(token);
                }
                continue;
            }
            // No tokenizer consumed anything: avoid spinning forever.
            if (src) {
                const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
                if (this.options.silent) {
                    console.error(errMsg);
                    break;
                }
                else {
                    throw new Error(errMsg);
                }
            }
        }
        this.state.top = true;
        return tokens;
    }
    /**
     * Queue `src` for deferred inline lexing; `tokens` is filled in later
     * when lex() drains the queue. Returns the (initially empty) token array.
     */
    inline(src, tokens = []) {
        this.inlineQueue.push({ src, tokens });
        return tokens;
    }
    /**
     * Lexing/Compiling
     *
     * Tokenize inline (span-level) markdown, appending to `tokens`.
     * A masked copy of `src` is used for em/strong delimiter scanning so that
     * links, code spans, html and escapes don't produce false delimiters.
     */
    inlineTokens(src, tokens = []) {
        let token, lastToken, cutSrc;
        // String with links masked to avoid interference with em and strong
        let maskedSrc = src;
        let match;
        let keepPrevChar, prevChar;
        // Mask out reflinks
        if (this.tokens.links) {
            const links = Object.keys(this.tokens.links);
            if (links.length > 0) {
                while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) {
                    if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) {
                        // Replace the matched span with same-length filler to keep offsets stable.
                        maskedSrc = maskedSrc.slice(0, match.index) + '[' + 'a'.repeat(match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex);
                    }
                }
            }
        }
        // Mask out other blocks
        while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) {
            maskedSrc = maskedSrc.slice(0, match.index) + '[' + 'a'.repeat(match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);
        }
        // Mask out escaped characters
        while ((match = this.tokenizer.rules.inline.anyPunctuation.exec(maskedSrc)) != null) {
            maskedSrc = maskedSrc.slice(0, match.index) + '++' + maskedSrc.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);
        }
        while (src) {
            // prevChar is only carried over when the previous token asked for it.
            if (!keepPrevChar) {
                prevChar = '';
            }
            keepPrevChar = false;
            // extensions
            if (this.options.extensions
                && this.options.extensions.inline
                && this.options.extensions.inline.some((extTokenizer) => {
                    if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
                        src = src.substring(token.raw.length);
                        tokens.push(token);
                        return true;
                    }
                    return false;
                })) {
                continue;
            }
            // escape
            if (token = this.tokenizer.escape(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // tag
            if (token = this.tokenizer.tag(src)) {
                src = src.substring(token.raw.length);
                lastToken = tokens[tokens.length - 1];
                // Merge adjacent text tokens.
                if (lastToken && token.type === 'text' && lastToken.type === 'text') {
                    lastToken.raw += token.raw;
                    lastToken.text += token.text;
                }
                else {
                    tokens.push(token);
                }
                continue;
            }
            // link
            if (token = this.tokenizer.link(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // reflink, nolink
            if (token = this.tokenizer.reflink(src, this.tokens.links)) {
                src = src.substring(token.raw.length);
                lastToken = tokens[tokens.length - 1];
                if (lastToken && token.type === 'text' && lastToken.type === 'text') {
                    lastToken.raw += token.raw;
                    lastToken.text += token.text;
                }
                else {
                    tokens.push(token);
                }
                continue;
            }
            // em & strong
            if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // code
            if (token = this.tokenizer.codespan(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // br
            if (token = this.tokenizer.br(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // del (gfm)
            if (token = this.tokenizer.del(src)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // autolink
            if (token = this.tokenizer.autolink(src, mangle)) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // url (gfm)
            if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) {
                src = src.substring(token.raw.length);
                tokens.push(token);
                continue;
            }
            // text
            // prevent inlineText consuming extensions by clipping 'src' to extension start
            cutSrc = src;
            if (this.options.extensions && this.options.extensions.startInline) {
                let startIndex = Infinity;
                const tempSrc = src.slice(1);
                let tempStart;
                this.options.extensions.startInline.forEach((getStartIndex) => {
                    tempStart = getStartIndex.call({ lexer: this }, tempSrc);
                    if (typeof tempStart === 'number' && tempStart >= 0) {
                        startIndex = Math.min(startIndex, tempStart);
                    }
                });
                if (startIndex < Infinity && startIndex >= 0) {
                    cutSrc = src.substring(0, startIndex + 1);
                }
            }
            if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
                src = src.substring(token.raw.length);
                if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
                    prevChar = token.raw.slice(-1);
                }
                keepPrevChar = true;
                lastToken = tokens[tokens.length - 1];
                if (lastToken && lastToken.type === 'text') {
                    lastToken.raw += token.raw;
                    lastToken.text += token.text;
                }
                else {
                    tokens.push(token);
                }
                continue;
            }
            // No tokenizer consumed anything: avoid spinning forever.
            if (src) {
                const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
                if (this.options.silent) {
                    console.error(errMsg);
                    break;
                }
                else {
                    throw new Error(errMsg);
                }
            }
        }
        return tokens;
    }
}
2019-11-07 12:49:10 -06:00
2023-08-07 16:50:43 -06:00
/**
 * Renderer
 *
 * Produces an HTML string for each token type; the parser concatenates the
 * returned chunks in document order. All output markup is unchanged from the
 * default marked renderer.
 */
class _Renderer {
    options;
    constructor(options) {
        this.options = options || _defaults;
    }
    /**
     * Render a code block, optionally run through the configured highlighter.
     * The first word of the info string is treated as the language tag.
     */
    code(code, infostring, escaped) {
        const lang = (infostring || '').match(/^\S*/)?.[0];
        if (this.options.highlight) {
            const highlighted = this.options.highlight(code, lang);
            if (highlighted != null && highlighted !== code) {
                escaped = true;
                code = highlighted;
            }
        }
        // Normalize to exactly one trailing newline.
        code = code.replace(/\n$/, '') + '\n';
        const content = escaped ? code : escape(code, true);
        if (!lang) {
            return `<pre><code>${content}</code></pre>\n`;
        }
        return `<pre><code class="${this.options.langPrefix}${escape(lang)}">${content}</code></pre>\n`;
    }
    blockquote(quote) {
        return `<blockquote>\n${quote}</blockquote>\n`;
    }
    // Raw html passes through untouched.
    html(html, block) {
        return html;
    }
    heading(text, level, raw, slugger) {
        if (this.options.headerIds) {
            const id = this.options.headerPrefix + slugger.slug(raw);
            return `<h${level} id="${id}">${text}</h${level}>\n`;
        }
        // ignore IDs
        return `<h${level}>${text}</h${level}>\n`;
    }
    hr() {
        return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
    }
    list(body, ordered, start) {
        const listTag = ordered ? 'ol' : 'ul';
        // Emit a start attribute only for ordered lists not beginning at 1.
        const startAttr = (ordered && start !== 1) ? ` start="${start}"` : '';
        return `<${listTag}${startAttr}>\n${body}</${listTag}>\n`;
    }
    listitem(text, task, checked) {
        return `<li>${text}</li>\n`;
    }
    checkbox(checked) {
        const checkedAttr = checked ? 'checked="" ' : '';
        const closing = this.options.xhtml ? ' /' : '';
        return `<input ${checkedAttr}disabled="" type="checkbox"${closing}> `;
    }
    paragraph(text) {
        return `<p>${text}</p>\n`;
    }
    table(header, body) {
        if (body)
            body = `<tbody>${body}</tbody>`;
        return `<table>\n<thead>\n${header}</thead>\n${body}</table>\n`;
    }
    tablerow(content) {
        return `<tr>\n${content}</tr>\n`;
    }
    tablecell(content, flags) {
        const cellTag = flags.header ? 'th' : 'td';
        const openTag = flags.align
            ? `<${cellTag} align="${flags.align}">`
            : `<${cellTag}>`;
        return `${openTag}${content}</${cellTag}>\n`;
    }
    /**
     * span level renderer
     */
    strong(text) {
        return `<strong>${text}</strong>`;
    }
    em(text) {
        return `<em>${text}</em>`;
    }
    codespan(text) {
        return `<code>${text}</code>`;
    }
    br() {
        return this.options.xhtml ? '<br/>' : '<br>';
    }
    del(text) {
        return `<del>${text}</del>`;
    }
    /**
     * Render a link; falls back to the bare text when cleanUrl rejects the href.
     */
    link(href, title, text) {
        const cleanHref = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
        if (cleanHref === null) {
            return text;
        }
        const titleAttr = title ? ` title="${title}"` : '';
        return `<a href="${cleanHref}"${titleAttr}>${text}</a>`;
    }
    /**
     * Render an image; falls back to the alt text when cleanUrl rejects the href.
     */
    image(href, title, text) {
        const cleanHref = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
        if (cleanHref === null) {
            return text;
        }
        const titleAttr = title ? ` title="${title}"` : '';
        const closing = this.options.xhtml ? '/>' : '>';
        return `<img src="${cleanHref}" alt="${text}"${titleAttr}${closing}`;
    }
    text(text) {
        return text;
    }
}
2019-11-07 12:49:10 -06:00
2023-08-07 16:50:43 -06:00
/**
 * TextRenderer
 * returns only the textual part of the token
 *
 * Used where plain text is needed (e.g. heading slugs and image alt text):
 * every span renderer discards markup and yields just its text content.
 */
class _TextRenderer {
    // no need for block level renderers
    strong(text) { return text; }
    em(text) { return text; }
    codespan(text) { return text; }
    del(text) { return text; }
    html(text) { return text; }
    text(text) { return text; }
    // Links and images collapse to their text/alt content.
    link(href, title, text) { return '' + text; }
    image(href, title, text) { return '' + text; }
    br() { return ''; }
}
2019-11-07 12:49:10 -06:00
2023-08-07 16:50:43 -06:00
/**
 * Slugger generates header id
 *
 * Tracks previously issued slugs in `seen` so repeated headings receive
 * unique "-1", "-2", ... suffixes.
 */
class _Slugger {
    seen;
    constructor() {
        this.seen = {};
    }
    /**
     * Normalize a heading string into slug form: lowercase, trimmed, html
     * tags and punctuation removed, whitespace replaced by dashes.
     */
    serialize(value) {
        return value
            .toLowerCase()
            .trim()
            // remove html tags
            .replace(/<[!\/a-z].*?>/ig, '')
            // remove unwanted chars
            .replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g, '')
            .replace(/\s/g, '-');
    }
    /**
     * Finds the next safe (unique) slug to use
     */
    getNextSafeSlug(originalSlug, isDryRun) {
        const hasSeen = (key) => Object.prototype.hasOwnProperty.call(this.seen, key);
        let candidate = originalSlug;
        let occurrences = 0;
        if (hasSeen(candidate)) {
            // Resume numbering from the base slug's stored counter.
            occurrences = this.seen[originalSlug];
            do {
                occurrences++;
                candidate = `${originalSlug}-${occurrences}`;
            } while (hasSeen(candidate));
        }
        if (!isDryRun) {
            // Record both the base slug's counter and the slug actually issued.
            this.seen[originalSlug] = occurrences;
            this.seen[candidate] = 0;
        }
        return candidate;
    }
    /**
     * Convert string to unique id
     */
    slug(value, options = {}) {
        const slug = this.serialize(value);
        return this.getNextSafeSlug(slug, options.dryrun);
    }
}
2023-07-29 08:31:34 +02:00
2023-08-07 16:50:43 -06:00
/ * *
* Parsing & Compiling
* /
class _Parser {
  options;
  renderer;
  textRenderer;
  slugger;
  constructor(options) {
    // Fall back to the module-wide defaults when no options are given.
    this.options = options || _defaults;
    this.options.renderer = this.options.renderer || new _Renderer();
    this.renderer = this.options.renderer;
    this.renderer.options = this.options;
    this.textRenderer = new _TextRenderer();
    this.slugger = new _Slugger();
  }
  /**
   * Static Parse Method
   */
  static parse(tokens, options) {
    return new _Parser(options).parse(tokens);
  }
  /**
   * Static Parse Inline Method
   */
  static parseInline(tokens, options) {
    return new _Parser(options).parseInline(tokens);
  }
  /**
   * Parse Loop: render block-level tokens to an HTML string.
   * `top` is false when parsing nested token lists (e.g. blockquote/list
   * bodies), which changes how bare text tokens are wrapped.
   */
  parse(tokens, top = true) {
    let result = '';
    for (let i = 0; i < tokens.length; i++) {
      const token = tokens[i];
      // Extension renderers run first; returning `false` for a built-in
      // token type falls through to the default handling below.
      const extRenderer = this.options.extensions?.renderers?.[token.type];
      if (extRenderer) {
        const rendered = extRenderer.call({ parser: this }, token);
        const builtIn = ['space', 'hr', 'heading', 'code', 'table', 'blockquote', 'list', 'html', 'paragraph', 'text'].includes(token.type);
        if (rendered !== false || !builtIn) {
          result += rendered || '';
          continue;
        }
      }
      switch (token.type) {
        case 'space':
          continue;
        case 'hr':
          result += this.renderer.hr();
          continue;
        case 'heading': {
          const inline = this.parseInline(token.tokens);
          // Plain-text rendering of the heading for the id/slug machinery.
          const raw = unescape(this.parseInline(token.tokens, this.textRenderer));
          result += this.renderer.heading(inline, token.depth, raw, this.slugger);
          continue;
        }
        case 'code':
          result += this.renderer.code(token.text, token.lang, !!token.escaped);
          continue;
        case 'table': {
          // Header row: cells keep their column alignment by index.
          let cells = '';
          for (let j = 0; j < token.header.length; j++) {
            cells += this.renderer.tablecell(this.parseInline(token.header[j].tokens), { header: true, align: token.align[j] });
          }
          const headerHtml = this.renderer.tablerow(cells);
          let body = '';
          for (const row of token.rows) {
            cells = '';
            for (let k = 0; k < row.length; k++) {
              cells += this.renderer.tablecell(this.parseInline(row[k].tokens), { header: false, align: token.align[k] });
            }
            body += this.renderer.tablerow(cells);
          }
          result += this.renderer.table(headerHtml, body);
          continue;
        }
        case 'blockquote':
          result += this.renderer.blockquote(this.parse(token.tokens));
          continue;
        case 'list': {
          const { ordered, start, loose } = token;
          let body = '';
          for (const item of token.items) {
            const { task, checked } = item;
            let itemBody = '';
            if (task) {
              const checkbox = this.renderer.checkbox(!!checked);
              if (loose) {
                // Inject the checkbox into the leading paragraph (and its
                // first text child) so it renders inside the <p>.
                if (item.tokens.length > 0 && item.tokens[0].type === 'paragraph') {
                  item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
                  if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
                    item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
                  }
                } else {
                  item.tokens.unshift({
                    type: 'text',
                    text: checkbox
                  });
                }
              } else {
                itemBody += checkbox;
              }
            }
            itemBody += this.parse(item.tokens, loose);
            body += this.renderer.listitem(itemBody, task, !!checked);
          }
          result += this.renderer.list(body, ordered, start);
          continue;
        }
        case 'html':
          result += this.renderer.html(token.text, token.block);
          continue;
        case 'paragraph':
          result += this.renderer.paragraph(this.parseInline(token.tokens));
          continue;
        case 'text': {
          // Merge runs of adjacent text tokens into one body, advancing `i`.
          let body = token.tokens ? this.parseInline(token.tokens) : token.text;
          while (i + 1 < tokens.length && tokens[i + 1].type === 'text') {
            const next = tokens[++i];
            body += '\n' + (next.tokens ? this.parseInline(next.tokens) : next.text);
          }
          // At top level, loose text becomes a paragraph of its own.
          result += top ? this.renderer.paragraph(body) : body;
          continue;
        }
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            console.error(errMsg);
            return '';
          }
          throw new Error(errMsg);
        }
      }
    }
    return result;
  }
  /**
   * Parse Inline Tokens: render inline-level tokens with the given renderer
   * (defaults to the HTML renderer; the text renderer is used for slugs).
   */
  parseInline(tokens, renderer) {
    renderer = renderer || this.renderer;
    let result = '';
    for (const token of tokens) {
      // Extension renderers run first; `false` on a built-in type falls
      // through to the default handling below.
      const extRenderer = this.options.extensions?.renderers?.[token.type];
      if (extRenderer) {
        const rendered = extRenderer.call({ parser: this }, token);
        const builtIn = ['escape', 'html', 'link', 'image', 'strong', 'em', 'codespan', 'br', 'del', 'text'].includes(token.type);
        if (rendered !== false || !builtIn) {
          result += rendered || '';
          continue;
        }
      }
      switch (token.type) {
        case 'escape':
          result += renderer.text(token.text);
          break;
        case 'html':
          result += renderer.html(token.text);
          break;
        case 'link':
          result += renderer.link(token.href, token.title, this.parseInline(token.tokens, renderer));
          break;
        case 'image':
          result += renderer.image(token.href, token.title, token.text);
          break;
        case 'strong':
          result += renderer.strong(this.parseInline(token.tokens, renderer));
          break;
        case 'em':
          result += renderer.em(this.parseInline(token.tokens, renderer));
          break;
        case 'codespan':
          result += renderer.codespan(token.text);
          break;
        case 'br':
          result += renderer.br();
          break;
        case 'del':
          result += renderer.del(this.parseInline(token.tokens, renderer));
          break;
        case 'text':
          result += renderer.text(token.text);
          break;
        default: {
          const errMsg = 'Token with "' + token.type + '" type was not found.';
          if (this.options.silent) {
            console.error(errMsg);
            return '';
          }
          throw new Error(errMsg);
        }
      }
    }
    return result;
  }
}
class _Hooks {
  options;
  // Hooks whose registered implementations are chained (new first, previous
  // fed the result) rather than simply replaced — see Marked#use.
  static passThroughHooks = new Set([
    'preprocess',
    'postprocess'
  ]);
  constructor(options) {
    this.options = options || _defaults;
  }
  /**
   * Process markdown before marked.
   */
  preprocess(markdown) {
    return markdown;
  }
  /**
   * Process HTML after marked is finished.
   */
  postprocess(html) {
    return html;
  }
}
class Marked {
  defaults = _getDefaults();
  options = this.setOptions;
  parse = this.#parseMarkdown(_Lexer.lex, _Parser.parse);
  parseInline = this.#parseMarkdown(_Lexer.lexInline, _Parser.parseInline);
  Parser = _Parser;
  parser = _Parser.parse;
  Renderer = _Renderer;
  TextRenderer = _TextRenderer;
  Lexer = _Lexer;
  lexer = _Lexer.lex;
  Tokenizer = _Tokenizer;
  Slugger = _Slugger;
  Hooks = _Hooks;
  constructor(...args) {
    this.use(...args);
  }
  /**
   * Run callback for every token, recursing into child token lists.
   * Returns the collected callback results.
   */
  walkTokens(tokens, callback) {
    let values = [];
    for (const token of tokens) {
      values = values.concat(callback.call(this, token));
      switch (token.type) {
        case 'table': {
          for (const cell of token.header) {
            values = values.concat(this.walkTokens(cell.tokens, callback));
          }
          for (const row of token.rows) {
            for (const cell of row) {
              values = values.concat(this.walkTokens(cell.tokens, callback));
            }
          }
          break;
        }
        case 'list': {
          values = values.concat(this.walkTokens(token.items, callback));
          break;
        }
        default: {
          // Extensions may declare which properties hold child tokens.
          const childTokenNames = this.defaults.extensions?.childTokens?.[token.type];
          if (childTokenNames) {
            childTokenNames.forEach((childTokens) => {
              values = values.concat(this.walkTokens(token[childTokens], callback));
            });
          } else if (token.tokens) {
            values = values.concat(this.walkTokens(token.tokens, callback));
          }
        }
      }
    }
    return values;
  }
  /**
   * Register extension packs (renderers, tokenizers, hooks, walkTokens).
   * Returns `this` for chaining.
   */
  use(...args) {
    const extensions = this.defaults.extensions || { renderers: {}, childTokens: {} };
    args.forEach((pack) => {
      // Shallow-copy so the caller's pack object is never mutated.
      const opts = { ...pack };
      // Once async is enabled it stays enabled for later packs.
      opts.async = this.defaults.async || opts.async || false;
      // ==-- Parse "addon" extensions --== //
      if (pack.extensions) {
        pack.extensions.forEach((ext) => {
          if (!ext.name) {
            throw new Error('extension name required');
          }
          if ('renderer' in ext) { // Renderer extensions
            const prevRenderer = extensions.renderers[ext.name];
            if (prevRenderer) {
              // Chain: run the new renderer, fall back to the previous one
              // when it returns false.
              extensions.renderers[ext.name] = function (...args) {
                let ret = ext.renderer.apply(this, args);
                if (ret === false) {
                  ret = prevRenderer.apply(this, args);
                }
                return ret;
              };
            } else {
              extensions.renderers[ext.name] = ext.renderer;
            }
          }
          if ('tokenizer' in ext) { // Tokenizer Extensions
            if (!ext.level || (ext.level !== 'block' && ext.level !== 'inline')) {
              throw new Error("extension level must be 'block' or 'inline'");
            }
            // New tokenizers take priority within their level.
            const levelTokenizers = extensions[ext.level];
            if (levelTokenizers) {
              levelTokenizers.unshift(ext.tokenizer);
            } else {
              extensions[ext.level] = [ext.tokenizer];
            }
            if (ext.start) { // Function to check for start of token
              if (ext.level === 'block') {
                if (extensions.startBlock) {
                  extensions.startBlock.push(ext.start);
                } else {
                  extensions.startBlock = [ext.start];
                }
              } else if (ext.level === 'inline') {
                if (extensions.startInline) {
                  extensions.startInline.push(ext.start);
                } else {
                  extensions.startInline = [ext.start];
                }
              }
            }
          }
          if ('childTokens' in ext && ext.childTokens) { // Child tokens to be visited by walkTokens
            extensions.childTokens[ext.name] = ext.childTokens;
          }
        });
        opts.extensions = extensions;
      }
      // ==-- Parse "overwrite" extensions --== //
      if (pack.renderer) {
        const renderer = this.defaults.renderer || new _Renderer(this.defaults);
        for (const prop in pack.renderer) {
          const rendererFunc = pack.renderer[prop];
          const prevRenderer = renderer[prop];
          // Replace renderer with func to run extension, but fall back if false.
          renderer[prop] = (...args) => {
            let ret = rendererFunc.apply(renderer, args);
            if (ret === false) {
              ret = prevRenderer.apply(renderer, args);
            }
            return ret || '';
          };
        }
        opts.renderer = renderer;
      }
      if (pack.tokenizer) {
        const tokenizer = this.defaults.tokenizer || new _Tokenizer(this.defaults);
        for (const prop in pack.tokenizer) {
          const tokenizerFunc = pack.tokenizer[prop];
          const prevTokenizer = tokenizer[prop];
          // Replace tokenizer with func to run extension, but fall back if false.
          tokenizer[prop] = (...args) => {
            let ret = tokenizerFunc.apply(tokenizer, args);
            if (ret === false) {
              ret = prevTokenizer.apply(tokenizer, args);
            }
            return ret;
          };
        }
        opts.tokenizer = tokenizer;
      }
      // ==-- Parse Hooks extensions --== //
      if (pack.hooks) {
        const hooks = this.defaults.hooks || new _Hooks();
        for (const prop in pack.hooks) {
          const hooksFunc = pack.hooks[prop];
          const prevHook = hooks[prop];
          if (_Hooks.passThroughHooks.has(prop)) {
            // Pass-through hooks pipe the new hook's result into the
            // previous hook (awaiting it when running async).
            hooks[prop] = (arg) => {
              if (this.defaults.async) {
                return Promise.resolve(hooksFunc.call(hooks, arg)).then((ret) => {
                  return prevHook.call(hooks, ret);
                });
              }
              const ret = hooksFunc.call(hooks, arg);
              return prevHook.call(hooks, ret);
            };
          } else {
            hooks[prop] = (...args) => {
              let ret = hooksFunc.apply(hooks, args);
              if (ret === false) {
                ret = prevHook.apply(hooks, args);
              }
              return ret;
            };
          }
        }
        opts.hooks = hooks;
      }
      // ==-- Parse WalkTokens extensions --== //
      if (pack.walkTokens) {
        const prevWalkTokens = this.defaults.walkTokens;
        const packWalkTokens = pack.walkTokens;
        opts.walkTokens = function (token) {
          let values = [];
          values.push(packWalkTokens.call(this, token));
          if (prevWalkTokens) {
            values = values.concat(prevWalkTokens.call(this, token));
          }
          return values;
        };
      }
      this.defaults = { ...this.defaults, ...opts };
    });
    return this;
  }
  /**
   * Merge new options into the instance defaults.
   */
  setOptions(opt) {
    this.defaults = { ...this.defaults, ...opt };
    return this;
  }
  /**
   * Build a parse entry point from a lexer/parser pair. Handles the legacy
   * callback signature, the async (Promise) mode, and the sync mode.
   */
  #parseMarkdown(lexer, parser) {
    return (src, optOrCallback, callback) => {
      // Support (src, callback) with the options argument omitted.
      if (typeof optOrCallback === 'function') {
        callback = optOrCallback;
        optOrCallback = null;
      }
      const origOpt = { ...optOrCallback };
      const opt = { ...this.defaults, ...origOpt };
      const throwError = this.#onError(!!opt.silent, !!opt.async, callback);
      // throw error in case of non string input
      if (typeof src === 'undefined' || src === null) {
        return throwError(new Error('marked(): input parameter is undefined or null'));
      }
      if (typeof src !== 'string') {
        return throwError(new Error('marked(): input parameter is of type '
          + Object.prototype.toString.call(src) + ', string expected'));
      }
      checkDeprecations(opt, callback);
      if (opt.hooks) {
        opt.hooks.options = opt;
      }
      if (callback) {
        // Legacy callback flow, with optional async `highlight` support.
        const resultCallback = callback;
        const highlight = opt.highlight;
        let tokens;
        try {
          if (opt.hooks) {
            src = opt.hooks.preprocess(src);
          }
          tokens = lexer(src, opt);
        } catch (e) {
          return throwError(e);
        }
        const done = (err) => {
          let out;
          if (!err) {
            try {
              if (opt.walkTokens) {
                this.walkTokens(tokens, opt.walkTokens);
              }
              out = parser(tokens, opt);
              if (opt.hooks) {
                out = opt.hooks.postprocess(out);
              }
            } catch (e) {
              err = e;
            }
          }
          // Restore the highlight option removed below before reporting.
          opt.highlight = highlight;
          return err
            ? throwError(err)
            : resultCallback(null, out);
        };
        // A highlight function with fewer than 3 params is synchronous.
        if (!highlight || highlight.length < 3) {
          return done();
        }
        delete opt.highlight;
        if (!tokens.length)
          return done();
        let pending = 0;
        this.walkTokens(tokens, (token) => {
          if (token.type === 'code') {
            pending++;
            setTimeout(() => {
              highlight(token.text, token.lang, (err, code) => {
                if (err) {
                  return done(err);
                }
                if (code != null && code !== token.text) {
                  token.text = code;
                  token.escaped = true;
                }
                pending--;
                if (pending === 0) {
                  done();
                }
              });
            }, 0);
          }
        });
        if (pending === 0) {
          done();
        }
        return;
      }
      if (opt.async) {
        return Promise.resolve(opt.hooks ? opt.hooks.preprocess(src) : src)
          .then((src) => lexer(src, opt))
          .then((tokens) => opt.walkTokens ? Promise.all(this.walkTokens(tokens, opt.walkTokens)).then(() => tokens) : tokens)
          .then((tokens) => parser(tokens, opt))
          .then((html) => opt.hooks ? opt.hooks.postprocess(html) : html)
          .catch(throwError);
      }
      try {
        if (opt.hooks) {
          src = opt.hooks.preprocess(src);
        }
        const tokens = lexer(src, opt);
        if (opt.walkTokens) {
          this.walkTokens(tokens, opt.walkTokens);
        }
        let html = parser(tokens, opt);
        if (opt.hooks) {
          html = opt.hooks.postprocess(html);
        }
        return html;
      } catch (e) {
        return throwError(e);
      }
    };
  }
  /**
   * Build an error reporter that respects silent/async/callback modes.
   */
  #onError(silent, async, callback) {
    return (e) => {
      e.message += '\nPlease report this to https://github.com/markedjs/marked.';
      if (silent) {
        const msg = '<p>An error occurred:</p><pre>'
          + escape(e.message + '', true)
          + '</pre>';
        if (async) {
          return Promise.resolve(msg);
        }
        if (callback) {
          callback(null, msg);
          return;
        }
        return msg;
      }
      if (async) {
        return Promise.reject(e);
      }
      if (callback) {
        callback(e);
        return;
      }
      throw e;
    };
  }
}
2023-06-10 03:13:30 +00:00
2023-08-07 16:50:43 -06:00
const markedInstance = new Marked ( ) ;
2023-03-22 05:52:21 +00:00
function marked ( src , opt , callback ) {
2023-08-07 16:50:43 -06:00
return markedInstance . parse ( src , opt , callback ) ;
2019-11-07 12:49:10 -06:00
}
2023-08-07 16:50:43 -06:00
/ * *
* Sets the default options .
*
* @ param options Hash of options
* /
marked . options =
marked . setOptions = function ( options ) {
markedInstance . setOptions ( options ) ;
marked . defaults = markedInstance . defaults ;
changeDefaults ( marked . defaults ) ;
return marked ;
} ;
/ * *
* Gets the original marked default options .
* /
2023-07-29 08:31:34 +02:00
marked . getDefaults = _getDefaults ;
marked . defaults = _defaults ;
2023-08-07 16:50:43 -06:00
/ * *
* Use Extension
* /
marked . use = function ( ... args ) {
markedInstance . use ( ... args ) ;
marked . defaults = markedInstance . defaults ;
changeDefaults ( marked . defaults ) ;
return marked ;
2020-04-20 18:30:22 +00:00
} ;
2023-08-07 16:50:43 -06:00
/ * *
* Run callback for every token
* /
marked . walkTokens = function ( tokens , callback ) {
return markedInstance . walkTokens ( tokens , callback ) ;
2020-05-14 15:54:39 +00:00
} ;
2023-08-07 16:50:43 -06:00
/ * *
* Compiles markdown to HTML without enclosing ` p ` tag .
*
* @ param src String of markdown source to be compiled
* @ param options Hash of options
* @ return String of compiled HTML
* /
2023-06-10 03:13:30 +00:00
marked . parseInline = markedInstance . parseInline ;
2023-08-07 16:50:43 -06:00
/ * *
* Expose
* /
2023-07-29 08:31:34 +02:00
marked . Parser = _Parser ;
marked . parser = _Parser . parse ;
marked . Renderer = _Renderer ;
marked . TextRenderer = _TextRenderer ;
2023-08-07 16:50:43 -06:00
marked . Lexer = _Lexer ;
marked . lexer = _Lexer . lex ;
2023-07-29 08:31:34 +02:00
marked . Tokenizer = _Tokenizer ;
marked . Slugger = _Slugger ;
marked . Hooks = _Hooks ;
2021-11-02 07:32:17 -07:00
marked . parse = marked ;
2023-08-07 16:50:43 -06:00
const options = marked . options ;
const setOptions = marked . setOptions ;
const use = marked . use ;
const walkTokens = marked . walkTokens ;
const parseInline = marked . parseInline ;
const parse = marked ;
const parser = _Parser . parse ;
const lexer = _Lexer . lex ;
export { _Hooks as Hooks , _Lexer as Lexer , Marked , _Parser as Parser , _Renderer as Renderer , _Slugger as Slugger , _TextRenderer as TextRenderer , _Tokenizer as Tokenizer , _defaults as defaults , _getDefaults as getDefaults , lexer , marked , options , parse , parseInline , parser , setOptions , use , walkTokens } ;
//# sourceMappingURL=marked.esm.js.map