Merge branch 'text_blocks'

This commit is contained in:
Christopher Jeffrey 2011-10-04 19:39:56 -05:00
commit 783eede3ed

View File

@ -22,14 +22,14 @@ var block = {
};
block.keys = [
'newline',
'block',
'heading',
'lheading',
'hr',
'blockquote',
'list',
'html',
'newline',
'block',
'heading',
'lheading',
'hr',
'blockquote',
'list',
'html',
'text'
];
@ -41,14 +41,11 @@ block.lexer = function(str) {
var tokens = []
, links = {};
// normalize whitespace
str = str.replace(/\r\n/g, '\n')
.replace(/\r/g, '\n')
str = str.replace(/\r\n|\r/g, '\n')
.replace(/\t/g, ' ');
// grab link definitions
str = str.replace(
/^ {0,3}\[([^\]]+)\]: *([^ ]+)(?: +"([^\n]+)")? *$/gm,
/^ {0,3}\[([^\]]+)\]: *([^ ]+)(?: +"([^\n]+)")? *$/gm,
function(__, id, href, title) {
links[id] = {
href: href,
@ -100,22 +97,22 @@ block.token = function(str, tokens) {
break;
case 'lheading':
tokens.push({
type: 'heading',
depth: cap[2] === '=' ? 1 : 2,
type: 'heading',
depth: cap[2] === '=' ? 1 : 2,
text: cap[1]
});
break;
case 'heading':
tokens.push({
type: 'heading',
depth: cap[1].length,
type: 'heading',
depth: cap[1].length,
text: cap[2]
});
break;
case 'block':
cap = cap[0].replace(/^ {4}/gm, '');
tokens.push({
type: 'block',
type: 'block',
text: cap
});
break;
@ -130,10 +127,10 @@ block.token = function(str, tokens) {
/^( *)([*+-]|\d+\.)[^\n]*(?:\n(?!\1(?:\2|\d+\.))[^\n]*)*/gm
);
each(cap, function(item) {
// remove the list item's sigil
// remove the list item's sigil
// so it's seen as the next token
item = item.replace(/^ *([*+-]|\d+\.) */, '');
// outdent whatever the
// outdent whatever the
// list item contains, hacky
var space = /\n( +)/.exec(item);
if (space) {
@ -141,7 +138,7 @@ block.token = function(str, tokens) {
item = item.replace(space, '');
}
tokens.push({
type: loose
type: loose
? 'loose_item_start'
: 'list_item_start'
});
@ -157,7 +154,7 @@ block.token = function(str, tokens) {
case 'html':
case 'text':
tokens.push({
type: key,
type: key,
text: cap[0]
});
break;
@ -197,13 +194,13 @@ var inline = {
inline.keys = [
'escape',
'autolink',
'tag',
'link',
'reflink',
'autolink',
'tag',
'link',
'reflink',
'nolink',
'strong',
'em',
'strong',
'em',
'code',
'br',
'text'
@ -226,7 +223,7 @@ inline.text = (function() {
('code')
('br');
return new
return new
RegExp('^[^\\0]+?(?=' + body.join('|') + '|$)');
})();
@ -260,7 +257,7 @@ inline.lexer = function(str) {
while (scan()) {
switch (key) {
case 'escape':
case 'escape':
out += cap[1];
break;
case 'tag':
@ -285,28 +282,28 @@ inline.lexer = function(str) {
};
}
if (cap[0][0] !== '!') {
out += '<a href="'
+ escape(link.href)
+ '"'
out += '<a href="'
+ escape(link.href)
+ '"'
+ (link.title
? ' title="'
? ' title="'
+ escape(link.title)
+ '"'
: '')
+ '"'
: '')
+ '>'
+ inline.lexer(cap[1])
+ '</a>';
} else {
out += '<img src="'
out += '<img src="'
+ escape(link.href)
+ '" alt="'
+ '" alt="'
+ escape(cap[1])
+ '"'
+ '"'
+ (link.title
? ' title="'
? ' title="'
+ escape(link.title)
+ '"'
: '')
+ '"'
: '')
+ '>';
}
break;
@ -325,18 +322,18 @@ inline.lexer = function(str) {
+ '</a>';
break;
case 'strong':
out += '<strong>'
+ inline.lexer(cap[2] || cap[1])
out += '<strong>'
+ inline.lexer(cap[2] || cap[1])
+ '</strong>';
break;
case 'em':
out += '<em>'
+ inline.lexer(cap[2] || cap[1])
out += '<em>'
+ inline.lexer(cap[2] || cap[1])
+ '</em>';
break;
case 'code':
out += '<code>'
+ escape(cap[2] || cap[1])
out += '<code>'
+ escape(cap[2] || cap[1])
+ '</code>';
break;
case 'br':
@ -368,64 +365,76 @@ var tok = function() {
switch (token.type) {
case 'space':
return '';
case 'hr':
case 'hr':
return '<hr>';
case 'heading':
return '<h' + token.depth + '>'
case 'heading':
return '<h' + token.depth + '>'
+ inline.lexer(token.text)
+ '</h' + token.depth + '>';
case 'block':
return '<pre><code>'
case 'block':
return '<pre><code>'
+ escape(token.text)
+ '</code></pre>';
case 'blockquote_start':
case 'blockquote_start':
var body = [];
while (next().type !== 'blockquote_end') {
body.push(tok());
}
return '<blockquote>'
+ body.join('')
return '<blockquote>'
+ body.join('')
+ '</blockquote>';
case 'list_start':
var body = []
, type = token.ordered ? 'ol' : 'ul';
var type = token.ordered ? 'ol' : 'ul'
, body = [];
while (next().type !== 'list_end') {
body.push(tok());
}
return '<' + type + '>'
+ body.join('')
return '<' + type + '>'
+ body.join('')
+ '</' + type + '>';
case 'list_item_start':
case 'list_item_start':
var body = [];
while (next().type !== 'list_item_end') {
body.push(token.type === 'text'
? inline.lexer(token.text)
? text()
: tok());
}
return '<li>'
+ body.join(' ')
return '<li>'
+ body.join(' ')
+ '</li>';
case 'loose_item_start':
case 'loose_item_start':
var body = [];
while (next().type !== 'list_item_end') {
body.push(tok());
}
return '<li>'
+ body.join(' ')
return '<li>'
+ body.join(' ')
+ '</li>';
case 'html':
return inline.lexer(token.text);
case 'text':
var body = [ token.text ]
, top;
while ((top = tokens[tokens.length-1])
&& top.type === 'text') {
body.push(next().text);
}
return '<p>'
+ inline.lexer(body.join('\n'))
+ '</p>';
case 'text':
return '<p>' + text() + '</p>';
}
};
var text = function() {
var body = [ token.text ]
, top;
while ((top = tokens[tokens.length-1])
&& top.type === 'text') {
body.push(next().text);
}
return inline.lexer(body.join('\n'));
};
var parse = function(src) {