From 5196683b814b7269d2513160370a1d94e0b3ebdb Mon Sep 17 00:00:00 2001 From: win Date: Wed, 6 Mar 2024 15:55:30 -0800 Subject: [PATCH] Create Markdown.Converter.js --- docs/Markdown.Converter.js | 1519 ++++++++++++++++++++++++++++++++++++ 1 file changed, 1519 insertions(+) create mode 100644 docs/Markdown.Converter.js diff --git a/docs/Markdown.Converter.js b/docs/Markdown.Converter.js new file mode 100644 index 000000000..997a6ce90 --- /dev/null +++ b/docs/Markdown.Converter.js @@ -0,0 +1,1519 @@ +"use strict"; +var Markdown; + +if (typeof exports === "object" && typeof require === "function") // we're in a CommonJS (e.g. Node.js) module + Markdown = exports; +else + Markdown = {}; + +// The following text is included for historical reasons, but should +// be taken with a pinch of salt; it's not all true anymore. + +// +// Wherever possible, Showdown is a straight, line-by-line port +// of the Perl version of Markdown. +// +// This is not a normal parser design; it's basically just a +// series of string substitutions. It's hard to read and +// maintain this way, but keeping Showdown close to the original +// design makes it easier to port new features. +// +// More importantly, Showdown behaves like markdown.pl in most +// edge cases. So web applications can do client-side preview +// in Javascript, and then build identical HTML on the server. +// +// This port needs the new RegExp functionality of ECMA 262, +// 3rd Edition (i.e. Javascript 1.5). Most modern web browsers +// should do fine. Even with the new regular expression features, +// We do a lot of work to emulate Perl's regex functionality. +// The tricky changes in this file mostly have the "attacklab:" +// label. Major or self-explanatory changes don't. +// +// Smart diff tools like Araxis Merge will be able to match up +// this file with markdown.pl in a useful way. A little tweaking +// helps: in a copy of markdown.pl, replace "#" with "//" and +// replace "$text" with "text". Be sure to ignore whitespace +// and line endings. +// + + +// +// Usage: +// +// var text = "Markdown *rocks*."; +// +// var converter = new Markdown.Converter(); +// var html = converter.makeHtml(text); +// +// alert(html); +// +// Note: move the sample code to the bottom of this +// file before uncommenting it. +// + +(function () { + + function identity(x) { return x; } + function returnFalse(x) { return false; } + + function HookCollection() { } + + HookCollection.prototype = { + + chain: function (hookname, func) { + var original = this[hookname]; + if (!original) + throw new Error("unknown hook " + hookname); + + if (original === identity) + this[hookname] = func; + else + this[hookname] = function (text) { + var args = Array.prototype.slice.call(arguments, 0); + args[0] = original.apply(null, args); + return func.apply(null, args); + }; + }, + set: function (hookname, func) { + if (!this[hookname]) + throw new Error("unknown hook " + hookname); + this[hookname] = func; + }, + addNoop: function (hookname) { + this[hookname] = identity; + }, + addFalse: function (hookname) { + this[hookname] = returnFalse; + } + }; + + Markdown.HookCollection = HookCollection; + + // g_urls and g_titles allow arbitrary user-entered strings as keys. This + // caused an exception (and hence stopped the rendering) when the user entered + // e.g. [push] or [__proto__]. Adding a prefix to the actual key prevents this + // (since no builtin property starts with "s_"). 
See + // http://meta.stackexchange.com/questions/64655/strange-wmd-bug + // (granted, switching from Array() to Object() alone would have left only __proto__ + // to be a problem) + function SaveHash() { } + SaveHash.prototype = { + set: function (key, value) { + this["s_" + key] = value; + }, + get: function (key) { + return this["s_" + key]; + } + }; + + Markdown.Converter = function (OPTIONS) { + var pluginHooks = this.hooks = new HookCollection(); + + // given a URL that was encountered by itself (without markup), should return the link text that's to be given to this link + pluginHooks.addNoop("plainLinkText"); + + // called with the orignal text as given to makeHtml. The result of this plugin hook is the actual markdown source that will be cooked + pluginHooks.addNoop("preConversion"); + + // called with the text once all normalizations have been completed (tabs to spaces, line endings, etc.), but before any conversions have + pluginHooks.addNoop("postNormalization"); + + // Called with the text before / after creating block elements like code blocks and lists. Note that this is called recursively + // with inner content, e.g. it's called with the full text, and then only with the content of a blockquote. The inner + // call will receive outdented text. + pluginHooks.addNoop("preBlockGamut"); + pluginHooks.addNoop("postBlockGamut"); + + // called with the text of a single block element before / after the span-level conversions (bold, code spans, etc.) have been made + pluginHooks.addNoop("preSpanGamut"); + pluginHooks.addNoop("postSpanGamut"); + + // called with the final cooked HTML code. The result of this plugin hook is the actual output of makeHtml + pluginHooks.addNoop("postConversion"); + + // + // Private state of the converter instance: + // + + // Global hashes, used by various utility routines + var g_urls; + var g_titles; + var g_html_blocks; + + // Used to track when we're inside an ordered or unordered list + // (see _ProcessListItems() for details): + var g_list_level; + + OPTIONS = OPTIONS || {}; + var asciify = identity, deasciify = identity; + if (OPTIONS.nonAsciiLetters) { + + /* In JavaScript regular expressions, \w only denotes [a-zA-Z0-9_]. + * That's why there's inconsistent handling e.g. with intra-word bolding + * of Japanese words. That's why we do the following if OPTIONS.nonAsciiLetters + * is true: + * + * Before doing bold and italics, we find every instance + * of a unicode word character in the Markdown source that is not + * matched by \w, and the letter "Q". We take the character's code point + * and encode it in base 51, using the "digits" + * + * A, B, ..., P, R, ..., Y, Z, a, b, ..., y, z + * + * delimiting it with "Q" on both sides. For example, the source + * + * > In Chinese, the smurfs are called 藍精靈, meaning "blue spirits". + * + * turns into + * + * > In Chinese, the smurfs are called QNIhQQMOIQQOuUQ, meaning "blue spirits". + * + * Since everything that is a letter in Unicode is now a letter (or + * several letters) in ASCII, \w and \b should always do the right thing. + * + * After the bold/italic conversion, we decode again; since "Q" was encoded + * alongside all non-ascii characters (as "QBfQ"), and the conversion + * will not generate "Q", the only instances of that letter should be our + * encoded characters. And since the conversion will not break words, the + * "Q...Q" should all still be in one piece. 
+ * + * We're using "Q" as the delimiter because it's probably one of the + * rarest characters, and also because I can't think of any special behavior + * that would ever be triggered by this letter (to use a silly example, if we + * delimited with "H" on the left and "P" on the right, then "Ψ" would be + * encoded as "HTTP", which may cause special behavior). The latter would not + * actually be a huge issue for bold/italic, but may be if we later use it + * in other places as well. + * */ + (function () { + var lettersThatJavaScriptDoesNotKnowAndQ = /[Q\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0523\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0621-\u064a\u0660-\u0669\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07c0-\u07ea\u07f4-\u07f5\u07fa\u0904-\u0939\u093d\u0950\u0958-\u0961\u0966-\u096f\u0971-\u0972\u097b-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09e6-\u09f1\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a66-\u0a6f\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0ae6-\u0aef\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b66-\u0b6f\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0be6-\u0bef\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c66-\u0c6f\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0ce6-\u0cef\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d28\u0d2a-\u0d39\u0d3d\u0d60-\u0d61\u0d66-\u0d6f\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e46\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0ed0-\u0ed9\u0edc-\u0edd\u0f00\u0f20-\u0f29\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f-\u1049\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1090-\u1099\u10a0-\u10c5\u10d0-\u10fa\u10fc\u1100-\u1159\u115f-\u11a2\u11a8-\u11f9\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u17e0-\u17e9\u1810-\u1819\u1820-\u1877\u1880-\u18a8\u18aa\u1900-\u191c\u1946-\u196d\u1970-\u1974\u1980-\u19a9\u19c1-\u19c7\u19d0-\u19d9\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b50-\u1b59\u1b83-\u1ba0\u1bae-\u1bb9\u1c00-\u1c23\u1c40-\u1c49\u1c4d-\u1c7d\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u2094\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-
\u213f\u2145-\u2149\u214e\u2183-\u2184\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2c6f\u2c71-\u2c7d\u2c80-\u2ce4\u2d00-\u2d25\u2d30-\u2d65\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3006\u3031-\u3035\u303b-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fc3\ua000-\ua48c\ua500-\ua60c\ua610-\ua62b\ua640-\ua65f\ua662-\ua66e\ua67f-\ua697\ua717-\ua71f\ua722-\ua788\ua78b-\ua78c\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8d0-\ua8d9\ua900-\ua925\ua930-\ua946\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa50-\uaa59\uac00-\ud7a3\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe33-\ufe34\ufe4d-\ufe4f\ufe70-\ufe74\ufe76-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc]/g; + var cp_Q = "Q".charCodeAt(0); + var cp_A = "A".charCodeAt(0); + var cp_Z = "Z".charCodeAt(0); + var dist_Za = "a".charCodeAt(0) - cp_Z - 1; + + asciify = function(text) { + return text.replace(lettersThatJavaScriptDoesNotKnowAndQ, function (m) { + var c = m.charCodeAt(0); + var s = ""; + var v; + while (c > 0) { + v = (c % 51) + cp_A; + if (v >= cp_Q) + v++; + if (v > cp_Z) + v += dist_Za; + s = String.fromCharCode(v) + s; + c = c / 51 | 0; + } + return "Q" + s + "Q"; + }) + }; + + deasciify = function(text) { + return text.replace(/Q([A-PR-Za-z]{1,3})Q/g, function (m, s) { + var c = 0; + var v; + for (var i = 0; i < s.length; i++) { + v = s.charCodeAt(i); + if (v > cp_Z) + v -= dist_Za; + if (v > cp_Q) + v--; + v -= cp_A; + c = (c * 51) + v; + } + return String.fromCharCode(c); + }) + } + })(); + } + + this.makeHtml = function (text) { + + // + // Main function. The order in which other subs are called here is + // essential. Link and image substitutions need to happen before + // _EscapeSpecialCharsWithinTagAttributes(), so that any *'s or _'s in the + // and tags get encoded. + // + + // This will only happen if makeHtml on the same converter instance is called from a plugin hook. + // Don't do that. + if (g_urls) + throw new Error("Recursive call to converter.makeHtml"); + + // Create the private state objects. + g_urls = new SaveHash(); + g_titles = new SaveHash(); + g_html_blocks = []; + g_list_level = 0; + + text = pluginHooks.preConversion(text); + + // attacklab: Replace ~ with ~T + // This lets us use tilde as an escape char to avoid md5 hashes + // The choice of character is arbitray; anything that isn't + // magic in Markdown will work. + text = text.replace(/~/g, "~T"); + + // attacklab: Replace $ with ~D + // RegExp interprets $ as a special character + // when it's in a replacement string + text = text.replace(/\$/g, "~D"); + + // Standardize line endings + text = text.replace(/\r\n/g, "\n"); // DOS to Unix + text = text.replace(/\r/g, "\n"); // Mac to Unix + + // Make sure text begins and ends with a couple of newlines: + text = "\n\n" + text + "\n\n"; + + // Convert all tabs to spaces. + text = _Detab(text); + + // Strip any lines consisting only of spaces and tabs. + // This makes subsequent regexen easier to write, because we can + // match consecutive blank lines with /\n+/ instead of something + // contorted like /[ \t]*\n+/ . 
+ text = text.replace(/^[ \t]+$/mg, ""); + + text = pluginHooks.postNormalization(text); + + // Turn block-level HTML blocks into hash entries + text = _HashHTMLBlocks(text); + + // Strip link definitions, store in hashes. + text = _StripLinkDefinitions(text); + + text = _RunBlockGamut(text); + + text = _UnescapeSpecialChars(text); + + // attacklab: Restore dollar signs + text = text.replace(/~D/g, "$$"); + + // attacklab: Restore tildes + text = text.replace(/~T/g, "~"); + + text = pluginHooks.postConversion(text); + + g_html_blocks = g_titles = g_urls = null; + + return text; + }; + + function _StripLinkDefinitions(text) { + // + // Strips link definitions from text, stores the URLs and titles in + // hash references. + // + + // Link defs are in the form: ^[id]: url "optional title" + + /* + text = text.replace(/ + ^[ ]{0,3}\[([^\[\]]+)\]: // id = $1 attacklab: g_tab_width - 1 + [ \t]* + \n? // maybe *one* newline + [ \t]* + ? // url = $2 + (?=\s|$) // lookahead for whitespace instead of the lookbehind removed below + [ \t]* + \n? // maybe one newline + [ \t]* + ( // (potential) title = $3 + (\n*) // any lines skipped = $4 attacklab: lookbehind removed + [ \t]+ + ["(] + (.+?) // title = $5 + [")] + [ \t]* + )? // title is optional + (?:\n+|$) + /gm, function(){...}); + */ + + text = text.replace(/^[ ]{0,3}\[([^\[\]]+)\]:[ \t]*\n?[ \t]*?(?=\s|$)[ \t]*\n?[ \t]*((\n*)["(](.+?)[")][ \t]*)?(?:\n+)/gm, + function (wholeMatch, m1, m2, m3, m4, m5) { + m1 = m1.toLowerCase(); + g_urls.set(m1, _EncodeAmpsAndAngles(m2)); // Link IDs are case-insensitive + if (m4) { + // Oops, found blank lines, so it's not a title. + // Put back the parenthetical statement we stole. + return m3; + } else if (m5) { + g_titles.set(m1, m5.replace(/"/g, """)); + } + + // Completely remove the definition from the text + return ""; + } + ); + + return text; + } + + function _HashHTMLBlocks(text) { + + // Hashify HTML blocks: + // We only want to do this for block-level HTML tags, such as headers, + // lists, and tables. That's because we still want to wrap

<p>s around
+            // "paragraphs" that are wrapped in non-block-level tags, such as anchors,
+            // phrase emphasis, and spans. The list of tags we're looking for is
+            // hard-coded:
+            var block_tags_a = "p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del"
+            var block_tags_b = "p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math"
+
+            // First, look for nested blocks, e.g.:
+            //   <div>
+            //     <div>
+            //     tags for inner block must be indented.
+            //     </div>
+            //   </div>
+            //
+            // The outermost tags must start at the left margin for this to match, and
+            // the inner nested divs must be indented.
+            // We need to do this before the next, more liberal match, because the next
+            // match will start at the first `<div>` and stop at the first `</div>
`. + + // attacklab: This regex can be expensive when it fails. + + /* + text = text.replace(/ + ( // save in $1 + ^ // start of line (with /m) + <($block_tags_a) // start tag = $2 + \b // word break + // attacklab: hack around khtml/pcre bug... + [^\r]*?\n // any number of lines, minimally matching + // the matching end tag + [ \t]* // trailing spaces/tabs + (?=\n+) // followed by a newline + ) // attacklab: there are sentinel newlines at end of document + /gm,function(){...}}; + */ + text = text.replace(/^(<(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del)\b[^\r]*?\n<\/\2>[ \t]*(?=\n+))/gm, hashMatch); + + // + // Now match more liberally, simply from `\n` to `\n` + // + + /* + text = text.replace(/ + ( // save in $1 + ^ // start of line (with /m) + <($block_tags_b) // start tag = $2 + \b // word break + // attacklab: hack around khtml/pcre bug... + [^\r]*? // any number of lines, minimally matching + .* // the matching end tag + [ \t]* // trailing spaces/tabs + (?=\n+) // followed by a newline + ) // attacklab: there are sentinel newlines at end of document + /gm,function(){...}}; + */ + text = text.replace(/^(<(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math)\b[^\r]*?.*<\/\2>[ \t]*(?=\n+)\n)/gm, hashMatch); + + // Special case just for
. It was easier to make a special case than + // to make the other regex more complicated. + + /* + text = text.replace(/ + \n // Starting after a blank line + [ ]{0,3} + ( // save in $1 + (<(hr) // start tag = $2 + \b // word break + ([^<>])*? + \/?>) // the matching end tag + [ \t]* + (?=\n{2,}) // followed by a blank line + ) + /g,hashMatch); + */ + text = text.replace(/\n[ ]{0,3}((<(hr)\b([^<>])*?\/?>)[ \t]*(?=\n{2,}))/g, hashMatch); + + // Special case for standalone HTML comments: + + /* + text = text.replace(/ + \n\n // Starting after a blank line + [ ]{0,3} // attacklab: g_tab_width - 1 + ( // save in $1 + -]|-[^>])(?:[^-]|-[^-])*)--) // see http://www.w3.org/TR/html-markup/syntax.html#comments and http://meta.stackexchange.com/q/95256 + > + [ \t]* + (?=\n{2,}) // followed by a blank line + ) + /g,hashMatch); + */ + text = text.replace(/\n\n[ ]{0,3}(-]|-[^>])(?:[^-]|-[^-])*)--)>[ \t]*(?=\n{2,}))/g, hashMatch); + + // PHP and ASP-style processor instructions ( and <%...%>) + + /* + text = text.replace(/ + (?: + \n\n // Starting after a blank line + ) + ( // save in $1 + [ ]{0,3} // attacklab: g_tab_width - 1 + (?: + <([?%]) // $2 + [^\r]*? + \2> + ) + [ \t]* + (?=\n{2,}) // followed by a blank line + ) + /g,hashMatch); + */ + text = text.replace(/(?:\n\n)([ ]{0,3}(?:<([?%])[^\r]*?\2>)[ \t]*(?=\n{2,}))/g, hashMatch); + + return text; + } + + function hashBlock(text) { + text = text.replace(/(^\n+|\n+$)/g, ""); + // Replace the element text with a marker ("~KxK" where x is its key) + return "\n\n~K" + (g_html_blocks.push(text) - 1) + "K\n\n"; + } + + function hashMatch(wholeMatch, m1) { + return hashBlock(m1); + } + + var blockGamutHookCallback = function (t) { return _RunBlockGamut(t); } + + function _RunBlockGamut(text, doNotUnhash) { + // + // These are all the transformations that form block-level + // tags like paragraphs, headers, and list items. + // + + text = pluginHooks.preBlockGamut(text, blockGamutHookCallback); + + text = _DoHeaders(text); + + // Do Horizontal Rules: + var replacement = "
<hr />\n";
+            text = text.replace(/^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$/gm, replacement);
+            text = text.replace(/^[ ]{0,2}([ ]?-[ ]?){3,}[ \t]*$/gm, replacement);
+            text = text.replace(/^[ ]{0,2}([ ]?_[ ]?){3,}[ \t]*$/gm, replacement);
+
+            text = _DoLists(text);
+            text = _DoCodeBlocks(text);
+            text = _DoBlockQuotes(text);
+
+            text = pluginHooks.postBlockGamut(text, blockGamutHookCallback);
+
+            // We already ran _HashHTMLBlocks() before, in Markdown(), but that
+            // was to escape raw HTML in the original Markdown source. This time,
+            // we're escaping the markup we've just created, so that we don't wrap
+            // <p>
tags around block-level tags. + text = _HashHTMLBlocks(text); + text = _FormParagraphs(text, doNotUnhash); + + return text; + } + + function _RunSpanGamut(text) { + // + // These are all the transformations that occur *within* block-level + // tags like paragraphs, headers, and list items. + // + + text = pluginHooks.preSpanGamut(text); + + text = _DoCodeSpans(text); + text = _EscapeSpecialCharsWithinTagAttributes(text); + text = _EncodeBackslashEscapes(text); + + // Process anchor and image tags. Images must come first, + // because ![foo][f] looks like an anchor. + text = _DoImages(text); + text = _DoAnchors(text); + + // Make links out of things like `` + // Must come after _DoAnchors(), because you can use < and > + // delimiters in inline links like [this](). + text = _DoAutoLinks(text); + + text = text.replace(/~P/g, "://"); // put in place to prevent autolinking; reset now + + text = _EncodeAmpsAndAngles(text); + text = _DoItalicsAndBold(text); + + // Do hard breaks: + text = text.replace(/ +\n/g, "
<br>\n");
+
+            text = pluginHooks.postSpanGamut(text);
+
+            return text;
+        }
+
+        function _EscapeSpecialCharsWithinTagAttributes(text) {
+            //
+            // Within tags -- meaning between < and > -- encode [\ ` * _] so they
+            // don't conflict with their use in Markdown for code, italics and strong.
+            //
+
+            // Build a regex to find HTML tags and comments. See Friedl's
+            // "Mastering Regular Expressions", 2nd Ed., pp. 200-201.
+
+            // SE: changed the comment part of the regex
+
+            var regex = /(<[a-z\/!$]("[^"]*"|'[^']*'|[^'">])*>|<!(--(?:|(?:[^>-]|-[^>])(?:[^-]|-[^-])*)--)>)/gi;
+
+            text = text.replace(regex, function (wholeMatch) {
+                var tag = wholeMatch.replace(/(.)<\/?code>(?=.)/g, "$1`");
+                tag = escapeCharacters(tag, wholeMatch.charAt(1) == "!" ? "\\`*_/" : "\\`*_"); // also escape slashes in comments to prevent autolinking there -- http://meta.stackexchange.com/questions/95987
+                return tag;
+            });
+
+            return text;
+        }
+
+        function _DoAnchors(text) {
+            //
+            // Turn Markdown link shortcuts into XHTML <a>
tags. + // + // + // First, handle reference-style links: [link text] [id] + // + + /* + text = text.replace(/ + ( // wrap whole match in $1 + \[ + ( + (?: + \[[^\]]*\] // allow brackets nested one level + | + [^\[] // or anything else + )* + ) + \] + + [ ]? // one optional space + (?:\n[ ]*)? // one optional newline followed by spaces + + \[ + (.*?) // id = $3 + \] + ) + ()()()() // pad remaining backreferences + /g, writeAnchorTag); + */ + text = text.replace(/(\[((?:\[[^\]]*\]|[^\[\]])*)\][ ]?(?:\n[ ]*)?\[(.*?)\])()()()()/g, writeAnchorTag); + + // + // Next, inline-style links: [link text](url "optional title") + // + + /* + text = text.replace(/ + ( // wrap whole match in $1 + \[ + ( + (?: + \[[^\]]*\] // allow brackets nested one level + | + [^\[\]] // or anything else + )* + ) + \] + \( // literal paren + [ \t]* + () // no id, so leave $3 empty + ? + [ \t]* + ( // $5 + (['"]) // quote char = $6 + (.*?) // Title = $7 + \6 // matching quote + [ \t]* // ignore any spaces/tabs between closing quote and ) + )? // title is optional + \) + ) + /g, writeAnchorTag); + */ + + text = text.replace(/(\[((?:\[[^\]]*\]|[^\[\]])*)\]\([ \t]*()?[ \t]*((['"])(.*?)\6[ \t]*)?\))/g, writeAnchorTag); + + // + // Last, handle reference-style shortcuts: [link text] + // These must come last in case you've also got [link test][1] + // or [link test](/foo) + // + + /* + text = text.replace(/ + ( // wrap whole match in $1 + \[ + ([^\[\]]+) // link text = $2; can't contain '[' or ']' + \] + ) + ()()()()() // pad rest of backreferences + /g, writeAnchorTag); + */ + text = text.replace(/(\[([^\[\]]+)\])()()()()()/g, writeAnchorTag); + + return text; + } + + function writeAnchorTag(wholeMatch, m1, m2, m3, m4, m5, m6, m7) { + if (m7 == undefined) m7 = ""; + var whole_match = m1; + var link_text = m2.replace(/:\/\//g, "~P"); // to prevent auto-linking withing the link. will be converted back after the auto-linker runs + var link_id = m3.toLowerCase(); + var url = m4; + var title = m7; + + if (url == "") { + if (link_id == "") { + // lower-case and turn embedded newlines into spaces + link_id = link_text.toLowerCase().replace(/ ?\n/g, " "); + } + url = "#" + link_id; + + if (g_urls.get(link_id) != undefined) { + url = g_urls.get(link_id); + if (g_titles.get(link_id) != undefined) { + title = g_titles.get(link_id); + } + } + else { + if (whole_match.search(/\(\s*\)$/m) > -1) { + // Special case for explicit empty url + url = ""; + } else { + return whole_match; + } + } + } + url = encodeProblemUrlChars(url); + url = escapeCharacters(url, "*_"); + var result = ""; + + return result; + } + + function _DoImages(text) { + // + // Turn Markdown image shortcuts into tags. + // + + // + // First, handle reference-style labeled images: ![alt text][id] + // + + /* + text = text.replace(/ + ( // wrap whole match in $1 + !\[ + (.*?) // alt text = $2 + \] + + [ ]? // one optional space + (?:\n[ ]*)? // one optional newline followed by spaces + + \[ + (.*?) // id = $3 + \] + ) + ()()()() // pad rest of backreferences + /g, writeImageTag); + */ + text = text.replace(/(!\[(.*?)\][ ]?(?:\n[ ]*)?\[(.*?)\])()()()()/g, writeImageTag); + + // + // Next, handle inline images: ![alt text](url "optional title") + // Don't forget: encode * and _ + + /* + text = text.replace(/ + ( // wrap whole match in $1 + !\[ + (.*?) // alt text = $2 + \] + \s? // One optional whitespace character + \( // literal paren + [ \t]* + () // no id, so leave $3 empty + ? // src url = $4 + [ \t]* + ( // $5 + (['"]) // quote char = $6 + (.*?) 
// title = $7 + \6 // matching quote + [ \t]* + )? // title is optional + \) + ) + /g, writeImageTag); + */ + text = text.replace(/(!\[(.*?)\]\s?\([ \t]*()?[ \t]*((['"])(.*?)\6[ \t]*)?\))/g, writeImageTag); + + return text; + } + + function attributeEncode(text) { + // unconditionally replace angle brackets here -- what ends up in an attribute (e.g. alt or title) + // never makes sense to have verbatim HTML in it (and the sanitizer would totally break it) + return text.replace(/>/g, ">").replace(/" + _RunSpanGamut(m1) + "\n\n"; } + ); + + text = text.replace(/^(.+)[ \t]*\n-+[ \t]*\n+/gm, + function (matchFound, m1) { return "
<h2>" + _RunSpanGamut(m1) + "</h2>
\n\n"; } + ); + + // atx-style headers: + // # Header 1 + // ## Header 2 + // ## Header 2 with closing hashes ## + // ... + // ###### Header 6 + // + + /* + text = text.replace(/ + ^(\#{1,6}) // $1 = string of #'s + [ \t]* + (.+?) // $2 = Header text + [ \t]* + \#* // optional closing #'s (not counted) + \n+ + /gm, function() {...}); + */ + + text = text.replace(/^(\#{1,6})[ \t]*(.+?)[ \t]*\#*\n+/gm, + function (wholeMatch, m1, m2) { + var h_level = m1.length; + return "" + _RunSpanGamut(m2) + "\n\n"; + } + ); + + return text; + } + + function _DoLists(text, isInsideParagraphlessListItem) { + // + // Form HTML ordered (numbered) and unordered (bulleted) lists. + // + + // attacklab: add sentinel to hack around khtml/safari bug: + // http://bugs.webkit.org/show_bug.cgi?id=11231 + text += "~0"; + + // Re-usable pattern to match any entirel ul or ol list: + + /* + var whole_list = / + ( // $1 = whole list + ( // $2 + [ ]{0,3} // attacklab: g_tab_width - 1 + ([*+-]|\d+[.]) // $3 = first list item marker + [ \t]+ + ) + [^\r]+? + ( // $4 + ~0 // sentinel for workaround; should be $ + | + \n{2,} + (?=\S) + (?! // Negative lookahead for another list item marker + [ \t]* + (?:[*+-]|\d+[.])[ \t]+ + ) + ) + ) + /g + */ + var whole_list = /^(([ ]{0,3}([*+-]|\d+[.])[ \t]+)[^\r]+?(~0|\n{2,}(?=\S)(?![ \t]*(?:[*+-]|\d+[.])[ \t]+)))/gm; + + if (g_list_level) { + text = text.replace(whole_list, function (wholeMatch, m1, m2) { + var list = m1; + var list_type = (m2.search(/[*+-]/g) > -1) ? "ul" : "ol"; + + var result = _ProcessListItems(list, list_type, isInsideParagraphlessListItem); + + // Trim any trailing whitespace, to put the closing `` + // up on the preceding line, to get it past the current stupid + // HTML block parser. This is a hack to work around the terrible + // hack that is the HTML block parser. + result = result.replace(/\s+$/, ""); + result = "<" + list_type + ">" + result + "\n"; + return result; + }); + } else { + whole_list = /(\n\n|^\n?)(([ ]{0,3}([*+-]|\d+[.])[ \t]+)[^\r]+?(~0|\n{2,}(?=\S)(?![ \t]*(?:[*+-]|\d+[.])[ \t]+)))/g; + text = text.replace(whole_list, function (wholeMatch, m1, m2, m3) { + var runup = m1; + var list = m2; + + var list_type = (m3.search(/[*+-]/g) > -1) ? "ul" : "ol"; + var result = _ProcessListItems(list, list_type); + result = runup + "<" + list_type + ">\n" + result + "\n"; + return result; + }); + } + + // attacklab: strip sentinel + text = text.replace(/~0/, ""); + + return text; + } + + var _listItemMarkers = { ol: "\\d+[.]", ul: "[*+-]" }; + + function _ProcessListItems(list_str, list_type, isInsideParagraphlessListItem) { + // + // Process the contents of a single ordered or unordered list, splitting it + // into individual list items. + // + // list_type is either "ul" or "ol". + + // The $g_list_level global keeps track of when we're inside a list. + // Each time we enter a list, we increment it; when we leave a list, + // we decrement. If it's zero, we're not in a list anymore. + // + // We do this because when we're not inside a list, we want to treat + // something like this: + // + // I recommend upgrading to version + // 8. Oops, now this line is treated + // as a sub-list. + // + // As a single paragraph, despite the fact that the second line starts + // with a digit-period-space sequence. + // + // Whereas when we're inside a list (or sub-list), that line will be + // treated as the start of a sub-list. What a kludge, huh? This is + // an aspect of Markdown's syntax that's hard to parse perfectly + // without resorting to mind-reading. 
Perhaps the solution is to + // change the syntax rules such that sub-lists must start with a + // starting cardinal number; e.g. "1." or "a.". + + g_list_level++; + + // trim trailing blank lines: + list_str = list_str.replace(/\n{2,}$/, "\n"); + + // attacklab: add sentinel to emulate \z + list_str += "~0"; + + // In the original attacklab showdown, list_type was not given to this function, and anything + // that matched /[*+-]|\d+[.]/ would just create the next
  • , causing this mismatch: + // + // Markdown rendered by WMD rendered by MarkdownSharp + // ------------------------------------------------------------------ + // 1. first 1. first 1. first + // 2. second 2. second 2. second + // - third 3. third * third + // + // We changed this to behave identical to MarkdownSharp. This is the constructed RegEx, + // with {MARKER} being one of \d+[.] or [*+-], depending on list_type: + + /* + list_str = list_str.replace(/ + (^[ \t]*) // leading whitespace = $1 + ({MARKER}) [ \t]+ // list marker = $2 + ([^\r]+? // list item text = $3 + (\n+) + ) + (?= + (~0 | \2 ({MARKER}) [ \t]+) + ) + /gm, function(){...}); + */ + + var marker = _listItemMarkers[list_type]; + var re = new RegExp("(^[ \\t]*)(" + marker + ")[ \\t]+([^\\r]+?(\\n+))(?=(~0|\\1(" + marker + ")[ \\t]+))", "gm"); + var last_item_had_a_double_newline = false; + list_str = list_str.replace(re, + function (wholeMatch, m1, m2, m3) { + var item = m3; + var leading_space = m1; + var ends_with_double_newline = /\n\n$/.test(item); + var contains_double_newline = ends_with_double_newline || item.search(/\n{2,}/) > -1; + + if (contains_double_newline || last_item_had_a_double_newline) { + item = _RunBlockGamut(_Outdent(item), /* doNotUnhash = */true); + } + else { + // Recursion for sub-lists: + item = _DoLists(_Outdent(item), /* isInsideParagraphlessListItem= */ true); + item = item.replace(/\n$/, ""); // chomp(item) + if (!isInsideParagraphlessListItem) // only the outer-most item should run this, otherwise it's run multiple times for the inner ones + item = _RunSpanGamut(item); + } + last_item_had_a_double_newline = ends_with_double_newline; + return "
<li>" + item + "</li>\n";
+                }
+            );
+
+            // attacklab: strip sentinel
+            list_str = list_str.replace(/~0/g, "");
+
+            g_list_level--;
+            return list_str;
+        }
+
+        function _DoCodeBlocks(text) {
+            //
+            // Process Markdown `<pre><code>
    ` blocks.
    +            //  
    +
    +            /*
    +            text = text.replace(/
    +                (?:\n\n|^)
    +                (                               // $1 = the code block -- one or more lines, starting with a space/tab
    +                    (?:
    +                        (?:[ ]{4}|\t)           // Lines must start with a tab or a tab-width of spaces - attacklab: g_tab_width
    +                        .*\n+
    +                    )+
    +                )
    +                (\n*[ ]{0,3}[^ \t\n]|(?=~0))    // attacklab: g_tab_width
    +            /g ,function(){...});
    +            */
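+            // (Illustrative note, not from the original source: with the replacement below,
+            // an input line such as "    alert('hi');" -- indented by four spaces -- ends up
+            // wrapped roughly as "<pre><code>alert('hi');\n</code></pre>".)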
    +
    +            // attacklab: sentinel workarounds for lack of \A and \Z, safari\khtml bug
    +            text += "~0";
    +
    +            text = text.replace(/(?:\n\n|^\n?)((?:(?:[ ]{4}|\t).*\n+)+)(\n*[ ]{0,3}[^ \t\n]|(?=~0))/g,
    +                function (wholeMatch, m1, m2) {
    +                    var codeblock = m1;
    +                    var nextChar = m2;
    +
    +                    codeblock = _EncodeCode(_Outdent(codeblock));
    +                    codeblock = _Detab(codeblock);
    +                    codeblock = codeblock.replace(/^\n+/g, ""); // trim leading newlines
    +                    codeblock = codeblock.replace(/\n+$/g, ""); // trim trailing whitespace
    +
+                    codeblock = "<pre><code>" + codeblock + "\n</code></pre>
    "; + + return "\n\n" + codeblock + "\n\n" + nextChar; + } + ); + + // attacklab: strip sentinel + text = text.replace(/~0/, ""); + + return text; + } + + function _DoCodeSpans(text) { + // + // * Backtick quotes are used for spans. + // + // * You can use multiple backticks as the delimiters if you want to + // include literal backticks in the code span. So, this input: + // + // Just type ``foo `bar` baz`` at the prompt. + // + // Will translate to: + // + //

<p>Just type <code>foo `bar` baz</code> at the prompt.</p>

    + // + // There's no arbitrary limit to the number of backticks you + // can use as delimters. If you need three consecutive backticks + // in your code, use four for delimiters, etc. + // + // * You can use spaces to get literal backticks at the edges: + // + // ... type `` `bar` `` ... + // + // Turns to: + // + // ... type `bar` ... + // + + /* + text = text.replace(/ + (^|[^\\`]) // Character before opening ` can't be a backslash or backtick + (`+) // $2 = Opening run of ` + (?!`) // and no more backticks -- match the full run + ( // $3 = The code block + [^\r]*? + [^`] // attacklab: work around lack of lookbehind + ) + \2 // Matching closer + (?!`) + /gm, function(){...}); + */ + + text = text.replace(/(^|[^\\`])(`+)(?!`)([^\r]*?[^`])\2(?!`)/gm, + function (wholeMatch, m1, m2, m3, m4) { + var c = m3; + c = c.replace(/^([ \t]*)/g, ""); // leading whitespace + c = c.replace(/[ \t]*$/g, ""); // trailing whitespace + c = _EncodeCode(c); + c = c.replace(/:\/\//g, "~P"); // to prevent auto-linking. Not necessary in code *blocks*, but in code spans. Will be converted back after the auto-linker runs. + return m1 + "" + c + ""; + } + ); + + return text; + } + + function _EncodeCode(text) { + // + // Encode/escape certain characters inside Markdown code runs. + // The point is that in code, these characters are literals, + // and lose their special Markdown meanings. + // + // Encode all ampersands; HTML entities are not + // entities within a Markdown code span. + text = text.replace(/&/g, "&"); + + // Do the angle bracket song and dance: + text = text.replace(//g, ">"); + + // Now, escape characters that are magic in Markdown: + text = escapeCharacters(text, "\*_{}[]\\", false); + + // jj the line above breaks this: + //--- + + //* Item + + // 1. Subitem + + // special char: * + //--- + + return text; + } + + function _DoItalicsAndBold(text) { + + text = asciify(text); + + // must go first: + + // (^|[\W_]) Start with a non-letter or beginning of string. Store in \1. + // (?:(?!\1)|(?=^)) Either the next character is *not* the same as the previous, + // or we started at the end of the string (in which case the previous + // group had zero width, so we're still there). Because the next + // character is the marker, this means that if there are e.g. multiple + // underscores in a row, we can only match the left-most ones (which + // prevents foo___bar__ from getting bolded) + // (\*|_) The marker character itself, asterisk or underscore. Store in \2. + // \2 The marker again, since bold needs two. + // (?=\S) The first bolded character cannot be a space. + // ([^\r]*?\S) The actual bolded string. At least one character, and it cannot *end* + // with a space either. Note that like in many other places, [^\r] is + // just a workaround for JS' lack of single-line regexes; it's equivalent + // to a . in an /s regex, because the string cannot contain any \r (they + // are removed in the normalizing step). + // \2\2 The marker character, twice -- end of bold. + // (?!\2) Not followed by another marker character (ensuring that we match the + // rightmost two in a longer row)... + // (?=[\W_]|$) ...but by any other non-word character or the end of string. + text = text.replace(/(^|[\W_])(?:(?!\1)|(?=^))(\*|_)\2(?=\S)([^\r]*?\S)\2\2(?!\2)(?=[\W_]|$)/g, + "$1$3"); + + // This is almost identical to the regex, except 1) there's obviously just one marker + // character, and 2) the italicized string cannot contain the marker character. 
+ text = text.replace(/(^|[\W_])(?:(?!\1)|(?=^))(\*|_)(?=\S)((?:(?!\2)[^\r])*?\S)\2(?!\2)(?=[\W_]|$)/g, + "$1$3"); + + return deasciify(text); + } + + function _DoBlockQuotes(text) { + + /* + text = text.replace(/ + ( // Wrap whole match in $1 + ( + ^[ \t]*>[ \t]? // '>' at the start of a line + .+\n // rest of the first line + (.+\n)* // subsequent consecutive lines + \n* // blanks + )+ + ) + /gm, function(){...}); + */ + + text = text.replace(/((^[ \t]*>[ \t]?.+\n(.+\n)*\n*)+)/gm, + function (wholeMatch, m1) { + var bq = m1; + + // attacklab: hack around Konqueror 3.5.4 bug: + // "----------bug".replace(/^-/g,"") == "bug" + + bq = bq.replace(/^[ \t]*>[ \t]?/gm, "~0"); // trim one level of quoting + + // attacklab: clean up hack + bq = bq.replace(/~0/g, ""); + + bq = bq.replace(/^[ \t]+$/gm, ""); // trim whitespace-only lines + bq = _RunBlockGamut(bq); // recurse + + bq = bq.replace(/(^|\n)/g, "$1 "); + // These leading spaces screw with
<pre> content, so we need to fix that:
    +                    bq = bq.replace(
+                            /(\s*<pre>[^\r]+?<\/pre>)/gm,
    +                        function (wholeMatch, m1) {
    +                            var pre = m1;
    +                            // attacklab: hack around Konqueror 3.5.4 bug:
    +                            pre = pre.replace(/^  /mg, "~0");
    +                            pre = pre.replace(/~0/g, "");
    +                            return pre;
    +                        });
    +
+                    return hashBlock("<blockquote>\n" + bq + "\n</blockquote>");
+                }
+            );
+            return text;
+        }
+
+        function _FormParagraphs(text, doNotUnhash) {
+            //
+            // Params:
+            //  $text - string to process with html <p>

    tags + // + + // Strip leading and trailing lines: + text = text.replace(/^\n+/g, ""); + text = text.replace(/\n+$/g, ""); + + var grafs = text.split(/\n{2,}/g); + var grafsOut = []; + + var markerRe = /~K(\d+)K/; + + // + // Wrap

<p> tags.
+            //
+            var end = grafs.length;
+            for (var i = 0; i < end; i++) {
+                var str = grafs[i];
+
+                // if this is an HTML marker, copy it
+                if (markerRe.test(str)) {
+                    grafsOut.push(str);
+                }
+                else if (/\S/.test(str)) {
+                    str = _RunSpanGamut(str);
+                    str = str.replace(/^([ \t]*)/g, "<p>");
+                    str += "</p>

    " + grafsOut.push(str); + } + + } + // + // Unhashify HTML blocks + // + if (!doNotUnhash) { + end = grafsOut.length; + for (var i = 0; i < end; i++) { + var foundAny = true; + while (foundAny) { // we may need several runs, since the data may be nested + foundAny = false; + grafsOut[i] = grafsOut[i].replace(/~K(\d+)K/g, function (wholeMatch, id) { + foundAny = true; + return g_html_blocks[id]; + }); + } + } + } + return grafsOut.join("\n\n"); + } + + function _EncodeAmpsAndAngles(text) { + // Smart processing for ampersands and angle brackets that need to be encoded. + + // Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: + // http://bumppo.net/projects/amputator/ + text = text.replace(/&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)/g, "&"); + + // Encode naked <'s + text = text.replace(/<(?![a-z\/?!]|~D)/gi, "<"); + + return text; + } + + function _EncodeBackslashEscapes(text) { + // + // Parameter: String. + // Returns: The string, with after processing the following backslash + // escape sequences. + // + + // attacklab: The polite way to do this is with the new + // escapeCharacters() function: + // + // text = escapeCharacters(text,"\\",true); + // text = escapeCharacters(text,"`*_{}[]()>#+-.!",true); + // + // ...but we're sidestepping its use of the (slow) RegExp constructor + // as an optimization for Firefox. This function gets called a LOT. + + text = text.replace(/\\(\\)/g, escapeCharacters_callback); + text = text.replace(/\\([`*_{}\[\]()>#+-.!])/g, escapeCharacters_callback); + return text; + } + + var charInsideUrl = "[-A-Z0-9+&@#/%?=~_|[\\]()!:,.;]", + charEndingUrl = "[-A-Z0-9+&@#/%=~_|[\\])]", + autoLinkRegex = new RegExp("(=\"|<)?\\b(https?|ftp)(://" + charInsideUrl + "*" + charEndingUrl + ")(?=$|\\W)", "gi"), + endCharRegex = new RegExp(charEndingUrl, "i"); + + function handleTrailingParens(wholeMatch, lookbehind, protocol, link) { + if (lookbehind) + return wholeMatch; + if (link.charAt(link.length - 1) !== ")") + return "<" + protocol + link + ">"; + var parens = link.match(/[()]/g); + var level = 0; + for (var i = 0; i < parens.length; i++) { + if (parens[i] === "(") { + if (level <= 0) + level = 1; + else + level++; + } + else { + level--; + } + } + var tail = ""; + if (level < 0) { + var re = new RegExp("\\){1," + (-level) + "}$"); + link = link.replace(re, function (trailingParens) { + tail = trailingParens; + return ""; + }); + } + if (tail) { + var lastChar = link.charAt(link.length - 1); + if (!endCharRegex.test(lastChar)) { + tail = lastChar + tail; + link = link.substr(0, link.length - 1); + } + } + return "<" + protocol + link + ">" + tail; + } + + function _DoAutoLinks(text) { + + // note that at this point, all other URL in the text are already hyperlinked as
    + // *except* for the case + + // automatically add < and > around unadorned raw hyperlinks + // must be preceded by a non-word character (and not by =" or <) and followed by non-word/EOF character + // simulating the lookbehind in a consuming way is okay here, since a URL can neither and with a " nor + // with a <, so there is no risk of overlapping matches. + text = text.replace(autoLinkRegex, handleTrailingParens); + + // autolink anything like + + + var replacer = function (wholematch, m1) { + var url = encodeProblemUrlChars(m1); + url = escapeCharacters(url, "*_"); + + return "" + pluginHooks.plainLinkText(m1) + ""; + }; + text = text.replace(/<((https?|ftp):[^'">\s]+)>/gi, replacer); + + // Email addresses: + /* + text = text.replace(/ + < + (?:mailto:)? + ( + [-.\w]+ + \@ + [-a-z0-9]+(\.[-a-z0-9]+)*\.[a-z]+ + ) + > + /gi, _DoAutoLinks_callback()); + */ + + /* disabling email autolinking, since we don't do that on the server, either + text = text.replace(/<(?:mailto:)?([-.\w]+\@[-a-z0-9]+(\.[-a-z0-9]+)*\.[a-z]+)>/gi, + function(wholeMatch,m1) { + return _EncodeEmailAddress( _UnescapeSpecialChars(m1) ); + } + ); + */ + return text; + } + + function _UnescapeSpecialChars(text) { + // + // Swap back in all the special characters we've hidden. + // + text = text.replace(/~E(\d+)E/g, + function (wholeMatch, m1) { + var charCodeToReplace = parseInt(m1); + return String.fromCharCode(charCodeToReplace); + } + ); + return text; + } + + function _Outdent(text) { + // + // Remove one level of line-leading tabs or spaces + // + + // attacklab: hack around Konqueror 3.5.4 bug: + // "----------bug".replace(/^-/g,"") == "bug" + + text = text.replace(/^(\t|[ ]{1,4})/gm, "~0"); // attacklab: g_tab_width + + // attacklab: clean up hack + text = text.replace(/~0/g, "") + + return text; + } + + function _Detab(text) { + if (!/\t/.test(text)) + return text; + + var spaces = [" ", " ", " ", " "], + skew = 0, + v; + + return text.replace(/[\n\t]/g, function (match, offset) { + if (match === "\n") { + skew = offset + 1; + return match; + } + v = (offset - skew) % 4; + skew = offset + 1; + return spaces[v]; + }); + } + + // + // attacklab: Utility functions + // + + var _problemUrlChars = /(?:["'*()[\]:]|~D)/g; + + // hex-encodes some unusual "problem" chars in URLs to avoid URL detection problems + function encodeProblemUrlChars(url) { + if (!url) + return ""; + + var len = url.length; + + return url.replace(_problemUrlChars, function (match, offset) { + if (match == "~D") // escape for dollar + return "%24"; + if (match == ":") { + if (offset == len - 1 || /[0-9\/]/.test(url.charAt(offset + 1))) + return ":" + } + return "%" + match.charCodeAt(0).toString(16); + }); + } + + + function escapeCharacters(text, charsToEscape, afterBackslash) { + // First we have to escape the escape characters so that + // we can build a character class out of them + var regexString = "([" + charsToEscape.replace(/([\[\]\\])/g, "\\$1") + "])"; + + if (afterBackslash) { + regexString = "\\\\" + regexString; + } + + var regex = new RegExp(regexString, "g"); + text = text.replace(regex, escapeCharacters_callback); + + return text; + } + + + function escapeCharacters_callback(wholeMatch, m1) { + var charCodeToEscape = m1.charCodeAt(0); + return "~E" + charCodeToEscape + "E"; + } + + }; // end of the Markdown.Converter constructor + +})();
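
A minimal usage sketch (not part of the patch itself), assuming only the API defined in the file above -- the Markdown.Converter constructor, its optional nonAsciiLetters flag, and the hooks collection:

    var converter = new Markdown.Converter({ nonAsciiLetters: true });

    // rewrite the raw Markdown source before any conversion happens
    converter.hooks.chain("preConversion", function (text) {
        return text.replace(/\r\n?/g, "\n");
    });

    var html = converter.makeHtml("Markdown *rocks*.");
    // html should now be "<p>Markdown <em>rocks</em>.</p>"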