\n * ^\n * |\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('lineEnding')\n effects.consume(code)\n effects.exit('lineEnding')\n return effects.attempt(blankLine, ok, nok)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {factorySpace} from 'micromark-factory-space'\nimport {\n asciiAlpha,\n asciiAlphanumeric,\n markdownLineEnding,\n markdownLineEndingOrSpace,\n markdownSpace\n} from 'micromark-util-character'\n/** @type {Construct} */\nexport const htmlText = {\n name: 'htmlText',\n tokenize: tokenizeHtmlText\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeHtmlText(effects, ok, nok) {\n const self = this\n /** @type {NonNullable
| undefined} */\n let marker\n /** @type {number} */\n let index\n /** @type {State} */\n let returnState\n return start\n\n /**\n * Start of HTML (text).\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('htmlText')\n effects.enter('htmlTextData')\n effects.consume(code)\n return open\n }\n\n /**\n * After `<`, at tag name or other stuff.\n *\n * ```markdown\n * > | a c\n * ^\n * > | a c\n * ^\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (code === 33) {\n effects.consume(code)\n return declarationOpen\n }\n if (code === 47) {\n effects.consume(code)\n return tagCloseStart\n }\n if (code === 63) {\n effects.consume(code)\n return instruction\n }\n\n // ASCII alphabetical.\n if (asciiAlpha(code)) {\n effects.consume(code)\n return tagOpen\n }\n return nok(code)\n }\n\n /**\n * After ` | a c\n * ^\n * > | a c\n * ^\n * > | a &<]]> c\n * ^\n * ```\n *\n * @type {State}\n */\n function declarationOpen(code) {\n if (code === 45) {\n effects.consume(code)\n return commentOpenInside\n }\n if (code === 91) {\n effects.consume(code)\n index = 0\n return cdataOpenInside\n }\n if (asciiAlpha(code)) {\n effects.consume(code)\n return declaration\n }\n return nok(code)\n }\n\n /**\n * In a comment, after ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function commentOpenInside(code) {\n if (code === 45) {\n effects.consume(code)\n return commentEnd\n }\n return nok(code)\n }\n\n /**\n * In comment.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function comment(code) {\n if (code === null) {\n return nok(code)\n }\n if (code === 45) {\n effects.consume(code)\n return commentClose\n }\n if (markdownLineEnding(code)) {\n returnState = comment\n return lineEndingBefore(code)\n }\n effects.consume(code)\n return comment\n }\n\n /**\n * In comment, after `-`.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function commentClose(code) {\n if (code === 45) {\n effects.consume(code)\n return commentEnd\n }\n return comment(code)\n }\n\n /**\n * In comment, after `--`.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function commentEnd(code) {\n return code === 62\n ? end(code)\n : code === 45\n ? commentClose(code)\n : comment(code)\n }\n\n /**\n * After ` | a &<]]> b\n * ^^^^^^\n * ```\n *\n * @type {State}\n */\n function cdataOpenInside(code) {\n const value = 'CDATA['\n if (code === value.charCodeAt(index++)) {\n effects.consume(code)\n return index === value.length ? 
cdata : cdataOpenInside\n }\n return nok(code)\n }\n\n /**\n * In CDATA.\n *\n * ```markdown\n * > | a &<]]> b\n * ^^^\n * ```\n *\n * @type {State}\n */\n function cdata(code) {\n if (code === null) {\n return nok(code)\n }\n if (code === 93) {\n effects.consume(code)\n return cdataClose\n }\n if (markdownLineEnding(code)) {\n returnState = cdata\n return lineEndingBefore(code)\n }\n effects.consume(code)\n return cdata\n }\n\n /**\n * In CDATA, after `]`, at another `]`.\n *\n * ```markdown\n * > | a &<]]> b\n * ^\n * ```\n *\n * @type {State}\n */\n function cdataClose(code) {\n if (code === 93) {\n effects.consume(code)\n return cdataEnd\n }\n return cdata(code)\n }\n\n /**\n * In CDATA, after `]]`, at `>`.\n *\n * ```markdown\n * > | a &<]]> b\n * ^\n * ```\n *\n * @type {State}\n */\n function cdataEnd(code) {\n if (code === 62) {\n return end(code)\n }\n if (code === 93) {\n effects.consume(code)\n return cdataEnd\n }\n return cdata(code)\n }\n\n /**\n * In declaration.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function declaration(code) {\n if (code === null || code === 62) {\n return end(code)\n }\n if (markdownLineEnding(code)) {\n returnState = declaration\n return lineEndingBefore(code)\n }\n effects.consume(code)\n return declaration\n }\n\n /**\n * In instruction.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function instruction(code) {\n if (code === null) {\n return nok(code)\n }\n if (code === 63) {\n effects.consume(code)\n return instructionClose\n }\n if (markdownLineEnding(code)) {\n returnState = instruction\n return lineEndingBefore(code)\n }\n effects.consume(code)\n return instruction\n }\n\n /**\n * In instruction, after `?`, at `>`.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function instructionClose(code) {\n return code === 62 ? 
end(code) : instruction(code)\n }\n\n /**\n * After ``, in closing tag, at tag name.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagCloseStart(code) {\n // ASCII alphabetical.\n if (asciiAlpha(code)) {\n effects.consume(code)\n return tagClose\n }\n return nok(code)\n }\n\n /**\n * After ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagClose(code) {\n // ASCII alphanumerical and `-`.\n if (code === 45 || asciiAlphanumeric(code)) {\n effects.consume(code)\n return tagClose\n }\n return tagCloseBetween(code)\n }\n\n /**\n * In closing tag, after tag name.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagCloseBetween(code) {\n if (markdownLineEnding(code)) {\n returnState = tagCloseBetween\n return lineEndingBefore(code)\n }\n if (markdownSpace(code)) {\n effects.consume(code)\n return tagCloseBetween\n }\n return end(code)\n }\n\n /**\n * After ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpen(code) {\n // ASCII alphanumerical and `-`.\n if (code === 45 || asciiAlphanumeric(code)) {\n effects.consume(code)\n return tagOpen\n }\n if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {\n return tagOpenBetween(code)\n }\n return nok(code)\n }\n\n /**\n * In opening tag, after tag name.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenBetween(code) {\n if (code === 47) {\n effects.consume(code)\n return end\n }\n\n // ASCII alphabetical and `:` and `_`.\n if (code === 58 || code === 95 || asciiAlpha(code)) {\n effects.consume(code)\n return tagOpenAttributeName\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenBetween\n return lineEndingBefore(code)\n }\n if (markdownSpace(code)) {\n effects.consume(code)\n return tagOpenBetween\n }\n return end(code)\n }\n\n /**\n * In attribute name.\n *\n * ```markdown\n * > | a d\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeName(code) {\n // ASCII alphabetical and `-`, `.`, `:`, and `_`.\n if (\n code === 45 ||\n code === 46 ||\n code === 58 ||\n code === 95 ||\n asciiAlphanumeric(code)\n ) {\n effects.consume(code)\n return tagOpenAttributeName\n }\n return tagOpenAttributeNameAfter(code)\n }\n\n /**\n * After attribute name, before initializer, the end of the tag, or\n * whitespace.\n *\n * ```markdown\n * > | a d\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeNameAfter(code) {\n if (code === 61) {\n effects.consume(code)\n return tagOpenAttributeValueBefore\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenAttributeNameAfter\n return lineEndingBefore(code)\n }\n if (markdownSpace(code)) {\n effects.consume(code)\n return tagOpenAttributeNameAfter\n }\n return tagOpenBetween(code)\n }\n\n /**\n * Before unquoted, double quoted, or single quoted attribute value, allowing\n * whitespace.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueBefore(code) {\n if (\n code === null ||\n code === 60 ||\n code === 61 ||\n code === 62 ||\n code === 96\n ) {\n return nok(code)\n }\n if (code === 34 || code === 39) {\n effects.consume(code)\n marker = code\n return tagOpenAttributeValueQuoted\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenAttributeValueBefore\n return lineEndingBefore(code)\n }\n if (markdownSpace(code)) {\n effects.consume(code)\n return tagOpenAttributeValueBefore\n }\n effects.consume(code)\n return tagOpenAttributeValueUnquoted\n }\n\n /**\n * In double or 
single quoted attribute value.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueQuoted(code) {\n if (code === marker) {\n effects.consume(code)\n marker = undefined\n return tagOpenAttributeValueQuotedAfter\n }\n if (code === null) {\n return nok(code)\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenAttributeValueQuoted\n return lineEndingBefore(code)\n }\n effects.consume(code)\n return tagOpenAttributeValueQuoted\n }\n\n /**\n * In unquoted attribute value.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueUnquoted(code) {\n if (\n code === null ||\n code === 34 ||\n code === 39 ||\n code === 60 ||\n code === 61 ||\n code === 96\n ) {\n return nok(code)\n }\n if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {\n return tagOpenBetween(code)\n }\n effects.consume(code)\n return tagOpenAttributeValueUnquoted\n }\n\n /**\n * After double or single quoted attribute value, before whitespace or the end\n * of the tag.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueQuotedAfter(code) {\n if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {\n return tagOpenBetween(code)\n }\n return nok(code)\n }\n\n /**\n * In certain circumstances of a tag where only an `>` is allowed.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function end(code) {\n if (code === 62) {\n effects.consume(code)\n effects.exit('htmlTextData')\n effects.exit('htmlText')\n return ok\n }\n return nok(code)\n }\n\n /**\n * At eol.\n *\n * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about\n * > empty tokens.\n *\n * ```markdown\n * > | a \n * ```\n *\n * @type {State}\n */\n function lineEndingBefore(code) {\n effects.exit('htmlTextData')\n effects.enter('lineEnding')\n effects.consume(code)\n effects.exit('lineEnding')\n return lineEndingAfter\n }\n\n /**\n * After eol, at optional whitespace.\n *\n * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about\n * > empty tokens.\n *\n * ```markdown\n * | a \n * ^\n * ```\n *\n * @type {State}\n */\n function lineEndingAfter(code) {\n // Always populated by defaults.\n\n return markdownSpace(code)\n ? factorySpace(\n effects,\n lineEndingAfterPrefix,\n 'linePrefix',\n self.parser.constructs.disable.null.includes('codeIndented')\n ? 
undefined\n : 4\n )(code)\n : lineEndingAfterPrefix(code)\n }\n\n /**\n * After eol, after optional whitespace.\n *\n * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about\n * > empty tokens.\n *\n * ```markdown\n * | a \n * ^\n * ```\n *\n * @type {State}\n */\n function lineEndingAfterPrefix(code) {\n effects.enter('htmlTextData')\n return returnState(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').Event} Event\n * @typedef {import('micromark-util-types').Resolver} Resolver\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').Token} Token\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {factoryDestination} from 'micromark-factory-destination'\nimport {factoryLabel} from 'micromark-factory-label'\nimport {factoryTitle} from 'micromark-factory-title'\nimport {factoryWhitespace} from 'micromark-factory-whitespace'\nimport {markdownLineEndingOrSpace} from 'micromark-util-character'\nimport {push, splice} from 'micromark-util-chunked'\nimport {normalizeIdentifier} from 'micromark-util-normalize-identifier'\nimport {resolveAll} from 'micromark-util-resolve-all'\n/** @type {Construct} */\nexport const labelEnd = {\n name: 'labelEnd',\n tokenize: tokenizeLabelEnd,\n resolveTo: resolveToLabelEnd,\n resolveAll: resolveAllLabelEnd\n}\n\n/** @type {Construct} */\nconst resourceConstruct = {\n tokenize: tokenizeResource\n}\n/** @type {Construct} */\nconst referenceFullConstruct = {\n tokenize: tokenizeReferenceFull\n}\n/** @type {Construct} */\nconst referenceCollapsedConstruct = {\n tokenize: tokenizeReferenceCollapsed\n}\n\n/** @type {Resolver} */\nfunction resolveAllLabelEnd(events) {\n let index = -1\n while (++index < events.length) {\n const token = events[index][1]\n if (\n token.type === 'labelImage' ||\n token.type === 'labelLink' ||\n token.type === 'labelEnd'\n ) {\n // Remove the marker.\n events.splice(index + 1, token.type === 'labelImage' ? 4 : 2)\n token.type = 'data'\n index++\n }\n }\n return events\n}\n\n/** @type {Resolver} */\nfunction resolveToLabelEnd(events, context) {\n let index = events.length\n let offset = 0\n /** @type {Token} */\n let token\n /** @type {number | undefined} */\n let open\n /** @type {number | undefined} */\n let close\n /** @type {Array} */\n let media\n\n // Find an opening.\n while (index--) {\n token = events[index][1]\n if (open) {\n // If we see another link, or inactive link label, we’ve been here before.\n if (\n token.type === 'link' ||\n (token.type === 'labelLink' && token._inactive)\n ) {\n break\n }\n\n // Mark other link openings as inactive, as we can’t have links in\n // links.\n if (events[index][0] === 'enter' && token.type === 'labelLink') {\n token._inactive = true\n }\n } else if (close) {\n if (\n events[index][0] === 'enter' &&\n (token.type === 'labelImage' || token.type === 'labelLink') &&\n !token._balanced\n ) {\n open = index\n if (token.type !== 'labelLink') {\n offset = 2\n break\n }\n }\n } else if (token.type === 'labelEnd') {\n close = index\n }\n }\n const group = {\n type: events[open][1].type === 'labelLink' ? 
'link' : 'image',\n start: Object.assign({}, events[open][1].start),\n end: Object.assign({}, events[events.length - 1][1].end)\n }\n const label = {\n type: 'label',\n start: Object.assign({}, events[open][1].start),\n end: Object.assign({}, events[close][1].end)\n }\n const text = {\n type: 'labelText',\n start: Object.assign({}, events[open + offset + 2][1].end),\n end: Object.assign({}, events[close - 2][1].start)\n }\n media = [\n ['enter', group, context],\n ['enter', label, context]\n ]\n\n // Opening marker.\n media = push(media, events.slice(open + 1, open + offset + 3))\n\n // Text open.\n media = push(media, [['enter', text, context]])\n\n // Always populated by defaults.\n\n // Between.\n media = push(\n media,\n resolveAll(\n context.parser.constructs.insideSpan.null,\n events.slice(open + offset + 4, close - 3),\n context\n )\n )\n\n // Text close, marker close, label close.\n media = push(media, [\n ['exit', text, context],\n events[close - 2],\n events[close - 1],\n ['exit', label, context]\n ])\n\n // Reference, resource, or so.\n media = push(media, events.slice(close + 1))\n\n // Media close.\n media = push(media, [['exit', group, context]])\n splice(events, open, events.length, media)\n return events\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeLabelEnd(effects, ok, nok) {\n const self = this\n let index = self.events.length\n /** @type {Token} */\n let labelStart\n /** @type {boolean} */\n let defined\n\n // Find an opening.\n while (index--) {\n if (\n (self.events[index][1].type === 'labelImage' ||\n self.events[index][1].type === 'labelLink') &&\n !self.events[index][1]._balanced\n ) {\n labelStart = self.events[index][1]\n break\n }\n }\n return start\n\n /**\n * Start of label end.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * > | [a][b] c\n * ^\n * > | [a][] b\n * ^\n * > | [a] b\n * ```\n *\n * @type {State}\n */\n function start(code) {\n // If there is not an okay opening.\n if (!labelStart) {\n return nok(code)\n }\n\n // If the corresponding label (link) start is marked as inactive,\n // it means we’d be wrapping a link, like this:\n //\n // ```markdown\n // > | a [b [c](d) e](f) g.\n // ^\n // ```\n //\n // We can’t have that, so it’s just balanced brackets.\n if (labelStart._inactive) {\n return labelEndNok(code)\n }\n defined = self.parser.defined.includes(\n normalizeIdentifier(\n self.sliceSerialize({\n start: labelStart.end,\n end: self.now()\n })\n )\n )\n effects.enter('labelEnd')\n effects.enter('labelMarker')\n effects.consume(code)\n effects.exit('labelMarker')\n effects.exit('labelEnd')\n return after\n }\n\n /**\n * After `]`.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * > | [a][b] c\n * ^\n * > | [a][] b\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n // Note: `markdown-rs` also parses GFM footnotes here, which for us is in\n // an extension.\n\n // Resource (`[asd](fgh)`)?\n if (code === 40) {\n return effects.attempt(\n resourceConstruct,\n labelEndOk,\n defined ? labelEndOk : labelEndNok\n )(code)\n }\n\n // Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?\n if (code === 91) {\n return effects.attempt(\n referenceFullConstruct,\n labelEndOk,\n defined ? referenceNotFull : labelEndNok\n )(code)\n }\n\n // Shortcut (`[asd]`) reference?\n return defined ? 
labelEndOk(code) : labelEndNok(code)\n }\n\n /**\n * After `]`, at `[`, but not at a full reference.\n *\n * > 👉 **Note**: we only get here if the label is defined.\n *\n * ```markdown\n * > | [a][] b\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceNotFull(code) {\n return effects.attempt(\n referenceCollapsedConstruct,\n labelEndOk,\n labelEndNok\n )(code)\n }\n\n /**\n * Done, we found something.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * > | [a][b] c\n * ^\n * > | [a][] b\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelEndOk(code) {\n // Note: `markdown-rs` does a bunch of stuff here.\n return ok(code)\n }\n\n /**\n * Done, it’s nothing.\n *\n * There was an okay opening, but we didn’t match anything.\n *\n * ```markdown\n * > | [a](b c\n * ^\n * > | [a][b c\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelEndNok(code) {\n labelStart._balanced = true\n return nok(code)\n }\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeResource(effects, ok, nok) {\n return resourceStart\n\n /**\n * At a resource.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceStart(code) {\n effects.enter('resource')\n effects.enter('resourceMarker')\n effects.consume(code)\n effects.exit('resourceMarker')\n return resourceBefore\n }\n\n /**\n * In resource, after `(`, at optional whitespace.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceBefore(code) {\n return markdownLineEndingOrSpace(code)\n ? factoryWhitespace(effects, resourceOpen)(code)\n : resourceOpen(code)\n }\n\n /**\n * In resource, after optional whitespace, at `)` or a destination.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceOpen(code) {\n if (code === 41) {\n return resourceEnd(code)\n }\n return factoryDestination(\n effects,\n resourceDestinationAfter,\n resourceDestinationMissing,\n 'resourceDestination',\n 'resourceDestinationLiteral',\n 'resourceDestinationLiteralMarker',\n 'resourceDestinationRaw',\n 'resourceDestinationString',\n 32\n )(code)\n }\n\n /**\n * In resource, after destination, at optional whitespace.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceDestinationAfter(code) {\n return markdownLineEndingOrSpace(code)\n ? factoryWhitespace(effects, resourceBetween)(code)\n : resourceEnd(code)\n }\n\n /**\n * At invalid destination.\n *\n * ```markdown\n * > | [a](<<) b\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceDestinationMissing(code) {\n return nok(code)\n }\n\n /**\n * In resource, after destination and whitespace, at `(` or title.\n *\n * ```markdown\n * > | [a](b ) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceBetween(code) {\n if (code === 34 || code === 39 || code === 40) {\n return factoryTitle(\n effects,\n resourceTitleAfter,\n nok,\n 'resourceTitle',\n 'resourceTitleMarker',\n 'resourceTitleString'\n )(code)\n }\n return resourceEnd(code)\n }\n\n /**\n * In resource, after title, at optional whitespace.\n *\n * ```markdown\n * > | [a](b \"c\") d\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceTitleAfter(code) {\n return markdownLineEndingOrSpace(code)\n ? 
factoryWhitespace(effects, resourceEnd)(code)\n : resourceEnd(code)\n }\n\n /**\n * In resource, at `)`.\n *\n * ```markdown\n * > | [a](b) d\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceEnd(code) {\n if (code === 41) {\n effects.enter('resourceMarker')\n effects.consume(code)\n effects.exit('resourceMarker')\n effects.exit('resource')\n return ok\n }\n return nok(code)\n }\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeReferenceFull(effects, ok, nok) {\n const self = this\n return referenceFull\n\n /**\n * In a reference (full), at the `[`.\n *\n * ```markdown\n * > | [a][b] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceFull(code) {\n return factoryLabel.call(\n self,\n effects,\n referenceFullAfter,\n referenceFullMissing,\n 'reference',\n 'referenceMarker',\n 'referenceString'\n )(code)\n }\n\n /**\n * In a reference (full), after `]`.\n *\n * ```markdown\n * > | [a][b] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceFullAfter(code) {\n return self.parser.defined.includes(\n normalizeIdentifier(\n self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1)\n )\n )\n ? ok(code)\n : nok(code)\n }\n\n /**\n * In reference (full) that was missing.\n *\n * ```markdown\n * > | [a][b d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceFullMissing(code) {\n return nok(code)\n }\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeReferenceCollapsed(effects, ok, nok) {\n return referenceCollapsedStart\n\n /**\n * In reference (collapsed), at `[`.\n *\n * > 👉 **Note**: we only get here if the label is defined.\n *\n * ```markdown\n * > | [a][] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceCollapsedStart(code) {\n // We only attempt a collapsed label if there’s a `[`.\n\n effects.enter('reference')\n effects.enter('referenceMarker')\n effects.consume(code)\n effects.exit('referenceMarker')\n return referenceCollapsedOpen\n }\n\n /**\n * In reference (collapsed), at `]`.\n *\n * > 👉 **Note**: we only get here if the label is defined.\n *\n * ```markdown\n * > | [a][] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceCollapsedOpen(code) {\n if (code === 93) {\n effects.enter('referenceMarker')\n effects.consume(code)\n effects.exit('referenceMarker')\n effects.exit('reference')\n return ok\n }\n return nok(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {labelEnd} from './label-end.js'\n\n/** @type {Construct} */\nexport const labelStartImage = {\n name: 'labelStartImage',\n tokenize: tokenizeLabelStartImage,\n resolveAll: labelEnd.resolveAll\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeLabelStartImage(effects, ok, nok) {\n const self = this\n return start\n\n /**\n * Start of label (image) start.\n *\n * ```markdown\n * > | a ![b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('labelImage')\n effects.enter('labelImageMarker')\n effects.consume(code)\n effects.exit('labelImageMarker')\n return open\n }\n\n /**\n * After `!`, at `[`.\n *\n * ```markdown\n * > | a ![b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (code === 91) {\n effects.enter('labelMarker')\n effects.consume(code)\n 
effects.exit('labelMarker')\n effects.exit('labelImage')\n return after\n }\n return nok(code)\n }\n\n /**\n * After `![`.\n *\n * ```markdown\n * > | a ![b] c\n * ^\n * ```\n *\n * This is needed because, when GFM footnotes are enabled, images never\n * form when started with a `^`.\n * Instead, links form:\n *\n * ```markdown\n * ![^a](b)\n *\n * ![^a][b]\n *\n * [b]: c\n * ```\n *\n * ```html\n * <p>!<a href=\"b\">^a</a></p>\n *\n * <p>!<a href=\"c\">^a</a></p>
\n * ```\n *\n * @type {State}\n */\n function after(code) {\n // To do: use a new field to do this, this is still needed for\n // `micromark-extension-gfm-footnote`, but the `label-start-link`\n // behavior isn’t.\n // Hidden footnotes hook.\n /* c8 ignore next 3 */\n return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs\n ? nok(code)\n : ok(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {labelEnd} from './label-end.js'\n\n/** @type {Construct} */\nexport const labelStartLink = {\n name: 'labelStartLink',\n tokenize: tokenizeLabelStartLink,\n resolveAll: labelEnd.resolveAll\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeLabelStartLink(effects, ok, nok) {\n const self = this\n return start\n\n /**\n * Start of label (link) start.\n *\n * ```markdown\n * > | a [b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('labelLink')\n effects.enter('labelMarker')\n effects.consume(code)\n effects.exit('labelMarker')\n effects.exit('labelLink')\n return after\n }\n\n /** @type {State} */\n function after(code) {\n // To do: this isn’t needed in `micromark-extension-gfm-footnote`,\n // remove.\n // Hidden footnotes hook.\n /* c8 ignore next 3 */\n return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs\n ? nok(code)\n : ok(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {factorySpace} from 'micromark-factory-space'\nimport {markdownLineEnding} from 'micromark-util-character'\n/** @type {Construct} */\nexport const lineEnding = {\n name: 'lineEnding',\n tokenize: tokenizeLineEnding\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeLineEnding(effects, ok) {\n return start\n\n /** @type {State} */\n function start(code) {\n effects.enter('lineEnding')\n effects.consume(code)\n effects.exit('lineEnding')\n return factorySpace(effects, ok, 'linePrefix')\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {factorySpace} from 'micromark-factory-space'\nimport {markdownLineEnding, markdownSpace} from 'micromark-util-character'\n/** @type {Construct} */\nexport const thematicBreak = {\n name: 'thematicBreak',\n tokenize: tokenizeThematicBreak\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeThematicBreak(effects, ok, nok) {\n let size = 0\n /** @type {NonNullable} */\n let marker\n return start\n\n /**\n * Start of thematic break.\n *\n * ```markdown\n * > | ***\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('thematicBreak')\n // To do: parse indent like `markdown-rs`.\n return before(code)\n }\n\n /**\n * After optional whitespace, at marker.\n *\n * ```markdown\n * > | ***\n * ^\n * ```\n *\n * @type 
{State}\n */\n function before(code) {\n marker = code\n return atBreak(code)\n }\n\n /**\n * After something, before something else.\n *\n * ```markdown\n * > | ***\n * ^\n * ```\n *\n * @type {State}\n */\n function atBreak(code) {\n if (code === marker) {\n effects.enter('thematicBreakSequence')\n return sequence(code)\n }\n if (size >= 3 && (code === null || markdownLineEnding(code))) {\n effects.exit('thematicBreak')\n return ok(code)\n }\n return nok(code)\n }\n\n /**\n * In sequence.\n *\n * ```markdown\n * > | ***\n * ^\n * ```\n *\n * @type {State}\n */\n function sequence(code) {\n if (code === marker) {\n effects.consume(code)\n size++\n return sequence\n }\n effects.exit('thematicBreakSequence')\n return markdownSpace(code)\n ? factorySpace(effects, atBreak, 'whitespace')(code)\n : atBreak(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').ContainerState} ContainerState\n * @typedef {import('micromark-util-types').Exiter} Exiter\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {factorySpace} from 'micromark-factory-space'\nimport {asciiDigit, markdownSpace} from 'micromark-util-character'\nimport {blankLine} from './blank-line.js'\nimport {thematicBreak} from './thematic-break.js'\n\n/** @type {Construct} */\nexport const list = {\n name: 'list',\n tokenize: tokenizeListStart,\n continuation: {\n tokenize: tokenizeListContinuation\n },\n exit: tokenizeListEnd\n}\n\n/** @type {Construct} */\nconst listItemPrefixWhitespaceConstruct = {\n tokenize: tokenizeListItemPrefixWhitespace,\n partial: true\n}\n\n/** @type {Construct} */\nconst indentConstruct = {\n tokenize: tokenizeIndent,\n partial: true\n}\n\n// To do: `markdown-rs` parses list items on their own and later stitches them\n// together.\n\n/**\n * @type {Tokenizer}\n * @this {TokenizeContext}\n */\nfunction tokenizeListStart(effects, ok, nok) {\n const self = this\n const tail = self.events[self.events.length - 1]\n let initialSize =\n tail && tail[1].type === 'linePrefix'\n ? tail[2].sliceSerialize(tail[1], true).length\n : 0\n let size = 0\n return start\n\n /** @type {State} */\n function start(code) {\n const kind =\n self.containerState.type ||\n (code === 42 || code === 43 || code === 45\n ? 'listUnordered'\n : 'listOrdered')\n if (\n kind === 'listUnordered'\n ? !self.containerState.marker || code === self.containerState.marker\n : asciiDigit(code)\n ) {\n if (!self.containerState.type) {\n self.containerState.type = kind\n effects.enter(kind, {\n _container: true\n })\n }\n if (kind === 'listUnordered') {\n effects.enter('listItemPrefix')\n return code === 42 || code === 45\n ? effects.check(thematicBreak, nok, atMarker)(code)\n : atMarker(code)\n }\n if (!self.interrupt || code === 49) {\n effects.enter('listItemPrefix')\n effects.enter('listItemValue')\n return inside(code)\n }\n }\n return nok(code)\n }\n\n /** @type {State} */\n function inside(code) {\n if (asciiDigit(code) && ++size < 10) {\n effects.consume(code)\n return inside\n }\n if (\n (!self.interrupt || size < 2) &&\n (self.containerState.marker\n ? 
code === self.containerState.marker\n : code === 41 || code === 46)\n ) {\n effects.exit('listItemValue')\n return atMarker(code)\n }\n return nok(code)\n }\n\n /**\n * @type {State}\n **/\n function atMarker(code) {\n effects.enter('listItemMarker')\n effects.consume(code)\n effects.exit('listItemMarker')\n self.containerState.marker = self.containerState.marker || code\n return effects.check(\n blankLine,\n // Can’t be empty when interrupting.\n self.interrupt ? nok : onBlank,\n effects.attempt(\n listItemPrefixWhitespaceConstruct,\n endOfPrefix,\n otherPrefix\n )\n )\n }\n\n /** @type {State} */\n function onBlank(code) {\n self.containerState.initialBlankLine = true\n initialSize++\n return endOfPrefix(code)\n }\n\n /** @type {State} */\n function otherPrefix(code) {\n if (markdownSpace(code)) {\n effects.enter('listItemPrefixWhitespace')\n effects.consume(code)\n effects.exit('listItemPrefixWhitespace')\n return endOfPrefix\n }\n return nok(code)\n }\n\n /** @type {State} */\n function endOfPrefix(code) {\n self.containerState.size =\n initialSize +\n self.sliceSerialize(effects.exit('listItemPrefix'), true).length\n return ok(code)\n }\n}\n\n/**\n * @type {Tokenizer}\n * @this {TokenizeContext}\n */\nfunction tokenizeListContinuation(effects, ok, nok) {\n const self = this\n self.containerState._closeFlow = undefined\n return effects.check(blankLine, onBlank, notBlank)\n\n /** @type {State} */\n function onBlank(code) {\n self.containerState.furtherBlankLines =\n self.containerState.furtherBlankLines ||\n self.containerState.initialBlankLine\n\n // We have a blank line.\n // Still, try to consume at most the items size.\n return factorySpace(\n effects,\n ok,\n 'listItemIndent',\n self.containerState.size + 1\n )(code)\n }\n\n /** @type {State} */\n function notBlank(code) {\n if (self.containerState.furtherBlankLines || !markdownSpace(code)) {\n self.containerState.furtherBlankLines = undefined\n self.containerState.initialBlankLine = undefined\n return notInCurrentItem(code)\n }\n self.containerState.furtherBlankLines = undefined\n self.containerState.initialBlankLine = undefined\n return effects.attempt(indentConstruct, ok, notInCurrentItem)(code)\n }\n\n /** @type {State} */\n function notInCurrentItem(code) {\n // While we do continue, we signal that the flow should be closed.\n self.containerState._closeFlow = true\n // As we’re closing flow, we’re no longer interrupting.\n self.interrupt = undefined\n // Always populated by defaults.\n\n return factorySpace(\n effects,\n effects.attempt(list, ok, nok),\n 'linePrefix',\n self.parser.constructs.disable.null.includes('codeIndented')\n ? undefined\n : 4\n )(code)\n }\n}\n\n/**\n * @type {Tokenizer}\n * @this {TokenizeContext}\n */\nfunction tokenizeIndent(effects, ok, nok) {\n const self = this\n return factorySpace(\n effects,\n afterPrefix,\n 'listItemIndent',\n self.containerState.size + 1\n )\n\n /** @type {State} */\n function afterPrefix(code) {\n const tail = self.events[self.events.length - 1]\n return tail &&\n tail[1].type === 'listItemIndent' &&\n tail[2].sliceSerialize(tail[1], true).length === self.containerState.size\n ? 
ok(code)\n : nok(code)\n }\n}\n\n/**\n * @type {Exiter}\n * @this {TokenizeContext}\n */\nfunction tokenizeListEnd(effects) {\n effects.exit(this.containerState.type)\n}\n\n/**\n * @type {Tokenizer}\n * @this {TokenizeContext}\n */\nfunction tokenizeListItemPrefixWhitespace(effects, ok, nok) {\n const self = this\n\n // Always populated by defaults.\n\n return factorySpace(\n effects,\n afterPrefix,\n 'listItemPrefixWhitespace',\n self.parser.constructs.disable.null.includes('codeIndented')\n ? undefined\n : 4 + 1\n )\n\n /** @type {State} */\n function afterPrefix(code) {\n const tail = self.events[self.events.length - 1]\n return !markdownSpace(code) &&\n tail &&\n tail[1].type === 'listItemPrefixWhitespace'\n ? ok(code)\n : nok(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').Resolver} Resolver\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {factorySpace} from 'micromark-factory-space'\nimport {markdownLineEnding, markdownSpace} from 'micromark-util-character'\n/** @type {Construct} */\nexport const setextUnderline = {\n name: 'setextUnderline',\n tokenize: tokenizeSetextUnderline,\n resolveTo: resolveToSetextUnderline\n}\n\n/** @type {Resolver} */\nfunction resolveToSetextUnderline(events, context) {\n // To do: resolve like `markdown-rs`.\n let index = events.length\n /** @type {number | undefined} */\n let content\n /** @type {number | undefined} */\n let text\n /** @type {number | undefined} */\n let definition\n\n // Find the opening of the content.\n // It’ll always exist: we don’t tokenize if it isn’t there.\n while (index--) {\n if (events[index][0] === 'enter') {\n if (events[index][1].type === 'content') {\n content = index\n break\n }\n if (events[index][1].type === 'paragraph') {\n text = index\n }\n }\n // Exit\n else {\n if (events[index][1].type === 'content') {\n // Remove the content end (if needed we’ll add it later)\n events.splice(index, 1)\n }\n if (!definition && events[index][1].type === 'definition') {\n definition = index\n }\n }\n }\n const heading = {\n type: 'setextHeading',\n start: Object.assign({}, events[text][1].start),\n end: Object.assign({}, events[events.length - 1][1].end)\n }\n\n // Change the paragraph to setext heading text.\n events[text][1].type = 'setextHeadingText'\n\n // If we have definitions in the content, we’ll keep on having content,\n // but we need move it.\n if (definition) {\n events.splice(text, 0, ['enter', heading, context])\n events.splice(definition + 1, 0, ['exit', events[content][1], context])\n events[content][1].end = Object.assign({}, events[definition][1].end)\n } else {\n events[content][1] = heading\n }\n\n // Add the heading exit at the end.\n events.push(['exit', heading, context])\n return events\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeSetextUnderline(effects, ok, nok) {\n const self = this\n /** @type {NonNullable} */\n let marker\n return start\n\n /**\n * At start of heading (setext) underline.\n *\n * ```markdown\n * | aa\n * > | ==\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n let index = self.events.length\n /** @type {boolean | undefined} */\n let paragraph\n // Find an opening.\n while (index--) {\n // Skip enter/exit of line ending, line 
prefix, and content.\n // We can now either have a definition or a paragraph.\n if (\n self.events[index][1].type !== 'lineEnding' &&\n self.events[index][1].type !== 'linePrefix' &&\n self.events[index][1].type !== 'content'\n ) {\n paragraph = self.events[index][1].type === 'paragraph'\n break\n }\n }\n\n // To do: handle lazy/pierce like `markdown-rs`.\n // To do: parse indent like `markdown-rs`.\n if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) {\n effects.enter('setextHeadingLine')\n marker = code\n return before(code)\n }\n return nok(code)\n }\n\n /**\n * After optional whitespace, at `-` or `=`.\n *\n * ```markdown\n * | aa\n * > | ==\n * ^\n * ```\n *\n * @type {State}\n */\n function before(code) {\n effects.enter('setextHeadingLineSequence')\n return inside(code)\n }\n\n /**\n * In sequence.\n *\n * ```markdown\n * | aa\n * > | ==\n * ^\n * ```\n *\n * @type {State}\n */\n function inside(code) {\n if (code === marker) {\n effects.consume(code)\n return inside\n }\n effects.exit('setextHeadingLineSequence')\n return markdownSpace(code)\n ? factorySpace(effects, after, 'lineSuffix')(code)\n : after(code)\n }\n\n /**\n * After sequence, after optional whitespace.\n *\n * ```markdown\n * | aa\n * > | ==\n * ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n if (code === null || markdownLineEnding(code)) {\n effects.exit('setextHeadingLine')\n return ok(code)\n }\n return nok(code)\n }\n}\n","/**\n * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct\n * @typedef {import('micromark-util-types').Initializer} Initializer\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n */\n\nimport {blankLine, content} from 'micromark-core-commonmark'\nimport {factorySpace} from 'micromark-factory-space'\nimport {markdownLineEnding} from 'micromark-util-character'\n/** @type {InitialConstruct} */\nexport const flow = {\n tokenize: initializeFlow\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Initializer}\n */\nfunction initializeFlow(effects) {\n const self = this\n const initial = effects.attempt(\n // Try to parse a blank line.\n blankLine,\n atBlankEnding,\n // Try to parse initial flow (essentially, only code).\n effects.attempt(\n this.parser.constructs.flowInitial,\n afterConstruct,\n factorySpace(\n effects,\n effects.attempt(\n this.parser.constructs.flow,\n afterConstruct,\n effects.attempt(content, afterConstruct)\n ),\n 'linePrefix'\n )\n )\n )\n return initial\n\n /** @type {State} */\n function atBlankEnding(code) {\n if (code === null) {\n effects.consume(code)\n return\n }\n effects.enter('lineEndingBlank')\n effects.consume(code)\n effects.exit('lineEndingBlank')\n self.currentConstruct = undefined\n return initial\n }\n\n /** @type {State} */\n function afterConstruct(code) {\n if (code === null) {\n effects.consume(code)\n return\n }\n effects.enter('lineEnding')\n effects.consume(code)\n effects.exit('lineEnding')\n self.currentConstruct = undefined\n return initial\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct\n * @typedef {import('micromark-util-types').Initializer} Initializer\n * @typedef {import('micromark-util-types').Resolver} Resolver\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n */\n\nexport const resolver = {\n resolveAll: 
createResolver()\n}\nexport const string = initializeFactory('string')\nexport const text = initializeFactory('text')\n\n/**\n * @param {'string' | 'text'} field\n * @returns {InitialConstruct}\n */\nfunction initializeFactory(field) {\n return {\n tokenize: initializeText,\n resolveAll: createResolver(\n field === 'text' ? resolveAllLineSuffixes : undefined\n )\n }\n\n /**\n * @this {TokenizeContext}\n * @type {Initializer}\n */\n function initializeText(effects) {\n const self = this\n const constructs = this.parser.constructs[field]\n const text = effects.attempt(constructs, start, notText)\n return start\n\n /** @type {State} */\n function start(code) {\n return atBreak(code) ? text(code) : notText(code)\n }\n\n /** @type {State} */\n function notText(code) {\n if (code === null) {\n effects.consume(code)\n return\n }\n effects.enter('data')\n effects.consume(code)\n return data\n }\n\n /** @type {State} */\n function data(code) {\n if (atBreak(code)) {\n effects.exit('data')\n return text(code)\n }\n\n // Data.\n effects.consume(code)\n return data\n }\n\n /**\n * @param {Code} code\n * @returns {boolean}\n */\n function atBreak(code) {\n if (code === null) {\n return true\n }\n const list = constructs[code]\n let index = -1\n if (list) {\n // Always populated by defaults.\n\n while (++index < list.length) {\n const item = list[index]\n if (!item.previous || item.previous.call(self, self.previous)) {\n return true\n }\n }\n }\n return false\n }\n }\n}\n\n/**\n * @param {Resolver | undefined} [extraResolver]\n * @returns {Resolver}\n */\nfunction createResolver(extraResolver) {\n return resolveAllText\n\n /** @type {Resolver} */\n function resolveAllText(events, context) {\n let index = -1\n /** @type {number | undefined} */\n let enter\n\n // A rather boring computation (to merge adjacent `data` events) which\n // improves mm performance by 29%.\n while (++index <= events.length) {\n if (enter === undefined) {\n if (events[index] && events[index][1].type === 'data') {\n enter = index\n index++\n }\n } else if (!events[index] || events[index][1].type !== 'data') {\n // Don’t do anything if there is one data token.\n if (index !== enter + 2) {\n events[enter][1].end = events[index - 1][1].end\n events.splice(enter + 2, index - enter - 2)\n index = enter + 2\n }\n enter = undefined\n }\n }\n return extraResolver ? 
extraResolver(events, context) : events\n }\n}\n\n/**\n * A rather ugly set of instructions which again looks at chunks in the input\n * stream.\n * The reason to do this here is that it is *much* faster to parse in reverse.\n * And that we can’t hook into `null` to split the line suffix before an EOF.\n * To do: figure out if we can make this into a clean utility, or even in core.\n * As it will be useful for GFMs literal autolink extension (and maybe even\n * tables?)\n *\n * @type {Resolver}\n */\nfunction resolveAllLineSuffixes(events, context) {\n let eventIndex = 0 // Skip first.\n\n while (++eventIndex <= events.length) {\n if (\n (eventIndex === events.length ||\n events[eventIndex][1].type === 'lineEnding') &&\n events[eventIndex - 1][1].type === 'data'\n ) {\n const data = events[eventIndex - 1][1]\n const chunks = context.sliceStream(data)\n let index = chunks.length\n let bufferIndex = -1\n let size = 0\n /** @type {boolean | undefined} */\n let tabs\n while (index--) {\n const chunk = chunks[index]\n if (typeof chunk === 'string') {\n bufferIndex = chunk.length\n while (chunk.charCodeAt(bufferIndex - 1) === 32) {\n size++\n bufferIndex--\n }\n if (bufferIndex) break\n bufferIndex = -1\n }\n // Number\n else if (chunk === -2) {\n tabs = true\n size++\n } else if (chunk === -1) {\n // Empty\n } else {\n // Replacement character, exit.\n index++\n break\n }\n }\n if (size) {\n const token = {\n type:\n eventIndex === events.length || tabs || size < 2\n ? 'lineSuffix'\n : 'hardBreakTrailing',\n start: {\n line: data.end.line,\n column: data.end.column - size,\n offset: data.end.offset - size,\n _index: data.start._index + index,\n _bufferIndex: index\n ? bufferIndex\n : data.start._bufferIndex + bufferIndex\n },\n end: Object.assign({}, data.end)\n }\n data.end = Object.assign({}, token.start)\n if (data.start.offset === data.end.offset) {\n Object.assign(data, token)\n } else {\n events.splice(\n eventIndex,\n 0,\n ['enter', token, context],\n ['exit', token, context]\n )\n eventIndex += 2\n }\n }\n eventIndex++\n }\n }\n return events\n}\n","/**\n * @typedef {import('micromark-util-types').Chunk} Chunk\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').Construct} Construct\n * @typedef {import('micromark-util-types').ConstructRecord} ConstructRecord\n * @typedef {import('micromark-util-types').Effects} Effects\n * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct\n * @typedef {import('micromark-util-types').ParseContext} ParseContext\n * @typedef {import('micromark-util-types').Point} Point\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').Token} Token\n * @typedef {import('micromark-util-types').TokenType} TokenType\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n */\n\n/**\n * @callback Restore\n * @returns {undefined}\n *\n * @typedef Info\n * @property {Restore} restore\n * @property {number} from\n *\n * @callback ReturnHandle\n * Handle a successful run.\n * @param {Construct} construct\n * @param {Info} info\n * @returns {undefined}\n */\n\nimport {markdownLineEnding} from 'micromark-util-character'\nimport {push, splice} from 'micromark-util-chunked'\nimport {resolveAll} from 'micromark-util-resolve-all'\n/**\n * Create a tokenizer.\n * Tokenizers deal with one type of data (e.g., containers, flow, text).\n * The parser is the object dealing with it all.\n * `initialize` works like other constructs, except that 
only its `tokenize`\n * function is used, in which case it doesn’t receive an `ok` or `nok`.\n * `from` can be given to set the point before the first character, although\n * when further lines are indented, they must be set with `defineSkip`.\n *\n * @param {ParseContext} parser\n * @param {InitialConstruct} initialize\n * @param {Omit | undefined} [from]\n * @returns {TokenizeContext}\n */\nexport function createTokenizer(parser, initialize, from) {\n /** @type {Point} */\n let point = Object.assign(\n from\n ? Object.assign({}, from)\n : {\n line: 1,\n column: 1,\n offset: 0\n },\n {\n _index: 0,\n _bufferIndex: -1\n }\n )\n /** @type {Record} */\n const columnStart = {}\n /** @type {Array} */\n const resolveAllConstructs = []\n /** @type {Array} */\n let chunks = []\n /** @type {Array} */\n let stack = []\n /** @type {boolean | undefined} */\n let consumed = true\n\n /**\n * Tools used for tokenizing.\n *\n * @type {Effects}\n */\n const effects = {\n consume,\n enter,\n exit,\n attempt: constructFactory(onsuccessfulconstruct),\n check: constructFactory(onsuccessfulcheck),\n interrupt: constructFactory(onsuccessfulcheck, {\n interrupt: true\n })\n }\n\n /**\n * State and tools for resolving and serializing.\n *\n * @type {TokenizeContext}\n */\n const context = {\n previous: null,\n code: null,\n containerState: {},\n events: [],\n parser,\n sliceStream,\n sliceSerialize,\n now,\n defineSkip,\n write\n }\n\n /**\n * The state function.\n *\n * @type {State | undefined}\n */\n let state = initialize.tokenize.call(context, effects)\n\n /**\n * Track which character we expect to be consumed, to catch bugs.\n *\n * @type {Code}\n */\n let expectedCode\n if (initialize.resolveAll) {\n resolveAllConstructs.push(initialize)\n }\n return context\n\n /** @type {TokenizeContext['write']} */\n function write(slice) {\n chunks = push(chunks, slice)\n main()\n\n // Exit if we’re not done, resolve might change stuff.\n if (chunks[chunks.length - 1] !== null) {\n return []\n }\n addResult(initialize, 0)\n\n // Otherwise, resolve, and exit.\n context.events = resolveAll(resolveAllConstructs, context.events, context)\n return context.events\n }\n\n //\n // Tools.\n //\n\n /** @type {TokenizeContext['sliceSerialize']} */\n function sliceSerialize(token, expandTabs) {\n return serializeChunks(sliceStream(token), expandTabs)\n }\n\n /** @type {TokenizeContext['sliceStream']} */\n function sliceStream(token) {\n return sliceChunks(chunks, token)\n }\n\n /** @type {TokenizeContext['now']} */\n function now() {\n // This is a hot path, so we clone manually instead of `Object.assign({}, point)`\n const {line, column, offset, _index, _bufferIndex} = point\n return {\n line,\n column,\n offset,\n _index,\n _bufferIndex\n }\n }\n\n /** @type {TokenizeContext['defineSkip']} */\n function defineSkip(value) {\n columnStart[value.line] = value.column\n accountForPotentialSkip()\n }\n\n //\n // State management.\n //\n\n /**\n * Main loop (note that `_index` and `_bufferIndex` in `point` are modified by\n * `consume`).\n * Here is where we walk through the chunks, which either include strings of\n * several characters, or numerical character codes.\n * The reason to do this in a loop instead of a call is so the stack can\n * drain.\n *\n * @returns {undefined}\n */\n function main() {\n /** @type {number} */\n let chunkIndex\n while (point._index < chunks.length) {\n const chunk = chunks[point._index]\n\n // If we’re in a buffer chunk, loop through it.\n if (typeof chunk === 'string') {\n chunkIndex = point._index\n 
if (point._bufferIndex < 0) {\n point._bufferIndex = 0\n }\n while (\n point._index === chunkIndex &&\n point._bufferIndex < chunk.length\n ) {\n go(chunk.charCodeAt(point._bufferIndex))\n }\n } else {\n go(chunk)\n }\n }\n }\n\n /**\n * Deal with one code.\n *\n * @param {Code} code\n * @returns {undefined}\n */\n function go(code) {\n consumed = undefined\n expectedCode = code\n state = state(code)\n }\n\n /** @type {Effects['consume']} */\n function consume(code) {\n if (markdownLineEnding(code)) {\n point.line++\n point.column = 1\n point.offset += code === -3 ? 2 : 1\n accountForPotentialSkip()\n } else if (code !== -1) {\n point.column++\n point.offset++\n }\n\n // Not in a string chunk.\n if (point._bufferIndex < 0) {\n point._index++\n } else {\n point._bufferIndex++\n\n // At end of string chunk.\n // @ts-expect-error Points w/ non-negative `_bufferIndex` reference\n // strings.\n if (point._bufferIndex === chunks[point._index].length) {\n point._bufferIndex = -1\n point._index++\n }\n }\n\n // Expose the previous character.\n context.previous = code\n\n // Mark as consumed.\n consumed = true\n }\n\n /** @type {Effects['enter']} */\n function enter(type, fields) {\n /** @type {Token} */\n // @ts-expect-error Patch instead of assign required fields to help GC.\n const token = fields || {}\n token.type = type\n token.start = now()\n context.events.push(['enter', token, context])\n stack.push(token)\n return token\n }\n\n /** @type {Effects['exit']} */\n function exit(type) {\n const token = stack.pop()\n token.end = now()\n context.events.push(['exit', token, context])\n return token\n }\n\n /**\n * Use results.\n *\n * @type {ReturnHandle}\n */\n function onsuccessfulconstruct(construct, info) {\n addResult(construct, info.from)\n }\n\n /**\n * Discard results.\n *\n * @type {ReturnHandle}\n */\n function onsuccessfulcheck(_, info) {\n info.restore()\n }\n\n /**\n * Factory to attempt/check/interrupt.\n *\n * @param {ReturnHandle} onreturn\n * @param {{interrupt?: boolean | undefined} | undefined} [fields]\n */\n function constructFactory(onreturn, fields) {\n return hook\n\n /**\n * Handle either an object mapping codes to constructs, a list of\n * constructs, or a single construct.\n *\n * @param {Array | Construct | ConstructRecord} constructs\n * @param {State} returnState\n * @param {State | undefined} [bogusState]\n * @returns {State}\n */\n function hook(constructs, returnState, bogusState) {\n /** @type {Array} */\n let listOfConstructs\n /** @type {number} */\n let constructIndex\n /** @type {Construct} */\n let currentConstruct\n /** @type {Info} */\n let info\n return Array.isArray(constructs) /* c8 ignore next 1 */\n ? handleListOfConstructs(constructs)\n : 'tokenize' in constructs\n ? // @ts-expect-error Looks like a construct.\n handleListOfConstructs([constructs])\n : handleMapOfConstructs(constructs)\n\n /**\n * Handle a list of construct.\n *\n * @param {ConstructRecord} map\n * @returns {State}\n */\n function handleMapOfConstructs(map) {\n return start\n\n /** @type {State} */\n function start(code) {\n const def = code !== null && map[code]\n const all = code !== null && map.null\n const list = [\n // To do: add more extension tests.\n /* c8 ignore next 2 */\n ...(Array.isArray(def) ? def : def ? [def] : []),\n ...(Array.isArray(all) ? all : all ? 
[all] : [])\n ]\n return handleListOfConstructs(list)(code)\n }\n }\n\n /**\n * Handle a list of construct.\n *\n * @param {Array} list\n * @returns {State}\n */\n function handleListOfConstructs(list) {\n listOfConstructs = list\n constructIndex = 0\n if (list.length === 0) {\n return bogusState\n }\n return handleConstruct(list[constructIndex])\n }\n\n /**\n * Handle a single construct.\n *\n * @param {Construct} construct\n * @returns {State}\n */\n function handleConstruct(construct) {\n return start\n\n /** @type {State} */\n function start(code) {\n // To do: not needed to store if there is no bogus state, probably?\n // Currently doesn’t work because `inspect` in document does a check\n // w/o a bogus, which doesn’t make sense. But it does seem to help perf\n // by not storing.\n info = store()\n currentConstruct = construct\n if (!construct.partial) {\n context.currentConstruct = construct\n }\n\n // Always populated by defaults.\n\n if (\n construct.name &&\n context.parser.constructs.disable.null.includes(construct.name)\n ) {\n return nok(code)\n }\n return construct.tokenize.call(\n // If we do have fields, create an object w/ `context` as its\n // prototype.\n // This allows a “live binding”, which is needed for `interrupt`.\n fields ? Object.assign(Object.create(context), fields) : context,\n effects,\n ok,\n nok\n )(code)\n }\n }\n\n /** @type {State} */\n function ok(code) {\n consumed = true\n onreturn(currentConstruct, info)\n return returnState\n }\n\n /** @type {State} */\n function nok(code) {\n consumed = true\n info.restore()\n if (++constructIndex < listOfConstructs.length) {\n return handleConstruct(listOfConstructs[constructIndex])\n }\n return bogusState\n }\n }\n }\n\n /**\n * @param {Construct} construct\n * @param {number} from\n * @returns {undefined}\n */\n function addResult(construct, from) {\n if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {\n resolveAllConstructs.push(construct)\n }\n if (construct.resolve) {\n splice(\n context.events,\n from,\n context.events.length - from,\n construct.resolve(context.events.slice(from), context)\n )\n }\n if (construct.resolveTo) {\n context.events = construct.resolveTo(context.events, context)\n }\n }\n\n /**\n * Store state.\n *\n * @returns {Info}\n */\n function store() {\n const startPoint = now()\n const startPrevious = context.previous\n const startCurrentConstruct = context.currentConstruct\n const startEventsIndex = context.events.length\n const startStack = Array.from(stack)\n return {\n restore,\n from: startEventsIndex\n }\n\n /**\n * Restore state.\n *\n * @returns {undefined}\n */\n function restore() {\n point = startPoint\n context.previous = startPrevious\n context.currentConstruct = startCurrentConstruct\n context.events.length = startEventsIndex\n stack = startStack\n accountForPotentialSkip()\n }\n }\n\n /**\n * Move the current point a bit forward in the line when it’s on a column\n * skip.\n *\n * @returns {undefined}\n */\n function accountForPotentialSkip() {\n if (point.line in columnStart && point.column < 2) {\n point.column = columnStart[point.line]\n point.offset += columnStart[point.line] - 1\n }\n }\n}\n\n/**\n * Get the chunks from a slice of chunks in the range of a token.\n *\n * @param {Array} chunks\n * @param {Pick} token\n * @returns {Array}\n */\nfunction sliceChunks(chunks, token) {\n const startIndex = token.start._index\n const startBufferIndex = token.start._bufferIndex\n const endIndex = token.end._index\n const endBufferIndex = 
token.end._bufferIndex\n /** @type {Array} */\n let view\n if (startIndex === endIndex) {\n // @ts-expect-error `_bufferIndex` is used on string chunks.\n view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]\n } else {\n view = chunks.slice(startIndex, endIndex)\n if (startBufferIndex > -1) {\n const head = view[0]\n if (typeof head === 'string') {\n view[0] = head.slice(startBufferIndex)\n } else {\n view.shift()\n }\n }\n if (endBufferIndex > 0) {\n // @ts-expect-error `_bufferIndex` is used on string chunks.\n view.push(chunks[endIndex].slice(0, endBufferIndex))\n }\n }\n return view\n}\n\n/**\n * Get the string value of a slice of chunks.\n *\n * @param {Array} chunks\n * @param {boolean | undefined} [expandTabs=false]\n * @returns {string}\n */\nfunction serializeChunks(chunks, expandTabs) {\n let index = -1\n /** @type {Array} */\n const result = []\n /** @type {boolean | undefined} */\n let atTab\n while (++index < chunks.length) {\n const chunk = chunks[index]\n /** @type {string} */\n let value\n if (typeof chunk === 'string') {\n value = chunk\n } else\n switch (chunk) {\n case -5: {\n value = '\\r'\n break\n }\n case -4: {\n value = '\\n'\n break\n }\n case -3: {\n value = '\\r' + '\\n'\n break\n }\n case -2: {\n value = expandTabs ? ' ' : '\\t'\n break\n }\n case -1: {\n if (!expandTabs && atTab) continue\n value = ' '\n break\n }\n default: {\n // Currently only replacement character.\n value = String.fromCharCode(chunk)\n }\n }\n atTab = chunk === -2\n result.push(value)\n }\n return result.join('')\n}\n","/**\n * @typedef {import('micromark-util-types').Extension} Extension\n */\n\nimport {\n attention,\n autolink,\n blockQuote,\n characterEscape,\n characterReference,\n codeFenced,\n codeIndented,\n codeText,\n definition,\n hardBreakEscape,\n headingAtx,\n htmlFlow,\n htmlText,\n labelEnd,\n labelStartImage,\n labelStartLink,\n lineEnding,\n list,\n setextUnderline,\n thematicBreak\n} from 'micromark-core-commonmark'\nimport {resolver as resolveText} from './initialize/text.js'\n\n/** @satisfies {Extension['document']} */\nexport const document = {\n [42]: list,\n [43]: list,\n [45]: list,\n [48]: list,\n [49]: list,\n [50]: list,\n [51]: list,\n [52]: list,\n [53]: list,\n [54]: list,\n [55]: list,\n [56]: list,\n [57]: list,\n [62]: blockQuote\n}\n\n/** @satisfies {Extension['contentInitial']} */\nexport const contentInitial = {\n [91]: definition\n}\n\n/** @satisfies {Extension['flowInitial']} */\nexport const flowInitial = {\n [-2]: codeIndented,\n [-1]: codeIndented,\n [32]: codeIndented\n}\n\n/** @satisfies {Extension['flow']} */\nexport const flow = {\n [35]: headingAtx,\n [42]: thematicBreak,\n [45]: [setextUnderline, thematicBreak],\n [60]: htmlFlow,\n [61]: setextUnderline,\n [95]: thematicBreak,\n [96]: codeFenced,\n [126]: codeFenced\n}\n\n/** @satisfies {Extension['string']} */\nexport const string = {\n [38]: characterReference,\n [92]: characterEscape\n}\n\n/** @satisfies {Extension['text']} */\nexport const text = {\n [-5]: lineEnding,\n [-4]: lineEnding,\n [-3]: lineEnding,\n [33]: labelStartImage,\n [38]: characterReference,\n [42]: attention,\n [60]: [autolink, htmlText],\n [91]: labelStartLink,\n [92]: [hardBreakEscape, characterEscape],\n [93]: labelEnd,\n [95]: attention,\n [96]: codeText\n}\n\n/** @satisfies {Extension['insideSpan']} */\nexport const insideSpan = {\n null: [attention, resolveText]\n}\n\n/** @satisfies {Extension['attentionMarkers']} */\nexport const attentionMarkers = {\n null: [42, 95]\n}\n\n/** @satisfies 
{Extension['disable']} */\nexport const disable = {\n null: []\n}\n","/**\n * @typedef {import('micromark-util-types').Create} Create\n * @typedef {import('micromark-util-types').FullNormalizedExtension} FullNormalizedExtension\n * @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct\n * @typedef {import('micromark-util-types').ParseContext} ParseContext\n * @typedef {import('micromark-util-types').ParseOptions} ParseOptions\n */\n\nimport {combineExtensions} from 'micromark-util-combine-extensions'\nimport {content} from './initialize/content.js'\nimport {document} from './initialize/document.js'\nimport {flow} from './initialize/flow.js'\nimport {string, text} from './initialize/text.js'\nimport {createTokenizer} from './create-tokenizer.js'\nimport * as defaultConstructs from './constructs.js'\n\n/**\n * @param {ParseOptions | null | undefined} [options]\n * @returns {ParseContext}\n */\nexport function parse(options) {\n const settings = options || {}\n const constructs =\n /** @type {FullNormalizedExtension} */\n combineExtensions([defaultConstructs, ...(settings.extensions || [])])\n\n /** @type {ParseContext} */\n const parser = {\n defined: [],\n lazy: {},\n constructs,\n content: create(content),\n document: create(document),\n flow: create(flow),\n string: create(string),\n text: create(text)\n }\n return parser\n\n /**\n * @param {InitialConstruct} initial\n */\n function create(initial) {\n return creator\n /** @type {Create} */\n function creator(from) {\n return createTokenizer(parser, initial, from)\n }\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Event} Event\n */\n\nimport {subtokenize} from 'micromark-util-subtokenize'\n\n/**\n * @param {Array} events\n * @returns {Array}\n */\nexport function postprocess(events) {\n while (!subtokenize(events)) {\n // Empty\n }\n return events\n}\n","/**\n * @typedef {import('micromark-util-types').Chunk} Chunk\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').Encoding} Encoding\n * @typedef {import('micromark-util-types').Value} Value\n */\n\n/**\n * @callback Preprocessor\n * @param {Value} value\n * @param {Encoding | null | undefined} [encoding]\n * @param {boolean | null | undefined} [end=false]\n * @returns {Array}\n */\n\nconst search = /[\\0\\t\\n\\r]/g\n\n/**\n * @returns {Preprocessor}\n */\nexport function preprocess() {\n let column = 1\n let buffer = ''\n /** @type {boolean | undefined} */\n let start = true\n /** @type {boolean | undefined} */\n let atCarriageReturn\n return preprocessor\n\n /** @type {Preprocessor} */\n // eslint-disable-next-line complexity\n function preprocessor(value, encoding, end) {\n /** @type {Array} */\n const chunks = []\n /** @type {RegExpMatchArray | null} */\n let match\n /** @type {number} */\n let next\n /** @type {number} */\n let startPosition\n /** @type {number} */\n let endPosition\n /** @type {Code} */\n let code\n value =\n buffer +\n (typeof value === 'string'\n ? value.toString()\n : new TextDecoder(encoding || undefined).decode(value))\n startPosition = 0\n buffer = ''\n if (start) {\n // To do: `markdown-rs` actually parses BOMs (byte order mark).\n if (value.charCodeAt(0) === 65279) {\n startPosition++\n }\n start = undefined\n }\n while (startPosition < value.length) {\n search.lastIndex = startPosition\n match = search.exec(value)\n endPosition =\n match && match.index !== undefined ? 
match.index : value.length\n code = value.charCodeAt(endPosition)\n if (!match) {\n buffer = value.slice(startPosition)\n break\n }\n if (code === 10 && startPosition === endPosition && atCarriageReturn) {\n chunks.push(-3)\n atCarriageReturn = undefined\n } else {\n if (atCarriageReturn) {\n chunks.push(-5)\n atCarriageReturn = undefined\n }\n if (startPosition < endPosition) {\n chunks.push(value.slice(startPosition, endPosition))\n column += endPosition - startPosition\n }\n switch (code) {\n case 0: {\n chunks.push(65533)\n column++\n break\n }\n case 9: {\n next = Math.ceil(column / 4) * 4\n chunks.push(-2)\n while (column++ < next) chunks.push(-1)\n break\n }\n case 10: {\n chunks.push(-4)\n column = 1\n break\n }\n default: {\n atCarriageReturn = true\n column = 1\n }\n }\n }\n startPosition = endPosition + 1\n }\n if (end) {\n if (atCarriageReturn) chunks.push(-5)\n if (buffer) chunks.push(buffer)\n chunks.push(null)\n }\n return chunks\n }\n}\n","import {decodeNamedCharacterReference} from 'decode-named-character-reference'\nimport {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference'\nconst characterEscapeOrReference =\n /\\\\([!-/:-@[-`{-~])|&(#(?:\\d{1,7}|x[\\da-f]{1,6})|[\\da-z]{1,31});/gi\n\n/**\n * Decode markdown strings (which occur in places such as fenced code info\n * strings, destinations, labels, and titles).\n *\n * The “string” content type allows character escapes and -references.\n * This decodes those.\n *\n * @param {string} value\n * Value to decode.\n * @returns {string}\n * Decoded value.\n */\nexport function decodeString(value) {\n return value.replace(characterEscapeOrReference, decode)\n}\n\n/**\n * @param {string} $0\n * @param {string} $1\n * @param {string} $2\n * @returns {string}\n */\nfunction decode($0, $1, $2) {\n if ($1) {\n // Escape.\n return $1\n }\n\n // Reference.\n const head = $2.charCodeAt(0)\n if (head === 35) {\n const head = $2.charCodeAt(1)\n const hex = head === 120 || head === 88\n return decodeNumericCharacterReference($2.slice(hex ? 2 : 1), hex ? 
16 : 10)\n }\n return decodeNamedCharacterReference($2) || $0\n}\n","/**\n * @typedef {import('mdast').Break} Break\n * @typedef {import('mdast').Blockquote} Blockquote\n * @typedef {import('mdast').Code} Code\n * @typedef {import('mdast').Definition} Definition\n * @typedef {import('mdast').Emphasis} Emphasis\n * @typedef {import('mdast').Heading} Heading\n * @typedef {import('mdast').Html} Html\n * @typedef {import('mdast').Image} Image\n * @typedef {import('mdast').InlineCode} InlineCode\n * @typedef {import('mdast').Link} Link\n * @typedef {import('mdast').List} List\n * @typedef {import('mdast').ListItem} ListItem\n * @typedef {import('mdast').Nodes} Nodes\n * @typedef {import('mdast').Paragraph} Paragraph\n * @typedef {import('mdast').Parent} Parent\n * @typedef {import('mdast').PhrasingContent} PhrasingContent\n * @typedef {import('mdast').ReferenceType} ReferenceType\n * @typedef {import('mdast').Root} Root\n * @typedef {import('mdast').Strong} Strong\n * @typedef {import('mdast').Text} Text\n * @typedef {import('mdast').ThematicBreak} ThematicBreak\n *\n * @typedef {import('micromark-util-types').Encoding} Encoding\n * @typedef {import('micromark-util-types').Event} Event\n * @typedef {import('micromark-util-types').ParseOptions} ParseOptions\n * @typedef {import('micromark-util-types').Token} Token\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Value} Value\n *\n * @typedef {import('unist').Point} Point\n *\n * @typedef {import('../index.js').CompileData} CompileData\n */\n\n/**\n * @typedef {Omit & {type: 'fragment', children: Array}} Fragment\n */\n\n/**\n * @callback Transform\n * Extra transform, to change the AST afterwards.\n * @param {Root} tree\n * Tree to transform.\n * @returns {Root | null | undefined | void}\n * New tree or nothing (in which case the current tree is used).\n *\n * @callback Handle\n * Handle a token.\n * @param {CompileContext} this\n * Context.\n * @param {Token} token\n * Current token.\n * @returns {undefined | void}\n * Nothing.\n *\n * @typedef {Record} Handles\n * Token types mapping to handles\n *\n * @callback OnEnterError\n * Handle the case where the `right` token is open, but it is closed (by the\n * `left` token) or because we reached the end of the document.\n * @param {Omit} this\n * Context.\n * @param {Token | undefined} left\n * Left token.\n * @param {Token} right\n * Right token.\n * @returns {undefined}\n * Nothing.\n *\n * @callback OnExitError\n * Handle the case where the `right` token is open but it is closed by\n * exiting the `left` token.\n * @param {Omit} this\n * Context.\n * @param {Token} left\n * Left token.\n * @param {Token} right\n * Right token.\n * @returns {undefined}\n * Nothing.\n *\n * @typedef {[Token, OnEnterError | undefined]} TokenTuple\n * Open token on the stack, with an optional error handler for when\n * that token isn’t closed properly.\n */\n\n/**\n * @typedef Config\n * Configuration.\n *\n * We have our defaults, but extensions will add more.\n * @property {Array} canContainEols\n * Token types where line endings are used.\n * @property {Handles} enter\n * Opening handles.\n * @property {Handles} exit\n * Closing handles.\n * @property {Array} transforms\n * Tree transforms.\n *\n * @typedef {Partial} Extension\n * Change how markdown tokens from micromark are turned into mdast.\n *\n * @typedef CompileContext\n * mdast compiler context.\n * @property {Array} stack\n * Stack of nodes.\n * @property {Array} tokenStack\n * 
Stack of tokens.\n * @property {(this: CompileContext) => undefined} buffer\n * Capture some of the output data.\n * @property {(this: CompileContext) => string} resume\n * Stop capturing and access the output data.\n * @property {(this: CompileContext, node: Nodes, token: Token, onError?: OnEnterError) => undefined} enter\n * Enter a node.\n * @property {(this: CompileContext, token: Token, onError?: OnExitError) => undefined} exit\n * Exit a node.\n * @property {TokenizeContext['sliceSerialize']} sliceSerialize\n * Get the string value of a token.\n * @property {Config} config\n * Configuration.\n * @property {CompileData} data\n * Info passed around; key/value store.\n *\n * @typedef FromMarkdownOptions\n * Configuration for how to build mdast.\n * @property {Array> | null | undefined} [mdastExtensions]\n * Extensions for this utility to change how tokens are turned into a tree.\n *\n * @typedef {ParseOptions & FromMarkdownOptions} Options\n * Configuration.\n */\n\nimport {toString} from 'mdast-util-to-string'\nimport {parse, postprocess, preprocess} from 'micromark'\nimport {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference'\nimport {decodeString} from 'micromark-util-decode-string'\nimport {normalizeIdentifier} from 'micromark-util-normalize-identifier'\nimport {decodeNamedCharacterReference} from 'decode-named-character-reference'\nimport {stringifyPosition} from 'unist-util-stringify-position'\nconst own = {}.hasOwnProperty\n\n/**\n * Turn markdown into a syntax tree.\n *\n * @overload\n * @param {Value} value\n * @param {Encoding | null | undefined} [encoding]\n * @param {Options | null | undefined} [options]\n * @returns {Root}\n *\n * @overload\n * @param {Value} value\n * @param {Options | null | undefined} [options]\n * @returns {Root}\n *\n * @param {Value} value\n * Markdown to parse.\n * @param {Encoding | Options | null | undefined} [encoding]\n * Character encoding for when `value` is `Buffer`.\n * @param {Options | null | undefined} [options]\n * Configuration.\n * @returns {Root}\n * mdast tree.\n */\nexport function fromMarkdown(value, encoding, options) {\n if (typeof encoding !== 'string') {\n options = encoding\n encoding = undefined\n }\n return compiler(options)(\n postprocess(\n parse(options).document().write(preprocess()(value, encoding, true))\n )\n )\n}\n\n/**\n * Note this compiler only understand complete buffering, not streaming.\n *\n * @param {Options | null | undefined} [options]\n */\nfunction compiler(options) {\n /** @type {Config} */\n const config = {\n transforms: [],\n canContainEols: ['emphasis', 'fragment', 'heading', 'paragraph', 'strong'],\n enter: {\n autolink: opener(link),\n autolinkProtocol: onenterdata,\n autolinkEmail: onenterdata,\n atxHeading: opener(heading),\n blockQuote: opener(blockQuote),\n characterEscape: onenterdata,\n characterReference: onenterdata,\n codeFenced: opener(codeFlow),\n codeFencedFenceInfo: buffer,\n codeFencedFenceMeta: buffer,\n codeIndented: opener(codeFlow, buffer),\n codeText: opener(codeText, buffer),\n codeTextData: onenterdata,\n data: onenterdata,\n codeFlowValue: onenterdata,\n definition: opener(definition),\n definitionDestinationString: buffer,\n definitionLabelString: buffer,\n definitionTitleString: buffer,\n emphasis: opener(emphasis),\n hardBreakEscape: opener(hardBreak),\n hardBreakTrailing: opener(hardBreak),\n htmlFlow: opener(html, buffer),\n htmlFlowData: onenterdata,\n htmlText: opener(html, buffer),\n htmlTextData: onenterdata,\n image: 
opener(image),\n label: buffer,\n link: opener(link),\n listItem: opener(listItem),\n listItemValue: onenterlistitemvalue,\n listOrdered: opener(list, onenterlistordered),\n listUnordered: opener(list),\n paragraph: opener(paragraph),\n reference: onenterreference,\n referenceString: buffer,\n resourceDestinationString: buffer,\n resourceTitleString: buffer,\n setextHeading: opener(heading),\n strong: opener(strong),\n thematicBreak: opener(thematicBreak)\n },\n exit: {\n atxHeading: closer(),\n atxHeadingSequence: onexitatxheadingsequence,\n autolink: closer(),\n autolinkEmail: onexitautolinkemail,\n autolinkProtocol: onexitautolinkprotocol,\n blockQuote: closer(),\n characterEscapeValue: onexitdata,\n characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker,\n characterReferenceMarkerNumeric: onexitcharacterreferencemarker,\n characterReferenceValue: onexitcharacterreferencevalue,\n codeFenced: closer(onexitcodefenced),\n codeFencedFence: onexitcodefencedfence,\n codeFencedFenceInfo: onexitcodefencedfenceinfo,\n codeFencedFenceMeta: onexitcodefencedfencemeta,\n codeFlowValue: onexitdata,\n codeIndented: closer(onexitcodeindented),\n codeText: closer(onexitcodetext),\n codeTextData: onexitdata,\n data: onexitdata,\n definition: closer(),\n definitionDestinationString: onexitdefinitiondestinationstring,\n definitionLabelString: onexitdefinitionlabelstring,\n definitionTitleString: onexitdefinitiontitlestring,\n emphasis: closer(),\n hardBreakEscape: closer(onexithardbreak),\n hardBreakTrailing: closer(onexithardbreak),\n htmlFlow: closer(onexithtmlflow),\n htmlFlowData: onexitdata,\n htmlText: closer(onexithtmltext),\n htmlTextData: onexitdata,\n image: closer(onexitimage),\n label: onexitlabel,\n labelText: onexitlabeltext,\n lineEnding: onexitlineending,\n link: closer(onexitlink),\n listItem: closer(),\n listOrdered: closer(),\n listUnordered: closer(),\n paragraph: closer(),\n referenceString: onexitreferencestring,\n resourceDestinationString: onexitresourcedestinationstring,\n resourceTitleString: onexitresourcetitlestring,\n resource: onexitresource,\n setextHeading: closer(onexitsetextheading),\n setextHeadingLineSequence: onexitsetextheadinglinesequence,\n setextHeadingText: onexitsetextheadingtext,\n strong: closer(),\n thematicBreak: closer()\n }\n }\n configure(config, (options || {}).mdastExtensions || [])\n\n /** @type {CompileData} */\n const data = {}\n return compile\n\n /**\n * Turn micromark events into an mdast tree.\n *\n * @param {Array} events\n * Events.\n * @returns {Root}\n * mdast tree.\n */\n function compile(events) {\n /** @type {Root} */\n let tree = {\n type: 'root',\n children: []\n }\n /** @type {Omit} */\n const context = {\n stack: [tree],\n tokenStack: [],\n config,\n enter,\n exit,\n buffer,\n resume,\n data\n }\n /** @type {Array} */\n const listStack = []\n let index = -1\n while (++index < events.length) {\n // We preprocess lists to add `listItem` tokens, and to infer whether\n // items the list itself are spread out.\n if (\n events[index][1].type === 'listOrdered' ||\n events[index][1].type === 'listUnordered'\n ) {\n if (events[index][0] === 'enter') {\n listStack.push(index)\n } else {\n const tail = listStack.pop()\n index = prepareList(events, tail, index)\n }\n }\n }\n index = -1\n while (++index < events.length) {\n const handler = config[events[index][0]]\n if (own.call(handler, events[index][1].type)) {\n handler[events[index][1].type].call(\n Object.assign(\n {\n sliceSerialize: events[index][2].sliceSerialize\n },\n context\n 
),\n events[index][1]\n )\n }\n }\n\n // Handle tokens still being open.\n if (context.tokenStack.length > 0) {\n const tail = context.tokenStack[context.tokenStack.length - 1]\n const handler = tail[1] || defaultOnError\n handler.call(context, undefined, tail[0])\n }\n\n // Figure out `root` position.\n tree.position = {\n start: point(\n events.length > 0\n ? events[0][1].start\n : {\n line: 1,\n column: 1,\n offset: 0\n }\n ),\n end: point(\n events.length > 0\n ? events[events.length - 2][1].end\n : {\n line: 1,\n column: 1,\n offset: 0\n }\n )\n }\n\n // Call transforms.\n index = -1\n while (++index < config.transforms.length) {\n tree = config.transforms[index](tree) || tree\n }\n return tree\n }\n\n /**\n * @param {Array} events\n * @param {number} start\n * @param {number} length\n * @returns {number}\n */\n function prepareList(events, start, length) {\n let index = start - 1\n let containerBalance = -1\n let listSpread = false\n /** @type {Token | undefined} */\n let listItem\n /** @type {number | undefined} */\n let lineIndex\n /** @type {number | undefined} */\n let firstBlankLineIndex\n /** @type {boolean | undefined} */\n let atMarker\n while (++index <= length) {\n const event = events[index]\n switch (event[1].type) {\n case 'listUnordered':\n case 'listOrdered':\n case 'blockQuote': {\n if (event[0] === 'enter') {\n containerBalance++\n } else {\n containerBalance--\n }\n atMarker = undefined\n break\n }\n case 'lineEndingBlank': {\n if (event[0] === 'enter') {\n if (\n listItem &&\n !atMarker &&\n !containerBalance &&\n !firstBlankLineIndex\n ) {\n firstBlankLineIndex = index\n }\n atMarker = undefined\n }\n break\n }\n case 'linePrefix':\n case 'listItemValue':\n case 'listItemMarker':\n case 'listItemPrefix':\n case 'listItemPrefixWhitespace': {\n // Empty.\n\n break\n }\n default: {\n atMarker = undefined\n }\n }\n if (\n (!containerBalance &&\n event[0] === 'enter' &&\n event[1].type === 'listItemPrefix') ||\n (containerBalance === -1 &&\n event[0] === 'exit' &&\n (event[1].type === 'listUnordered' ||\n event[1].type === 'listOrdered'))\n ) {\n if (listItem) {\n let tailIndex = index\n lineIndex = undefined\n while (tailIndex--) {\n const tailEvent = events[tailIndex]\n if (\n tailEvent[1].type === 'lineEnding' ||\n tailEvent[1].type === 'lineEndingBlank'\n ) {\n if (tailEvent[0] === 'exit') continue\n if (lineIndex) {\n events[lineIndex][1].type = 'lineEndingBlank'\n listSpread = true\n }\n tailEvent[1].type = 'lineEnding'\n lineIndex = tailIndex\n } else if (\n tailEvent[1].type === 'linePrefix' ||\n tailEvent[1].type === 'blockQuotePrefix' ||\n tailEvent[1].type === 'blockQuotePrefixWhitespace' ||\n tailEvent[1].type === 'blockQuoteMarker' ||\n tailEvent[1].type === 'listItemIndent'\n ) {\n // Empty\n } else {\n break\n }\n }\n if (\n firstBlankLineIndex &&\n (!lineIndex || firstBlankLineIndex < lineIndex)\n ) {\n listItem._spread = true\n }\n\n // Fix position.\n listItem.end = Object.assign(\n {},\n lineIndex ? 
events[lineIndex][1].start : event[1].end\n )\n events.splice(lineIndex || index, 0, ['exit', listItem, event[2]])\n index++\n length++\n }\n\n // Create a new list item.\n if (event[1].type === 'listItemPrefix') {\n /** @type {Token} */\n const item = {\n type: 'listItem',\n _spread: false,\n start: Object.assign({}, event[1].start),\n // @ts-expect-error: we’ll add `end` in a second.\n end: undefined\n }\n listItem = item\n events.splice(index, 0, ['enter', item, event[2]])\n index++\n length++\n firstBlankLineIndex = undefined\n atMarker = true\n }\n }\n }\n events[start][1]._spread = listSpread\n return length\n }\n\n /**\n * Create an opener handle.\n *\n * @param {(token: Token) => Nodes} create\n * Create a node.\n * @param {Handle | undefined} [and]\n * Optional function to also run.\n * @returns {Handle}\n * Handle.\n */\n function opener(create, and) {\n return open\n\n /**\n * @this {CompileContext}\n * @param {Token} token\n * @returns {undefined}\n */\n function open(token) {\n enter.call(this, create(token), token)\n if (and) and.call(this, token)\n }\n }\n\n /**\n * @this {CompileContext}\n * @returns {undefined}\n */\n function buffer() {\n this.stack.push({\n type: 'fragment',\n children: []\n })\n }\n\n /**\n * @this {CompileContext}\n * Context.\n * @param {Nodes} node\n * Node to enter.\n * @param {Token} token\n * Corresponding token.\n * @param {OnEnterError | undefined} [errorHandler]\n * Handle the case where this token is open, but it is closed by something else.\n * @returns {undefined}\n * Nothing.\n */\n function enter(node, token, errorHandler) {\n const parent = this.stack[this.stack.length - 1]\n /** @type {Array} */\n const siblings = parent.children\n siblings.push(node)\n this.stack.push(node)\n this.tokenStack.push([token, errorHandler])\n node.position = {\n start: point(token.start),\n // @ts-expect-error: `end` will be patched later.\n end: undefined\n }\n }\n\n /**\n * Create a closer handle.\n *\n * @param {Handle | undefined} [and]\n * Optional function to also run.\n * @returns {Handle}\n * Handle.\n */\n function closer(and) {\n return close\n\n /**\n * @this {CompileContext}\n * @param {Token} token\n * @returns {undefined}\n */\n function close(token) {\n if (and) and.call(this, token)\n exit.call(this, token)\n }\n }\n\n /**\n * @this {CompileContext}\n * Context.\n * @param {Token} token\n * Corresponding token.\n * @param {OnExitError | undefined} [onExitError]\n * Handle the case where another token is open.\n * @returns {undefined}\n * Nothing.\n */\n function exit(token, onExitError) {\n const node = this.stack.pop()\n const open = this.tokenStack.pop()\n if (!open) {\n throw new Error(\n 'Cannot close `' +\n token.type +\n '` (' +\n stringifyPosition({\n start: token.start,\n end: token.end\n }) +\n '): it’s not open'\n )\n } else if (open[0].type !== token.type) {\n if (onExitError) {\n onExitError.call(this, token, open[0])\n } else {\n const handler = open[1] || defaultOnError\n handler.call(this, token, open[0])\n }\n }\n node.position.end = point(token.end)\n }\n\n /**\n * @this {CompileContext}\n * @returns {string}\n */\n function resume() {\n return toString(this.stack.pop())\n }\n\n //\n // Handlers.\n //\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onenterlistordered() {\n this.data.expectingFirstListItemValue = true\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onenterlistitemvalue(token) {\n if (this.data.expectingFirstListItemValue) {\n const ancestor = 
this.stack[this.stack.length - 2]\n ancestor.start = Number.parseInt(this.sliceSerialize(token), 10)\n this.data.expectingFirstListItemValue = undefined\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefencedfenceinfo() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.lang = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefencedfencemeta() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.meta = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefencedfence() {\n // Exit if this is the closing fence.\n if (this.data.flowCodeInside) return\n this.buffer()\n this.data.flowCodeInside = true\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefenced() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.value = data.replace(/^(\\r?\\n|\\r)|(\\r?\\n|\\r)$/g, '')\n this.data.flowCodeInside = undefined\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodeindented() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.value = data.replace(/(\\r?\\n|\\r)$/g, '')\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitdefinitionlabelstring(token) {\n const label = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.label = label\n node.identifier = normalizeIdentifier(\n this.sliceSerialize(token)\n ).toLowerCase()\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitdefinitiontitlestring() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.title = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitdefinitiondestinationstring() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.url = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitatxheadingsequence(token) {\n const node = this.stack[this.stack.length - 1]\n if (!node.depth) {\n const depth = this.sliceSerialize(token).length\n node.depth = depth\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitsetextheadingtext() {\n this.data.setextHeadingSlurpLineEnding = true\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitsetextheadinglinesequence(token) {\n const node = this.stack[this.stack.length - 1]\n node.depth = this.sliceSerialize(token).codePointAt(0) === 61 ? 
1 : 2\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitsetextheading() {\n this.data.setextHeadingSlurpLineEnding = undefined\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onenterdata(token) {\n const node = this.stack[this.stack.length - 1]\n /** @type {Array} */\n const siblings = node.children\n let tail = siblings[siblings.length - 1]\n if (!tail || tail.type !== 'text') {\n // Add a new text node.\n tail = text()\n tail.position = {\n start: point(token.start),\n // @ts-expect-error: we’ll add `end` later.\n end: undefined\n }\n siblings.push(tail)\n }\n this.stack.push(tail)\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitdata(token) {\n const tail = this.stack.pop()\n tail.value += this.sliceSerialize(token)\n tail.position.end = point(token.end)\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlineending(token) {\n const context = this.stack[this.stack.length - 1]\n // If we’re at a hard break, include the line ending in there.\n if (this.data.atHardBreak) {\n const tail = context.children[context.children.length - 1]\n tail.position.end = point(token.end)\n this.data.atHardBreak = undefined\n return\n }\n if (\n !this.data.setextHeadingSlurpLineEnding &&\n config.canContainEols.includes(context.type)\n ) {\n onenterdata.call(this, token)\n onexitdata.call(this, token)\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexithardbreak() {\n this.data.atHardBreak = true\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexithtmlflow() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.value = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexithtmltext() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.value = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitcodetext() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.value = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlink() {\n const node = this.stack[this.stack.length - 1]\n // Note: there are also `identifier` and `label` fields on this link node!\n // These are used / cleaned here.\n // To do: clean.\n if (this.data.inReference) {\n /** @type {ReferenceType} */\n const referenceType = this.data.referenceType || 'shortcut'\n node.type += 'Reference'\n // @ts-expect-error: mutate.\n node.referenceType = referenceType\n // @ts-expect-error: mutate.\n delete node.url\n delete node.title\n } else {\n // @ts-expect-error: mutate.\n delete node.identifier\n // @ts-expect-error: mutate.\n delete node.label\n }\n this.data.referenceType = undefined\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitimage() {\n const node = this.stack[this.stack.length - 1]\n // Note: there are also `identifier` and `label` fields on this link node!\n // These are used / cleaned here.\n // To do: clean.\n if (this.data.inReference) {\n /** @type {ReferenceType} */\n const referenceType = this.data.referenceType || 'shortcut'\n node.type += 'Reference'\n // @ts-expect-error: mutate.\n node.referenceType = referenceType\n // @ts-expect-error: mutate.\n delete node.url\n delete node.title\n } else {\n // @ts-expect-error: mutate.\n delete node.identifier\n // @ts-expect-error: mutate.\n delete node.label\n }\n 
this.data.referenceType = undefined\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlabeltext(token) {\n const string = this.sliceSerialize(token)\n const ancestor = this.stack[this.stack.length - 2]\n // @ts-expect-error: stash this on the node, as it might become a reference\n // later.\n ancestor.label = decodeString(string)\n // @ts-expect-error: same as above.\n ancestor.identifier = normalizeIdentifier(string).toLowerCase()\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlabel() {\n const fragment = this.stack[this.stack.length - 1]\n const value = this.resume()\n const node = this.stack[this.stack.length - 1]\n // Assume a reference.\n this.data.inReference = true\n if (node.type === 'link') {\n /** @type {Array} */\n const children = fragment.children\n node.children = children\n } else {\n node.alt = value\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitresourcedestinationstring() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.url = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitresourcetitlestring() {\n const data = this.resume()\n const node = this.stack[this.stack.length - 1]\n node.title = data\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitresource() {\n this.data.inReference = undefined\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onenterreference() {\n this.data.referenceType = 'collapsed'\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitreferencestring(token) {\n const label = this.resume()\n const node = this.stack[this.stack.length - 1]\n // @ts-expect-error: stash this on the node, as it might become a reference\n // later.\n node.label = label\n // @ts-expect-error: same as above.\n node.identifier = normalizeIdentifier(\n this.sliceSerialize(token)\n ).toLowerCase()\n this.data.referenceType = 'full'\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitcharacterreferencemarker(token) {\n this.data.characterReferenceType = token.type\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcharacterreferencevalue(token) {\n const data = this.sliceSerialize(token)\n const type = this.data.characterReferenceType\n /** @type {string} */\n let value\n if (type) {\n value = decodeNumericCharacterReference(\n data,\n type === 'characterReferenceMarkerNumeric' ? 
10 : 16\n )\n this.data.characterReferenceType = undefined\n } else {\n const result = decodeNamedCharacterReference(data)\n value = result\n }\n const tail = this.stack.pop()\n tail.value += value\n tail.position.end = point(token.end)\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitautolinkprotocol(token) {\n onexitdata.call(this, token)\n const node = this.stack[this.stack.length - 1]\n node.url = this.sliceSerialize(token)\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitautolinkemail(token) {\n onexitdata.call(this, token)\n const node = this.stack[this.stack.length - 1]\n node.url = 'mailto:' + this.sliceSerialize(token)\n }\n\n //\n // Creaters.\n //\n\n /** @returns {Blockquote} */\n function blockQuote() {\n return {\n type: 'blockquote',\n children: []\n }\n }\n\n /** @returns {Code} */\n function codeFlow() {\n return {\n type: 'code',\n lang: null,\n meta: null,\n value: ''\n }\n }\n\n /** @returns {InlineCode} */\n function codeText() {\n return {\n type: 'inlineCode',\n value: ''\n }\n }\n\n /** @returns {Definition} */\n function definition() {\n return {\n type: 'definition',\n identifier: '',\n label: null,\n title: null,\n url: ''\n }\n }\n\n /** @returns {Emphasis} */\n function emphasis() {\n return {\n type: 'emphasis',\n children: []\n }\n }\n\n /** @returns {Heading} */\n function heading() {\n return {\n type: 'heading',\n // @ts-expect-error `depth` will be set later.\n depth: 0,\n children: []\n }\n }\n\n /** @returns {Break} */\n function hardBreak() {\n return {\n type: 'break'\n }\n }\n\n /** @returns {Html} */\n function html() {\n return {\n type: 'html',\n value: ''\n }\n }\n\n /** @returns {Image} */\n function image() {\n return {\n type: 'image',\n title: null,\n url: '',\n alt: null\n }\n }\n\n /** @returns {Link} */\n function link() {\n return {\n type: 'link',\n title: null,\n url: '',\n children: []\n }\n }\n\n /**\n * @param {Token} token\n * @returns {List}\n */\n function list(token) {\n return {\n type: 'list',\n ordered: token.type === 'listOrdered',\n start: null,\n spread: token._spread,\n children: []\n }\n }\n\n /**\n * @param {Token} token\n * @returns {ListItem}\n */\n function listItem(token) {\n return {\n type: 'listItem',\n spread: token._spread,\n checked: null,\n children: []\n }\n }\n\n /** @returns {Paragraph} */\n function paragraph() {\n return {\n type: 'paragraph',\n children: []\n }\n }\n\n /** @returns {Strong} */\n function strong() {\n return {\n type: 'strong',\n children: []\n }\n }\n\n /** @returns {Text} */\n function text() {\n return {\n type: 'text',\n value: ''\n }\n }\n\n /** @returns {ThematicBreak} */\n function thematicBreak() {\n return {\n type: 'thematicBreak'\n }\n }\n}\n\n/**\n * Copy a point-like value.\n *\n * @param {Point} d\n * Point-like value.\n * @returns {Point}\n * unist point.\n */\nfunction point(d) {\n return {\n line: d.line,\n column: d.column,\n offset: d.offset\n }\n}\n\n/**\n * @param {Config} combined\n * @param {Array | Extension>} extensions\n * @returns {undefined}\n */\nfunction configure(combined, extensions) {\n let index = -1\n while (++index < extensions.length) {\n const value = extensions[index]\n if (Array.isArray(value)) {\n configure(combined, value)\n } else {\n extension(combined, value)\n }\n }\n}\n\n/**\n * @param {Config} combined\n * @param {Extension} extension\n * @returns {undefined}\n */\nfunction extension(combined, extension) {\n /** @type {keyof Extension} */\n let key\n for (key in extension) {\n if 
(own.call(extension, key)) {\n switch (key) {\n case 'canContainEols': {\n const right = extension[key]\n if (right) {\n combined[key].push(...right)\n }\n break\n }\n case 'transforms': {\n const right = extension[key]\n if (right) {\n combined[key].push(...right)\n }\n break\n }\n case 'enter':\n case 'exit': {\n const right = extension[key]\n if (right) {\n Object.assign(combined[key], right)\n }\n break\n }\n // No default\n }\n }\n }\n}\n\n/** @type {OnEnterError} */\nfunction defaultOnError(left, right) {\n if (left) {\n throw new Error(\n 'Cannot close `' +\n left.type +\n '` (' +\n stringifyPosition({\n start: left.start,\n end: left.end\n }) +\n '): a different token (`' +\n right.type +\n '`, ' +\n stringifyPosition({\n start: right.start,\n end: right.end\n }) +\n ') is open'\n )\n } else {\n throw new Error(\n 'Cannot close document, a token (`' +\n right.type +\n '`, ' +\n stringifyPosition({\n start: right.start,\n end: right.end\n }) +\n ') is still open'\n )\n }\n}\n","/**\n * @typedef {import('mdast').Root} Root\n * @typedef {import('mdast-util-from-markdown').Options} FromMarkdownOptions\n * @typedef {import('unified').Parser} Parser\n * @typedef {import('unified').Processor} Processor\n */\n\n/**\n * @typedef {Omit} Options\n */\n\nimport {fromMarkdown} from 'mdast-util-from-markdown'\n\n/**\n * Add support for parsing from markdown.\n *\n * @param {Readonly | null | undefined} [options]\n * Configuration (optional).\n * @returns {undefined}\n * Nothing.\n */\nexport default function remarkParse(options) {\n /** @type {Processor} */\n // @ts-expect-error: TS in JSDoc generates wrong types if `this` is typed regularly.\n const self = this\n\n self.parser = parser\n\n /**\n * @type {Parser}\n */\n function parser(doc) {\n return fromMarkdown(doc, {\n ...self.data('settings'),\n ...options,\n // Note: these options are not in the readme.\n // The goal is for them to be set by plugins on `data` instead of being\n // passed by users.\n extensions: self.data('micromarkExtensions') || [],\n mdastExtensions: self.data('fromMarkdownExtensions') || []\n })\n }\n}\n","/**\n * Count how often a character (or substring) is used in a string.\n *\n * @param {string} value\n * Value to search in.\n * @param {string} character\n * Character (or substring) to look for.\n * @return {number}\n * Number of times `character` occurred in `value`.\n */\nexport function ccount(value, character) {\n const source = String(value)\n\n if (typeof character !== 'string') {\n throw new TypeError('Expected character')\n }\n\n let count = 0\n let index = source.indexOf(character)\n\n while (index !== -1) {\n count++\n index = source.indexOf(character, index + character.length)\n }\n\n return count\n}\n","export default function escapeStringRegexp(string) {\n\tif (typeof string !== 'string') {\n\t\tthrow new TypeError('Expected a string');\n\t}\n\n\t// Escape characters with special meaning either inside or outside character sets.\n\t// Use a simple backslash escape when it’s always valid, and a `\\xnn` escape when the simpler form would be disallowed by Unicode patterns’ stricter grammar.\n\treturn string\n\t\t.replace(/[|\\\\{}()[\\]^$+*?.]/g, '\\\\$&')\n\t\t.replace(/-/g, '\\\\x2d');\n}\n","/**\n * @typedef {import('mdast').Nodes} Nodes\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('mdast').PhrasingContent} PhrasingContent\n * @typedef {import('mdast').Root} Root\n * @typedef {import('mdast').Text} Text\n * @typedef {import('unist-util-visit-parents').Test} Test\n * @typedef 
{import('unist-util-visit-parents').VisitorResult} VisitorResult\n */\n\n/**\n * @typedef RegExpMatchObject\n * Info on the match.\n * @property {number} index\n * The index of the search at which the result was found.\n * @property {string} input\n * A copy of the search string in the text node.\n * @property {[...Array, Text]} stack\n * All ancestors of the text node, where the last node is the text itself.\n *\n * @typedef {RegExp | string} Find\n * Pattern to find.\n *\n * Strings are escaped and then turned into global expressions.\n *\n * @typedef {Array} FindAndReplaceList\n * Several find and replaces, in array form.\n *\n * @typedef {[Find, Replace?]} FindAndReplaceTuple\n * Find and replace in tuple form.\n *\n * @typedef {ReplaceFunction | string | null | undefined} Replace\n * Thing to replace with.\n *\n * @callback ReplaceFunction\n * Callback called when a search matches.\n * @param {...any} parameters\n * The parameters are the result of corresponding search expression:\n *\n * * `value` (`string`) — whole match\n * * `...capture` (`Array`) — matches from regex capture groups\n * * `match` (`RegExpMatchObject`) — info on the match\n * @returns {Array | PhrasingContent | string | false | null | undefined}\n * Thing to replace with.\n *\n * * when `null`, `undefined`, `''`, remove the match\n * * …or when `false`, do not replace at all\n * * …or when `string`, replace with a text node of that value\n * * …or when `Node` or `Array`, replace with those nodes\n *\n * @typedef {[RegExp, ReplaceFunction]} Pair\n * Normalized find and replace.\n *\n * @typedef {Array} Pairs\n * All find and replaced.\n *\n * @typedef Options\n * Configuration.\n * @property {Test | null | undefined} [ignore]\n * Test for which nodes to ignore (optional).\n */\n\nimport escape from 'escape-string-regexp'\nimport {visitParents} from 'unist-util-visit-parents'\nimport {convert} from 'unist-util-is'\n\n/**\n * Find patterns in a tree and replace them.\n *\n * The algorithm searches the tree in *preorder* for complete values in `Text`\n * nodes.\n * Partial matches are not supported.\n *\n * @param {Nodes} tree\n * Tree to change.\n * @param {FindAndReplaceList | FindAndReplaceTuple} list\n * Patterns to find.\n * @param {Options | null | undefined} [options]\n * Configuration (when `find` is not `Find`).\n * @returns {undefined}\n * Nothing.\n */\nexport function findAndReplace(tree, list, options) {\n const settings = options || {}\n const ignored = convert(settings.ignore || [])\n const pairs = toPairs(list)\n let pairIndex = -1\n\n while (++pairIndex < pairs.length) {\n visitParents(tree, 'text', visitor)\n }\n\n /** @type {import('unist-util-visit-parents').BuildVisitor} */\n function visitor(node, parents) {\n let index = -1\n /** @type {Parents | undefined} */\n let grandparent\n\n while (++index < parents.length) {\n const parent = parents[index]\n /** @type {Array | undefined} */\n const siblings = grandparent ? grandparent.children : undefined\n\n if (\n ignored(\n parent,\n siblings ? 
siblings.indexOf(parent) : undefined,\n grandparent\n )\n ) {\n return\n }\n\n grandparent = parent\n }\n\n if (grandparent) {\n return handler(node, parents)\n }\n }\n\n /**\n * Handle a text node which is not in an ignored parent.\n *\n * @param {Text} node\n * Text node.\n * @param {Array} parents\n * Parents.\n * @returns {VisitorResult}\n * Result.\n */\n function handler(node, parents) {\n const parent = parents[parents.length - 1]\n const find = pairs[pairIndex][0]\n const replace = pairs[pairIndex][1]\n let start = 0\n /** @type {Array} */\n const siblings = parent.children\n const index = siblings.indexOf(node)\n let change = false\n /** @type {Array} */\n let nodes = []\n\n find.lastIndex = 0\n\n let match = find.exec(node.value)\n\n while (match) {\n const position = match.index\n /** @type {RegExpMatchObject} */\n const matchObject = {\n index: match.index,\n input: match.input,\n stack: [...parents, node]\n }\n let value = replace(...match, matchObject)\n\n if (typeof value === 'string') {\n value = value.length > 0 ? {type: 'text', value} : undefined\n }\n\n // It wasn’t a match after all.\n if (value === false) {\n // False acts as if there was no match.\n // So we need to reset `lastIndex`, which currently being at the end of\n // the current match, to the beginning.\n find.lastIndex = position + 1\n } else {\n if (start !== position) {\n nodes.push({\n type: 'text',\n value: node.value.slice(start, position)\n })\n }\n\n if (Array.isArray(value)) {\n nodes.push(...value)\n } else if (value) {\n nodes.push(value)\n }\n\n start = position + match[0].length\n change = true\n }\n\n if (!find.global) {\n break\n }\n\n match = find.exec(node.value)\n }\n\n if (change) {\n if (start < node.value.length) {\n nodes.push({type: 'text', value: node.value.slice(start)})\n }\n\n parent.children.splice(index, 1, ...nodes)\n } else {\n nodes = [node]\n }\n\n return index + nodes.length\n }\n}\n\n/**\n * Turn a tuple or a list of tuples into pairs.\n *\n * @param {FindAndReplaceList | FindAndReplaceTuple} tupleOrList\n * Schema.\n * @returns {Pairs}\n * Clean pairs.\n */\nfunction toPairs(tupleOrList) {\n /** @type {Pairs} */\n const result = []\n\n if (!Array.isArray(tupleOrList)) {\n throw new TypeError('Expected find and replace tuple or list of tuples')\n }\n\n /** @type {FindAndReplaceList} */\n // @ts-expect-error: correct.\n const list =\n !tupleOrList[0] || Array.isArray(tupleOrList[0])\n ? tupleOrList\n : [tupleOrList]\n\n let index = -1\n\n while (++index < list.length) {\n const tuple = list[index]\n result.push([toExpression(tuple[0]), toFunction(tuple[1])])\n }\n\n return result\n}\n\n/**\n * Turn a find into an expression.\n *\n * @param {Find} find\n * Find.\n * @returns {RegExp}\n * Expression.\n */\nfunction toExpression(find) {\n return typeof find === 'string' ? new RegExp(escape(find), 'g') : find\n}\n\n/**\n * Turn a replace into a function.\n *\n * @param {Replace} replace\n * Replace.\n * @returns {ReplaceFunction}\n * Function.\n */\nfunction toFunction(replace) {\n return typeof replace === 'function'\n ? 
replace\n : function () {\n return replace\n }\n}\n","/**\n * @typedef {import('mdast').Link} Link\n * @typedef {import('mdast').PhrasingContent} PhrasingContent\n *\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n * @typedef {import('mdast-util-from-markdown').Transform} FromMarkdownTransform\n *\n * @typedef {import('mdast-util-to-markdown').ConstructName} ConstructName\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n *\n * @typedef {import('mdast-util-find-and-replace').RegExpMatchObject} RegExpMatchObject\n * @typedef {import('mdast-util-find-and-replace').ReplaceFunction} ReplaceFunction\n */\n\nimport {ccount} from 'ccount'\nimport {ok as assert} from 'devlop'\nimport {unicodePunctuation, unicodeWhitespace} from 'micromark-util-character'\nimport {findAndReplace} from 'mdast-util-find-and-replace'\n\n/** @type {ConstructName} */\nconst inConstruct = 'phrasing'\n/** @type {Array} */\nconst notInConstruct = ['autolink', 'link', 'image', 'label']\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM autolink\n * literals in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM autolink literals.\n */\nexport function gfmAutolinkLiteralFromMarkdown() {\n return {\n transforms: [transformGfmAutolinkLiterals],\n enter: {\n literalAutolink: enterLiteralAutolink,\n literalAutolinkEmail: enterLiteralAutolinkValue,\n literalAutolinkHttp: enterLiteralAutolinkValue,\n literalAutolinkWww: enterLiteralAutolinkValue\n },\n exit: {\n literalAutolink: exitLiteralAutolink,\n literalAutolinkEmail: exitLiteralAutolinkEmail,\n literalAutolinkHttp: exitLiteralAutolinkHttp,\n literalAutolinkWww: exitLiteralAutolinkWww\n }\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM autolink\n * literals in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM autolink literals.\n */\nexport function gfmAutolinkLiteralToMarkdown() {\n return {\n unsafe: [\n {\n character: '@',\n before: '[+\\\\-.\\\\w]',\n after: '[\\\\-.\\\\w]',\n inConstruct,\n notInConstruct\n },\n {\n character: '.',\n before: '[Ww]',\n after: '[\\\\-.\\\\w]',\n inConstruct,\n notInConstruct\n },\n {\n character: ':',\n before: '[ps]',\n after: '\\\\/',\n inConstruct,\n notInConstruct\n }\n ]\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterLiteralAutolink(token) {\n this.enter({type: 'link', title: null, url: '', children: []}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterLiteralAutolinkValue(token) {\n this.config.enter.autolinkProtocol.call(this, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolinkHttp(token) {\n this.config.exit.autolinkProtocol.call(this, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolinkWww(token) {\n this.config.exit.data.call(this, token)\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'link')\n node.url = 'http://' + this.sliceSerialize(token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolinkEmail(token) {\n this.config.exit.autolinkEmail.call(this, token)\n}\n\n/**\n * 
@this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolink(token) {\n this.exit(token)\n}\n\n/** @type {FromMarkdownTransform} */\nfunction transformGfmAutolinkLiterals(tree) {\n findAndReplace(\n tree,\n [\n [/(https?:\\/\\/|www(?=\\.))([-.\\w]+)([^ \\t\\r\\n]*)/gi, findUrl],\n [/([-.\\w+]+)@([-\\w]+(?:\\.[-\\w]+)+)/g, findEmail]\n ],\n {ignore: ['link', 'linkReference']}\n )\n}\n\n/**\n * @type {ReplaceFunction}\n * @param {string} _\n * @param {string} protocol\n * @param {string} domain\n * @param {string} path\n * @param {RegExpMatchObject} match\n * @returns {Array | Link | false}\n */\n// eslint-disable-next-line max-params\nfunction findUrl(_, protocol, domain, path, match) {\n let prefix = ''\n\n // Not an expected previous character.\n if (!previous(match)) {\n return false\n }\n\n // Treat `www` as part of the domain.\n if (/^w/i.test(protocol)) {\n domain = protocol + domain\n protocol = ''\n prefix = 'http://'\n }\n\n if (!isCorrectDomain(domain)) {\n return false\n }\n\n const parts = splitUrl(domain + path)\n\n if (!parts[0]) return false\n\n /** @type {Link} */\n const result = {\n type: 'link',\n title: null,\n url: prefix + protocol + parts[0],\n children: [{type: 'text', value: protocol + parts[0]}]\n }\n\n if (parts[1]) {\n return [result, {type: 'text', value: parts[1]}]\n }\n\n return result\n}\n\n/**\n * @type {ReplaceFunction}\n * @param {string} _\n * @param {string} atext\n * @param {string} label\n * @param {RegExpMatchObject} match\n * @returns {Link | false}\n */\nfunction findEmail(_, atext, label, match) {\n if (\n // Not an expected previous character.\n !previous(match, true) ||\n // Label ends in not allowed character.\n /[-\\d_]$/.test(label)\n ) {\n return false\n }\n\n return {\n type: 'link',\n title: null,\n url: 'mailto:' + atext + '@' + label,\n children: [{type: 'text', value: atext + '@' + label}]\n }\n}\n\n/**\n * @param {string} domain\n * @returns {boolean}\n */\nfunction isCorrectDomain(domain) {\n const parts = domain.split('.')\n\n if (\n parts.length < 2 ||\n (parts[parts.length - 1] &&\n (/_/.test(parts[parts.length - 1]) ||\n !/[a-zA-Z\\d]/.test(parts[parts.length - 1]))) ||\n (parts[parts.length - 2] &&\n (/_/.test(parts[parts.length - 2]) ||\n !/[a-zA-Z\\d]/.test(parts[parts.length - 2])))\n ) {\n return false\n }\n\n return true\n}\n\n/**\n * @param {string} url\n * @returns {[string, string | undefined]}\n */\nfunction splitUrl(url) {\n const trailExec = /[!\"&'),.:;<>?\\]}]+$/.exec(url)\n\n if (!trailExec) {\n return [url, undefined]\n }\n\n url = url.slice(0, trailExec.index)\n\n let trail = trailExec[0]\n let closingParenIndex = trail.indexOf(')')\n const openingParens = ccount(url, '(')\n let closingParens = ccount(url, ')')\n\n while (closingParenIndex !== -1 && openingParens > closingParens) {\n url += trail.slice(0, closingParenIndex + 1)\n trail = trail.slice(closingParenIndex + 1)\n closingParenIndex = trail.indexOf(')')\n closingParens++\n }\n\n return [url, trail]\n}\n\n/**\n * @param {RegExpMatchObject} match\n * @param {boolean | null | undefined} [email=false]\n * @returns {boolean}\n */\nfunction previous(match, email) {\n const code = match.input.charCodeAt(match.index - 1)\n\n return (\n (match.index === 0 ||\n unicodeWhitespace(code) ||\n unicodePunctuation(code)) &&\n (!email || code !== 47)\n )\n}\n","/**\n * @typedef {import('mdast').FootnoteDefinition} FootnoteDefinition\n * @typedef {import('mdast').FootnoteReference} FootnoteReference\n * @typedef 
{import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').Map} Map\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n */\n\nimport {ok as assert} from 'devlop'\nimport {normalizeIdentifier} from 'micromark-util-normalize-identifier'\n\nfootnoteReference.peek = footnoteReferencePeek\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM footnotes\n * in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown`.\n */\nexport function gfmFootnoteFromMarkdown() {\n return {\n enter: {\n gfmFootnoteDefinition: enterFootnoteDefinition,\n gfmFootnoteDefinitionLabelString: enterFootnoteDefinitionLabelString,\n gfmFootnoteCall: enterFootnoteCall,\n gfmFootnoteCallString: enterFootnoteCallString\n },\n exit: {\n gfmFootnoteDefinition: exitFootnoteDefinition,\n gfmFootnoteDefinitionLabelString: exitFootnoteDefinitionLabelString,\n gfmFootnoteCall: exitFootnoteCall,\n gfmFootnoteCallString: exitFootnoteCallString\n }\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM footnotes\n * in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown`.\n */\nexport function gfmFootnoteToMarkdown() {\n return {\n // This is on by default already.\n unsafe: [{character: '[', inConstruct: ['phrasing', 'label', 'reference']}],\n handlers: {footnoteDefinition, footnoteReference}\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteDefinition(token) {\n this.enter(\n {type: 'footnoteDefinition', identifier: '', label: '', children: []},\n token\n )\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteDefinitionLabelString() {\n this.buffer()\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteDefinitionLabelString(token) {\n const label = this.resume()\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'footnoteDefinition')\n node.label = label\n node.identifier = normalizeIdentifier(\n this.sliceSerialize(token)\n ).toLowerCase()\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteDefinition(token) {\n this.exit(token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteCall(token) {\n this.enter({type: 'footnoteReference', identifier: '', label: ''}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteCallString() {\n this.buffer()\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteCallString(token) {\n const label = this.resume()\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'footnoteReference')\n node.label = label\n node.identifier = normalizeIdentifier(\n this.sliceSerialize(token)\n ).toLowerCase()\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteCall(token) {\n this.exit(token)\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {FootnoteReference} node\n */\nfunction footnoteReference(node, _, state, info) {\n const tracker = state.createTracker(info)\n let value = 
tracker.move('[^')\n const exit = state.enter('footnoteReference')\n const subexit = state.enter('reference')\n value += tracker.move(\n state.safe(state.associationId(node), {\n ...tracker.current(),\n before: value,\n after: ']'\n })\n )\n subexit()\n exit()\n value += tracker.move(']')\n return value\n}\n\n/** @type {ToMarkdownHandle} */\nfunction footnoteReferencePeek() {\n return '['\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {FootnoteDefinition} node\n */\nfunction footnoteDefinition(node, _, state, info) {\n const tracker = state.createTracker(info)\n let value = tracker.move('[^')\n const exit = state.enter('footnoteDefinition')\n const subexit = state.enter('label')\n value += tracker.move(\n state.safe(state.associationId(node), {\n ...tracker.current(),\n before: value,\n after: ']'\n })\n )\n subexit()\n value += tracker.move(\n ']:' + (node.children && node.children.length > 0 ? ' ' : '')\n )\n tracker.shift(4)\n value += tracker.move(\n state.indentLines(state.containerFlow(node, tracker.current()), map)\n )\n exit()\n\n return value\n}\n\n/** @type {Map} */\nfunction map(line, index, blank) {\n if (index === 0) {\n return line\n }\n\n return (blank ? '' : ' ') + line\n}\n","/**\n * @typedef {import('mdast').Delete} Delete\n *\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n *\n * @typedef {import('mdast-util-to-markdown').ConstructName} ConstructName\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n */\n\n/**\n * List of constructs that occur in phrasing (paragraphs, headings), but cannot\n * contain strikethrough.\n * So they sort of cancel each other out.\n * Note: could use a better name.\n *\n * Note: keep in sync with: \n *\n * @type {Array}\n */\nconst constructsWithoutStrikethrough = [\n 'autolink',\n 'destinationLiteral',\n 'destinationRaw',\n 'reference',\n 'titleQuote',\n 'titleApostrophe'\n]\n\nhandleDelete.peek = peekDelete\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM\n * strikethrough in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown` to enable GFM strikethrough.\n */\nexport function gfmStrikethroughFromMarkdown() {\n return {\n canContainEols: ['delete'],\n enter: {strikethrough: enterStrikethrough},\n exit: {strikethrough: exitStrikethrough}\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM\n * strikethrough in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM strikethrough.\n */\nexport function gfmStrikethroughToMarkdown() {\n return {\n unsafe: [\n {\n character: '~',\n inConstruct: 'phrasing',\n notInConstruct: constructsWithoutStrikethrough\n }\n ],\n handlers: {delete: handleDelete}\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterStrikethrough(token) {\n this.enter({type: 'delete', children: []}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitStrikethrough(token) {\n this.exit(token)\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {Delete} node\n */\nfunction handleDelete(node, _, state, info) {\n const tracker = state.createTracker(info)\n const exit = state.enter('strikethrough')\n let value = 
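A round-trip sketch for the footnote utilities above, assuming the companion `micromark-extension-gfm-footnote` syntax extension alongside `mdast-util-from-markdown` and `mdast-util-to-markdown`; the document is illustrative.

```js
import {fromMarkdown} from 'mdast-util-from-markdown'
import {toMarkdown} from 'mdast-util-to-markdown'
import {gfmFootnote} from 'micromark-extension-gfm-footnote'
import {
  gfmFootnoteFromMarkdown,
  gfmFootnoteToMarkdown
} from 'mdast-util-gfm-footnote'

const doc = 'A claim.[^note]\n\n[^note]: Supporting detail.\n'

const tree = fromMarkdown(doc, {
  extensions: [gfmFootnote()],
  mdastExtensions: [gfmFootnoteFromMarkdown()]
})

// Serializing again produces a `[^note]` call and a `[^note]:` definition,
// with continuation lines indented by the `map` function above.
console.log(toMarkdown(tree, {extensions: [gfmFootnoteToMarkdown()]}))
```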
tracker.move('~~')\n value += state.containerPhrasing(node, {\n ...tracker.current(),\n before: value,\n after: '~'\n })\n value += tracker.move('~~')\n exit()\n return value\n}\n\n/** @type {ToMarkdownHandle} */\nfunction peekDelete() {\n return '~'\n}\n","/**\n * @typedef Options\n * Configuration (optional).\n * @property {string|null|ReadonlyArray} [align]\n * One style for all columns, or styles for their respective columns.\n * Each style is either `'l'` (left), `'r'` (right), or `'c'` (center).\n * Other values are treated as `''`, which doesn’t place the colon in the\n * alignment row but does align left.\n * *Only the lowercased first character is used, so `Right` is fine.*\n * @property {boolean} [padding=true]\n * Whether to add a space of padding between delimiters and cells.\n *\n * When `true`, there is padding:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * When `false`, there is no padding:\n *\n * ```markdown\n * |Alpha|B |\n * |-----|-----|\n * |C |Delta|\n * ```\n * @property {boolean} [delimiterStart=true]\n * Whether to begin each row with the delimiter.\n *\n * > 👉 **Note**: please don’t use this: it could create fragile structures\n * > that aren’t understandable to some markdown parsers.\n *\n * When `true`, there are starting delimiters:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * When `false`, there are no starting delimiters:\n *\n * ```markdown\n * Alpha | B |\n * ----- | ----- |\n * C | Delta |\n * ```\n * @property {boolean} [delimiterEnd=true]\n * Whether to end each row with the delimiter.\n *\n * > 👉 **Note**: please don’t use this: it could create fragile structures\n * > that aren’t understandable to some markdown parsers.\n *\n * When `true`, there are ending delimiters:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * When `false`, there are no ending delimiters:\n *\n * ```markdown\n * | Alpha | B\n * | ----- | -----\n * | C | Delta\n * ```\n * @property {boolean} [alignDelimiters=true]\n * Whether to align the delimiters.\n * By default, they are aligned:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * Pass `false` to make them staggered:\n *\n * ```markdown\n * | Alpha | B |\n * | - | - |\n * | C | Delta |\n * ```\n * @property {(value: string) => number} [stringLength]\n * Function to detect the length of table cell content.\n * This is used when aligning the delimiters (`|`) between table cells.\n * Full-width characters and emoji mess up delimiter alignment when viewing\n * the markdown source.\n * To fix this, you can pass this function, which receives the cell content\n * and returns its “visible” size.\n * Note that what is and isn’t visible depends on where the text is displayed.\n *\n * Without such a function, the following:\n *\n * ```js\n * markdownTable([\n * ['Alpha', 'Bravo'],\n * ['中文', 'Charlie'],\n * ['👩❤️👩', 'Delta']\n * ])\n * ```\n *\n * Yields:\n *\n * ```markdown\n * | Alpha | Bravo |\n * | - | - |\n * | 中文 | Charlie |\n * | 👩❤️👩 | Delta |\n * ```\n *\n * With [`string-width`](https://github.com/sindresorhus/string-width):\n *\n * ```js\n * import stringWidth from 'string-width'\n *\n * markdownTable(\n * [\n * ['Alpha', 'Bravo'],\n * ['中文', 'Charlie'],\n * ['👩❤️👩', 'Delta']\n * ],\n * {stringLength: stringWidth}\n * )\n * ```\n *\n * Yields:\n *\n * ```markdown\n * | Alpha | Bravo |\n * | ----- | ------- |\n * | 中文 | Charlie |\n * | 👩❤️👩 | 
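A small sketch of the strikethrough handlers above via the public `toMarkdown` API; the tree is built by hand and its values are illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'
import {gfmStrikethroughToMarkdown} from 'mdast-util-gfm-strikethrough'

const tree = {
  type: 'paragraph',
  children: [
    {type: 'text', value: 'Some '},
    {type: 'delete', children: [{type: 'text', value: 'struck'}]},
    {type: 'text', value: ' text with a literal ~ tilde.'}
  ]
}

// `handleDelete` wraps the children in `~~`; the lone tilde in plain text is
// escaped because `~` is registered as unsafe in `phrasing`.
console.log(toMarkdown(tree, {extensions: [gfmStrikethroughToMarkdown()]}))
```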
Delta |\n * ```\n */\n\n/**\n * @typedef {Options} MarkdownTableOptions\n * @todo\n * Remove next major.\n */\n\n/**\n * Generate a markdown ([GFM](https://docs.github.com/en/github/writing-on-github/working-with-advanced-formatting/organizing-information-with-tables)) table..\n *\n * @param {ReadonlyArray>} table\n * Table data (matrix of strings).\n * @param {Options} [options]\n * Configuration (optional).\n * @returns {string}\n */\nexport function markdownTable(table, options = {}) {\n const align = (options.align || []).concat()\n const stringLength = options.stringLength || defaultStringLength\n /** @type {Array} Character codes as symbols for alignment per column. */\n const alignments = []\n /** @type {Array>} Cells per row. */\n const cellMatrix = []\n /** @type {Array>} Sizes of each cell per row. */\n const sizeMatrix = []\n /** @type {Array} */\n const longestCellByColumn = []\n let mostCellsPerRow = 0\n let rowIndex = -1\n\n // This is a superfluous loop if we don’t align delimiters, but otherwise we’d\n // do superfluous work when aligning, so optimize for aligning.\n while (++rowIndex < table.length) {\n /** @type {Array} */\n const row = []\n /** @type {Array} */\n const sizes = []\n let columnIndex = -1\n\n if (table[rowIndex].length > mostCellsPerRow) {\n mostCellsPerRow = table[rowIndex].length\n }\n\n while (++columnIndex < table[rowIndex].length) {\n const cell = serialize(table[rowIndex][columnIndex])\n\n if (options.alignDelimiters !== false) {\n const size = stringLength(cell)\n sizes[columnIndex] = size\n\n if (\n longestCellByColumn[columnIndex] === undefined ||\n size > longestCellByColumn[columnIndex]\n ) {\n longestCellByColumn[columnIndex] = size\n }\n }\n\n row.push(cell)\n }\n\n cellMatrix[rowIndex] = row\n sizeMatrix[rowIndex] = sizes\n }\n\n // Figure out which alignments to use.\n let columnIndex = -1\n\n if (typeof align === 'object' && 'length' in align) {\n while (++columnIndex < mostCellsPerRow) {\n alignments[columnIndex] = toAlignment(align[columnIndex])\n }\n } else {\n const code = toAlignment(align)\n\n while (++columnIndex < mostCellsPerRow) {\n alignments[columnIndex] = code\n }\n }\n\n // Inject the alignment row.\n columnIndex = -1\n /** @type {Array} */\n const row = []\n /** @type {Array} */\n const sizes = []\n\n while (++columnIndex < mostCellsPerRow) {\n const code = alignments[columnIndex]\n let before = ''\n let after = ''\n\n if (code === 99 /* `c` */) {\n before = ':'\n after = ':'\n } else if (code === 108 /* `l` */) {\n before = ':'\n } else if (code === 114 /* `r` */) {\n after = ':'\n }\n\n // There *must* be at least one hyphen-minus in each alignment cell.\n let size =\n options.alignDelimiters === false\n ? 
1\n : Math.max(\n 1,\n longestCellByColumn[columnIndex] - before.length - after.length\n )\n\n const cell = before + '-'.repeat(size) + after\n\n if (options.alignDelimiters !== false) {\n size = before.length + size + after.length\n\n if (size > longestCellByColumn[columnIndex]) {\n longestCellByColumn[columnIndex] = size\n }\n\n sizes[columnIndex] = size\n }\n\n row[columnIndex] = cell\n }\n\n // Inject the alignment row.\n cellMatrix.splice(1, 0, row)\n sizeMatrix.splice(1, 0, sizes)\n\n rowIndex = -1\n /** @type {Array} */\n const lines = []\n\n while (++rowIndex < cellMatrix.length) {\n const row = cellMatrix[rowIndex]\n const sizes = sizeMatrix[rowIndex]\n columnIndex = -1\n /** @type {Array} */\n const line = []\n\n while (++columnIndex < mostCellsPerRow) {\n const cell = row[columnIndex] || ''\n let before = ''\n let after = ''\n\n if (options.alignDelimiters !== false) {\n const size =\n longestCellByColumn[columnIndex] - (sizes[columnIndex] || 0)\n const code = alignments[columnIndex]\n\n if (code === 114 /* `r` */) {\n before = ' '.repeat(size)\n } else if (code === 99 /* `c` */) {\n if (size % 2) {\n before = ' '.repeat(size / 2 + 0.5)\n after = ' '.repeat(size / 2 - 0.5)\n } else {\n before = ' '.repeat(size / 2)\n after = before\n }\n } else {\n after = ' '.repeat(size)\n }\n }\n\n if (options.delimiterStart !== false && !columnIndex) {\n line.push('|')\n }\n\n if (\n options.padding !== false &&\n // Don’t add the opening space if we’re not aligning and the cell is\n // empty: there will be a closing space.\n !(options.alignDelimiters === false && cell === '') &&\n (options.delimiterStart !== false || columnIndex)\n ) {\n line.push(' ')\n }\n\n if (options.alignDelimiters !== false) {\n line.push(before)\n }\n\n line.push(cell)\n\n if (options.alignDelimiters !== false) {\n line.push(after)\n }\n\n if (options.padding !== false) {\n line.push(' ')\n }\n\n if (\n options.delimiterEnd !== false ||\n columnIndex !== mostCellsPerRow - 1\n ) {\n line.push('|')\n }\n }\n\n lines.push(\n options.delimiterEnd === false\n ? line.join('').replace(/ +$/, '')\n : line.join('')\n )\n }\n\n return lines.join('\\n')\n}\n\n/**\n * @param {string|null|undefined} [value]\n * @returns {string}\n */\nfunction serialize(value) {\n return value === null || value === undefined ? '' : String(value)\n}\n\n/**\n * @param {string} value\n * @returns {number}\n */\nfunction defaultStringLength(value) {\n return value.length\n}\n\n/**\n * @param {string|null|undefined} value\n * @returns {number}\n */\nfunction toAlignment(value) {\n const code = typeof value === 'string' ? value.codePointAt(0) : 0\n\n return code === 67 /* `C` */ || code === 99 /* `c` */\n ? 99 /* `c` */\n : code === 76 /* `L` */ || code === 108 /* `l` */\n ? 108 /* `l` */\n : code === 82 /* `R` */ || code === 114 /* `r` */\n ? 
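A usage sketch for `markdownTable` above with per-column alignment; the data is illustrative and the output in the comment follows from the sizing and alignment logic in this file.

```js
import {markdownTable} from 'markdown-table'

const value = markdownTable(
  [
    ['Name', 'Amount', 'Note'],
    ['Alpha', '1.23', 'first'],
    ['Bravo', '45', '']
  ],
  {align: ['l', 'r', 'c']}
)

console.log(value)
// | Name  | Amount |  Note |
// | :---- | -----: | :---: |
// | Alpha |   1.23 | first |
// | Bravo |     45 |       |
```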
114 /* `r` */\n : 0\n}\n","/**\n * @typedef {import('mdast').Blockquote} Blockquote\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').Map} Map\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {Blockquote} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function blockquote(node, _, state, info) {\n const exit = state.enter('blockquote')\n const tracker = state.createTracker(info)\n tracker.move('> ')\n tracker.shift(2)\n const value = state.indentLines(\n state.containerFlow(node, tracker.current()),\n map\n )\n exit()\n return value\n}\n\n/** @type {Map} */\nfunction map(line, _, blank) {\n return '>' + (blank ? '' : ' ') + line\n}\n","/**\n * @typedef {import('../types.js').ConstructName} ConstructName\n * @typedef {import('../types.js').Unsafe} Unsafe\n */\n\n/**\n * @param {Array} stack\n * @param {Unsafe} pattern\n * @returns {boolean}\n */\nexport function patternInScope(stack, pattern) {\n return (\n listInScope(stack, pattern.inConstruct, true) &&\n !listInScope(stack, pattern.notInConstruct, false)\n )\n}\n\n/**\n * @param {Array} stack\n * @param {Unsafe['inConstruct']} list\n * @param {boolean} none\n * @returns {boolean}\n */\nfunction listInScope(stack, list, none) {\n if (typeof list === 'string') {\n list = [list]\n }\n\n if (!list || list.length === 0) {\n return none\n }\n\n let index = -1\n\n while (++index < list.length) {\n if (stack.includes(list[index])) {\n return true\n }\n }\n\n return false\n}\n","/**\n * @typedef {import('mdast').Break} Break\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {patternInScope} from '../util/pattern-in-scope.js'\n\n/**\n * @param {Break} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function hardBreak(_, _1, state, info) {\n let index = -1\n\n while (++index < state.unsafe.length) {\n // If we can’t put eols in this construct (setext headings, tables), use a\n // space instead.\n if (\n state.unsafe[index].character === '\\n' &&\n patternInScope(state.stack, state.unsafe[index])\n ) {\n return /[ \\t]/.test(info.before) ? 
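A short sketch of the `blockquote` handler above through the public `toMarkdown` API, with a hand-built tree.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const tree = {
  type: 'blockquote',
  children: [
    {type: 'paragraph', children: [{type: 'text', value: 'alpha'}]},
    {type: 'paragraph', children: [{type: 'text', value: 'bravo'}]}
  ]
}

// `map` prefixes non-blank lines with `> ` and blank lines with `>`:
//
// > alpha
// >
// > bravo
console.log(toMarkdown(tree))
```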
'' : ' '\n }\n }\n\n return '\\\\\\n'\n}\n","/**\n * Get the count of the longest repeating streak of `substring` in `value`.\n *\n * @param {string} value\n * Content to search in.\n * @param {string} substring\n * Substring to look for, typically one character.\n * @returns {number}\n * Count of most frequent adjacent `substring`s in `value`.\n */\nexport function longestStreak(value, substring) {\n const source = String(value)\n let index = source.indexOf(substring)\n let expected = index\n let count = 0\n let max = 0\n\n if (typeof substring !== 'string') {\n throw new TypeError('Expected substring')\n }\n\n while (index !== -1) {\n if (index === expected) {\n if (++count > max) {\n max = count\n }\n } else {\n count = 1\n }\n\n expected = index + substring.length\n index = source.indexOf(substring, expected)\n }\n\n return max\n}\n","/**\n * @typedef {import('mdast').Code} Code\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {Code} node\n * @param {State} state\n * @returns {boolean}\n */\nexport function formatCodeAsIndented(node, state) {\n return Boolean(\n state.options.fences === false &&\n node.value &&\n // If there’s no info…\n !node.lang &&\n // And there’s a non-whitespace character…\n /[^ \\r\\n]/.test(node.value) &&\n // And the value doesn’t start or end in a blank…\n !/^[\\t ]*(?:[\\r\\n]|$)|(?:^|[\\r\\n])[\\t ]*$/.test(node.value)\n )\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkFence(state) {\n const marker = state.options.fence || '`'\n\n if (marker !== '`' && marker !== '~') {\n throw new Error(\n 'Cannot serialize code with `' +\n marker +\n '` for `options.fence`, expected `` ` `` or `~`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('mdast').Code} Code\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').Map} Map\n * @typedef {import('../types.js').State} State\n */\n\nimport {longestStreak} from 'longest-streak'\nimport {formatCodeAsIndented} from '../util/format-code-as-indented.js'\nimport {checkFence} from '../util/check-fence.js'\n\n/**\n * @param {Code} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function code(node, _, state, info) {\n const marker = checkFence(state)\n const raw = node.value || ''\n const suffix = marker === '`' ? 
'GraveAccent' : 'Tilde'\n\n if (formatCodeAsIndented(node, state)) {\n const exit = state.enter('codeIndented')\n const value = state.indentLines(raw, map)\n exit()\n return value\n }\n\n const tracker = state.createTracker(info)\n const sequence = marker.repeat(Math.max(longestStreak(raw, marker) + 1, 3))\n const exit = state.enter('codeFenced')\n let value = tracker.move(sequence)\n\n if (node.lang) {\n const subexit = state.enter(`codeFencedLang${suffix}`)\n value += tracker.move(\n state.safe(node.lang, {\n before: value,\n after: ' ',\n encode: ['`'],\n ...tracker.current()\n })\n )\n subexit()\n }\n\n if (node.lang && node.meta) {\n const subexit = state.enter(`codeFencedMeta${suffix}`)\n value += tracker.move(' ')\n value += tracker.move(\n state.safe(node.meta, {\n before: value,\n after: '\\n',\n encode: ['`'],\n ...tracker.current()\n })\n )\n subexit()\n }\n\n value += tracker.move('\\n')\n\n if (raw) {\n value += tracker.move(raw + '\\n')\n }\n\n value += tracker.move(sequence)\n exit()\n return value\n}\n\n/** @type {Map} */\nfunction map(line, _, blank) {\n return (blank ? '' : ' ') + line\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkQuote(state) {\n const marker = state.options.quote || '\"'\n\n if (marker !== '\"' && marker !== \"'\") {\n throw new Error(\n 'Cannot serialize title with `' +\n marker +\n '` for `options.quote`, expected `\"`, or `\\'`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('mdast').Definition} Definition\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkQuote} from '../util/check-quote.js'\n\n/**\n * @param {Definition} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function definition(node, _, state, info) {\n const quote = checkQuote(state)\n const suffix = quote === '\"' ? 'Quote' : 'Apostrophe'\n const exit = state.enter('definition')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('[')\n value += tracker.move(\n state.safe(state.associationId(node), {\n before: value,\n after: ']',\n ...tracker.current()\n })\n )\n value += tracker.move(']: ')\n\n subexit()\n\n if (\n // If there’s no url, or…\n !node.url ||\n // If there are control characters or whitespace.\n /[\\0- \\u007F]/.test(node.url)\n ) {\n subexit = state.enter('destinationLiteral')\n value += tracker.move('<')\n value += tracker.move(\n state.safe(node.url, {before: value, after: '>', ...tracker.current()})\n )\n value += tracker.move('>')\n } else {\n // No whitespace, raw is prettier.\n subexit = state.enter('destinationRaw')\n value += tracker.move(\n state.safe(node.url, {\n before: value,\n after: node.title ? 
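A sketch of how the `code` handler above sizes fences, using the public `toMarkdown` API; the node is illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const node = {
  type: 'code',
  lang: 'md',
  meta: null,
  value: 'Use ``` to open a fence.'
}

// `longestStreak(value, '`')` is 3, so the surrounding fence becomes four
// backticks (`Math.max(3 + 1, 3)`).
console.log(toMarkdown(node))

// `fence: '~'` (validated by `checkFence`) switches to tildes instead.
console.log(toMarkdown(node, {fence: '~'}))
```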
' ' : '\\n',\n ...tracker.current()\n })\n )\n }\n\n subexit()\n\n if (node.title) {\n subexit = state.enter(`title${suffix}`)\n value += tracker.move(' ' + quote)\n value += tracker.move(\n state.safe(node.title, {\n before: value,\n after: quote,\n ...tracker.current()\n })\n )\n value += tracker.move(quote)\n subexit()\n }\n\n exit()\n\n return value\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkEmphasis(state) {\n const marker = state.options.emphasis || '*'\n\n if (marker !== '*' && marker !== '_') {\n throw new Error(\n 'Cannot serialize emphasis with `' +\n marker +\n '` for `options.emphasis`, expected `*`, or `_`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('mdast').Emphasis} Emphasis\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkEmphasis} from '../util/check-emphasis.js'\n\nemphasis.peek = emphasisPeek\n\n// To do: there are cases where emphasis cannot “form” depending on the\n// previous or next character of sequences.\n// There’s no way around that though, except for injecting zero-width stuff.\n// Do we need to safeguard against that?\n/**\n * @param {Emphasis} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function emphasis(node, _, state, info) {\n const marker = checkEmphasis(state)\n const exit = state.enter('emphasis')\n const tracker = state.createTracker(info)\n let value = tracker.move(marker)\n value += tracker.move(\n state.containerPhrasing(node, {\n before: value,\n after: marker,\n ...tracker.current()\n })\n )\n value += tracker.move(marker)\n exit()\n return value\n}\n\n/**\n * @param {Emphasis} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @returns {string}\n */\nfunction emphasisPeek(_, _1, state) {\n return state.options.emphasis || '*'\n}\n","/**\n * @typedef {import('mdast').Heading} Heading\n * @typedef {import('../types.js').State} State\n */\n\nimport {EXIT, visit} from 'unist-util-visit'\nimport {toString} from 'mdast-util-to-string'\n\n/**\n * @param {Heading} node\n * @param {State} state\n * @returns {boolean}\n */\nexport function formatHeadingAsSetext(node, state) {\n let literalWithBreak = false\n\n // Look for literals with a line break.\n // Note that this also\n visit(node, function (node) {\n if (\n ('value' in node && /\\r?\\n|\\r/.test(node.value)) ||\n node.type === 'break'\n ) {\n literalWithBreak = true\n return EXIT\n }\n })\n\n return Boolean(\n (!node.depth || node.depth < 3) &&\n toString(node) &&\n (state.options.setext || literalWithBreak)\n )\n}\n","/**\n * @typedef {import('mdast').Heading} Heading\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {formatHeadingAsSetext} from '../util/format-heading-as-setext.js'\n\n/**\n * @param {Heading} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function heading(node, _, state, info) {\n const rank = Math.max(Math.min(6, node.depth || 1), 1)\n const tracker = state.createTracker(info)\n\n if (formatHeadingAsSetext(node, state)) {\n const exit = state.enter('headingSetext')\n const subexit = state.enter('phrasing')\n const value = 
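A sketch of the `definition` handler and `checkQuote` above, again through `toMarkdown`; the node values are illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const node = {
  type: 'definition',
  identifier: 'alpha',
  label: 'alpha',
  url: 'https://example.com',
  title: 'Example'
}

// The URL has no whitespace or control characters, so the raw destination is
// used; `quote: "'"` switches the title quotes to apostrophes:
//
// [alpha]: https://example.com 'Example'
console.log(toMarkdown(node, {quote: "'"}))
```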
state.containerPhrasing(node, {\n ...tracker.current(),\n before: '\\n',\n after: '\\n'\n })\n subexit()\n exit()\n\n return (\n value +\n '\\n' +\n (rank === 1 ? '=' : '-').repeat(\n // The whole size…\n value.length -\n // Minus the position of the character after the last EOL (or\n // 0 if there is none)…\n (Math.max(value.lastIndexOf('\\r'), value.lastIndexOf('\\n')) + 1)\n )\n )\n }\n\n const sequence = '#'.repeat(rank)\n const exit = state.enter('headingAtx')\n const subexit = state.enter('phrasing')\n\n // Note: for proper tracking, we should reset the output positions when there\n // is no content returned, because then the space is not output.\n // Practically, in that case, there is no content, so it doesn’t matter that\n // we’ve tracked one too many characters.\n tracker.move(sequence + ' ')\n\n let value = state.containerPhrasing(node, {\n before: '# ',\n after: '\\n',\n ...tracker.current()\n })\n\n if (/^[\\t ]/.test(value)) {\n // To do: what effect has the character reference on tracking?\n value =\n '' +\n value.charCodeAt(0).toString(16).toUpperCase() +\n ';' +\n value.slice(1)\n }\n\n value = value ? sequence + ' ' + value : sequence\n\n if (state.options.closeAtx) {\n value += ' ' + sequence\n }\n\n subexit()\n exit()\n\n return value\n}\n","/**\n * @typedef {import('mdast').Html} Html\n */\n\nhtml.peek = htmlPeek\n\n/**\n * @param {Html} node\n * @returns {string}\n */\nexport function html(node) {\n return node.value || ''\n}\n\n/**\n * @returns {string}\n */\nfunction htmlPeek() {\n return '<'\n}\n","/**\n * @typedef {import('mdast').Image} Image\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkQuote} from '../util/check-quote.js'\n\nimage.peek = imagePeek\n\n/**\n * @param {Image} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function image(node, _, state, info) {\n const quote = checkQuote(state)\n const suffix = quote === '\"' ? 'Quote' : 'Apostrophe'\n const exit = state.enter('image')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('![')\n value += tracker.move(\n state.safe(node.alt, {before: value, after: ']', ...tracker.current()})\n )\n value += tracker.move('](')\n\n subexit()\n\n if (\n // If there’s no url but there is a title…\n (!node.url && node.title) ||\n // If there are control characters or whitespace.\n /[\\0- \\u007F]/.test(node.url)\n ) {\n subexit = state.enter('destinationLiteral')\n value += tracker.move('<')\n value += tracker.move(\n state.safe(node.url, {before: value, after: '>', ...tracker.current()})\n )\n value += tracker.move('>')\n } else {\n // No whitespace, raw is prettier.\n subexit = state.enter('destinationRaw')\n value += tracker.move(\n state.safe(node.url, {\n before: value,\n after: node.title ? 
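A sketch of the `heading` handler above, showing the ATX and setext paths; the node is illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const node = {
  type: 'heading',
  depth: 2,
  children: [{type: 'text', value: 'Changelog'}]
}

// Default is ATX; `closeAtx: true` appends a closing sequence: `## Changelog ##`.
console.log(toMarkdown(node, {closeAtx: true}))

// `setext: true` (for depth 1 or 2) underlines the text instead:
//
// Changelog
// ---------
console.log(toMarkdown(node, {setext: true}))
```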
' ' : ')',\n ...tracker.current()\n })\n )\n }\n\n subexit()\n\n if (node.title) {\n subexit = state.enter(`title${suffix}`)\n value += tracker.move(' ' + quote)\n value += tracker.move(\n state.safe(node.title, {\n before: value,\n after: quote,\n ...tracker.current()\n })\n )\n value += tracker.move(quote)\n subexit()\n }\n\n value += tracker.move(')')\n exit()\n\n return value\n}\n\n/**\n * @returns {string}\n */\nfunction imagePeek() {\n return '!'\n}\n","/**\n * @typedef {import('mdast').ImageReference} ImageReference\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimageReference.peek = imageReferencePeek\n\n/**\n * @param {ImageReference} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function imageReference(node, _, state, info) {\n const type = node.referenceType\n const exit = state.enter('imageReference')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('![')\n const alt = state.safe(node.alt, {\n before: value,\n after: ']',\n ...tracker.current()\n })\n value += tracker.move(alt + '][')\n\n subexit()\n // Hide the fact that we’re in phrasing, because escapes don’t work.\n const stack = state.stack\n state.stack = []\n subexit = state.enter('reference')\n // Note: for proper tracking, we should reset the output positions when we end\n // up making a `shortcut` reference, because then there is no brace output.\n // Practically, in that case, there is no content, so it doesn’t matter that\n // we’ve tracked one too many characters.\n const reference = state.safe(state.associationId(node), {\n before: value,\n after: ']',\n ...tracker.current()\n })\n subexit()\n state.stack = stack\n exit()\n\n if (type === 'full' || !alt || alt !== reference) {\n value += tracker.move(reference + ']')\n } else if (type === 'shortcut') {\n // Remove the unwanted `[`.\n value = value.slice(0, -1)\n } else {\n value += tracker.move(']')\n }\n\n return value\n}\n\n/**\n * @returns {string}\n */\nfunction imageReferencePeek() {\n return '!'\n}\n","/**\n * @typedef {import('mdast').InlineCode} InlineCode\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').State} State\n */\n\ninlineCode.peek = inlineCodePeek\n\n/**\n * @param {InlineCode} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @returns {string}\n */\nexport function inlineCode(node, _, state) {\n let value = node.value || ''\n let sequence = '`'\n let index = -1\n\n // If there is a single grave accent on its own in the code, use a fence of\n // two.\n // If there are two in a row, use one.\n while (new RegExp('(^|[^`])' + sequence + '([^`]|$)').test(value)) {\n sequence += '`'\n }\n\n // If this is not just spaces or eols (tabs don’t count), and either the\n // first or last character are a space, eol, or tick, then pad with spaces.\n if (\n /[^ \\r\\n]/.test(value) &&\n ((/^[ \\r\\n]/.test(value) && /[ \\r\\n]$/.test(value)) || /^`|`$/.test(value))\n ) {\n value = ' ' + value + ' '\n }\n\n // We have a potential problem: certain characters after eols could result in\n // blocks being seen.\n // For example, if someone injected the string `'\\n# b'`, then that would\n // result in an ATX heading.\n // We can’t escape characters in `inlineCode`, but because eols are\n // transformed to spaces when going from markdown to HTML anyway, we can swap\n // them 
out.\n while (++index < state.unsafe.length) {\n const pattern = state.unsafe[index]\n const expression = state.compilePattern(pattern)\n /** @type {RegExpExecArray | null} */\n let match\n\n // Only look for `atBreak`s.\n // Btw: note that `atBreak` patterns will always start the regex at LF or\n // CR.\n if (!pattern.atBreak) continue\n\n while ((match = expression.exec(value))) {\n let position = match.index\n\n // Support CRLF (patterns only look for one of the characters).\n if (\n value.charCodeAt(position) === 10 /* `\\n` */ &&\n value.charCodeAt(position - 1) === 13 /* `\\r` */\n ) {\n position--\n }\n\n value = value.slice(0, position) + ' ' + value.slice(match.index + 1)\n }\n }\n\n return sequence + value + sequence\n}\n\n/**\n * @returns {string}\n */\nfunction inlineCodePeek() {\n return '`'\n}\n","/**\n * @typedef {import('mdast').Link} Link\n * @typedef {import('../types.js').State} State\n */\n\nimport {toString} from 'mdast-util-to-string'\n\n/**\n * @param {Link} node\n * @param {State} state\n * @returns {boolean}\n */\nexport function formatLinkAsAutolink(node, state) {\n const raw = toString(node)\n\n return Boolean(\n !state.options.resourceLink &&\n // If there’s a url…\n node.url &&\n // And there’s a no title…\n !node.title &&\n // And the content of `node` is a single text node…\n node.children &&\n node.children.length === 1 &&\n node.children[0].type === 'text' &&\n // And if the url is the same as the content…\n (raw === node.url || 'mailto:' + raw === node.url) &&\n // And that starts w/ a protocol…\n /^[a-z][a-z+.-]+:/i.test(node.url) &&\n // And that doesn’t contain ASCII control codes (character escapes and\n // references don’t work), space, or angle brackets…\n !/[\\0- <>\\u007F]/.test(node.url)\n )\n}\n","/**\n * @typedef {import('mdast').Link} Link\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Exit} Exit\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkQuote} from '../util/check-quote.js'\nimport {formatLinkAsAutolink} from '../util/format-link-as-autolink.js'\n\nlink.peek = linkPeek\n\n/**\n * @param {Link} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function link(node, _, state, info) {\n const quote = checkQuote(state)\n const suffix = quote === '\"' ? 
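A sketch of the `inlineCode` handler above; the value is illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const tree = {
  type: 'paragraph',
  children: [
    {type: 'text', value: 'Call '},
    {type: 'inlineCode', value: 'a`b'},
    {type: 'text', value: '.'}
  ]
}

// The single backtick inside the value forces a two-backtick fence, so this
// serializes as: Call ``a`b``.
console.log(toMarkdown(tree))
```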
'Quote' : 'Apostrophe'\n const tracker = state.createTracker(info)\n /** @type {Exit} */\n let exit\n /** @type {Exit} */\n let subexit\n\n if (formatLinkAsAutolink(node, state)) {\n // Hide the fact that we’re in phrasing, because escapes don’t work.\n const stack = state.stack\n state.stack = []\n exit = state.enter('autolink')\n let value = tracker.move('<')\n value += tracker.move(\n state.containerPhrasing(node, {\n before: value,\n after: '>',\n ...tracker.current()\n })\n )\n value += tracker.move('>')\n exit()\n state.stack = stack\n return value\n }\n\n exit = state.enter('link')\n subexit = state.enter('label')\n let value = tracker.move('[')\n value += tracker.move(\n state.containerPhrasing(node, {\n before: value,\n after: '](',\n ...tracker.current()\n })\n )\n value += tracker.move('](')\n subexit()\n\n if (\n // If there’s no url but there is a title…\n (!node.url && node.title) ||\n // If there are control characters or whitespace.\n /[\\0- \\u007F]/.test(node.url)\n ) {\n subexit = state.enter('destinationLiteral')\n value += tracker.move('<')\n value += tracker.move(\n state.safe(node.url, {before: value, after: '>', ...tracker.current()})\n )\n value += tracker.move('>')\n } else {\n // No whitespace, raw is prettier.\n subexit = state.enter('destinationRaw')\n value += tracker.move(\n state.safe(node.url, {\n before: value,\n after: node.title ? ' ' : ')',\n ...tracker.current()\n })\n )\n }\n\n subexit()\n\n if (node.title) {\n subexit = state.enter(`title${suffix}`)\n value += tracker.move(' ' + quote)\n value += tracker.move(\n state.safe(node.title, {\n before: value,\n after: quote,\n ...tracker.current()\n })\n )\n value += tracker.move(quote)\n subexit()\n }\n\n value += tracker.move(')')\n\n exit()\n return value\n}\n\n/**\n * @param {Link} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @returns {string}\n */\nfunction linkPeek(node, _, state) {\n return formatLinkAsAutolink(node, state) ? 
'<' : '['\n}\n","/**\n * @typedef {import('mdast').LinkReference} LinkReference\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nlinkReference.peek = linkReferencePeek\n\n/**\n * @param {LinkReference} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function linkReference(node, _, state, info) {\n const type = node.referenceType\n const exit = state.enter('linkReference')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('[')\n const text = state.containerPhrasing(node, {\n before: value,\n after: ']',\n ...tracker.current()\n })\n value += tracker.move(text + '][')\n\n subexit()\n // Hide the fact that we’re in phrasing, because escapes don’t work.\n const stack = state.stack\n state.stack = []\n subexit = state.enter('reference')\n // Note: for proper tracking, we should reset the output positions when we end\n // up making a `shortcut` reference, because then there is no brace output.\n // Practically, in that case, there is no content, so it doesn’t matter that\n // we’ve tracked one too many characters.\n const reference = state.safe(state.associationId(node), {\n before: value,\n after: ']',\n ...tracker.current()\n })\n subexit()\n state.stack = stack\n exit()\n\n if (type === 'full' || !text || text !== reference) {\n value += tracker.move(reference + ']')\n } else if (type === 'shortcut') {\n // Remove the unwanted `[`.\n value = value.slice(0, -1)\n } else {\n value += tracker.move(']')\n }\n\n return value\n}\n\n/**\n * @returns {string}\n */\nfunction linkReferencePeek() {\n return '['\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkBullet(state) {\n const marker = state.options.bullet || '*'\n\n if (marker !== '*' && marker !== '+' && marker !== '-') {\n throw new Error(\n 'Cannot serialize items with `' +\n marker +\n '` for `options.bullet`, expected `*`, `+`, or `-`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkBullet} from './check-bullet.js'\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkBulletOther(state) {\n const bullet = checkBullet(state)\n const bulletOther = state.options.bulletOther\n\n if (!bulletOther) {\n return bullet === '*' ? '-' : '*'\n }\n\n if (bulletOther !== '*' && bulletOther !== '+' && bulletOther !== '-') {\n throw new Error(\n 'Cannot serialize items with `' +\n bulletOther +\n '` for `options.bulletOther`, expected `*`, `+`, or `-`'\n )\n }\n\n if (bulletOther === bullet) {\n throw new Error(\n 'Expected `bullet` (`' +\n bullet +\n '`) and `bulletOther` (`' +\n bulletOther +\n '`) to be different'\n )\n }\n\n return bulletOther\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkBulletOrdered(state) {\n const marker = state.options.bulletOrdered || '.'\n\n if (marker !== '.' 
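A sketch of the `link` handler and `formatLinkAsAutolink` above; the node is illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const tree = {
  type: 'paragraph',
  children: [
    {
      type: 'link',
      url: 'https://example.com',
      title: null,
      children: [{type: 'text', value: 'https://example.com'}]
    }
  ]
}

// A single text child equal to the URL, with no title, matches
// `formatLinkAsAutolink`, so this becomes `<https://example.com>`.
console.log(toMarkdown(tree))

// `resourceLink: true` forces `[https://example.com](https://example.com)`.
console.log(toMarkdown(tree, {resourceLink: true}))
```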
&& marker !== ')') {\n throw new Error(\n 'Cannot serialize items with `' +\n marker +\n '` for `options.bulletOrdered`, expected `.` or `)`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkRule(state) {\n const marker = state.options.rule || '*'\n\n if (marker !== '*' && marker !== '-' && marker !== '_') {\n throw new Error(\n 'Cannot serialize rules with `' +\n marker +\n '` for `options.rule`, expected `*`, `-`, or `_`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('mdast').List} List\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkBullet} from '../util/check-bullet.js'\nimport {checkBulletOther} from '../util/check-bullet-other.js'\nimport {checkBulletOrdered} from '../util/check-bullet-ordered.js'\nimport {checkRule} from '../util/check-rule.js'\n\n/**\n * @param {List} node\n * @param {Parents | undefined} parent\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function list(node, parent, state, info) {\n const exit = state.enter('list')\n const bulletCurrent = state.bulletCurrent\n /** @type {string} */\n let bullet = node.ordered ? checkBulletOrdered(state) : checkBullet(state)\n /** @type {string} */\n const bulletOther = node.ordered\n ? bullet === '.'\n ? ')'\n : '.'\n : checkBulletOther(state)\n let useDifferentMarker =\n parent && state.bulletLastUsed ? bullet === state.bulletLastUsed : false\n\n if (!node.ordered) {\n const firstListItem = node.children ? node.children[0] : undefined\n\n // If there’s an empty first list item directly in two list items,\n // we have to use a different bullet:\n //\n // ```markdown\n // * - *\n // ```\n //\n // …because otherwise it would become one big thematic break.\n if (\n // Bullet could be used as a thematic break marker:\n (bullet === '*' || bullet === '-') &&\n // Empty first list item:\n firstListItem &&\n (!firstListItem.children || !firstListItem.children[0]) &&\n // Directly in two other list items:\n state.stack[state.stack.length - 1] === 'list' &&\n state.stack[state.stack.length - 2] === 'listItem' &&\n state.stack[state.stack.length - 3] === 'list' &&\n state.stack[state.stack.length - 4] === 'listItem' &&\n // That are each the first child.\n state.indexStack[state.indexStack.length - 1] === 0 &&\n state.indexStack[state.indexStack.length - 2] === 0 &&\n state.indexStack[state.indexStack.length - 3] === 0\n ) {\n useDifferentMarker = true\n }\n\n // If there’s a thematic break at the start of the first list item,\n // we have to use a different bullet:\n //\n // ```markdown\n // * ---\n // ```\n //\n // …because otherwise it would become one big thematic break.\n if (checkRule(state) === bullet && firstListItem) {\n let index = -1\n\n while (++index < node.children.length) {\n const item = node.children[index]\n\n if (\n item &&\n item.type === 'listItem' &&\n item.children &&\n item.children[0] &&\n item.children[0].type === 'thematicBreak'\n ) {\n useDifferentMarker = true\n break\n }\n }\n }\n }\n\n if (useDifferentMarker) {\n bullet = bulletOther\n }\n\n state.bulletCurrent = bullet\n const value = state.containerFlow(node, info)\n state.bulletLastUsed = bullet\n state.bulletCurrent = bulletCurrent\n exit()\n return value\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n 
* @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkListItemIndent(state) {\n const style = state.options.listItemIndent || 'one'\n\n if (style !== 'tab' && style !== 'one' && style !== 'mixed') {\n throw new Error(\n 'Cannot serialize items with `' +\n style +\n '` for `options.listItemIndent`, expected `tab`, `one`, or `mixed`'\n )\n }\n\n return style\n}\n","/**\n * @typedef {import('mdast').ListItem} ListItem\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').Map} Map\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkBullet} from '../util/check-bullet.js'\nimport {checkListItemIndent} from '../util/check-list-item-indent.js'\n\n/**\n * @param {ListItem} node\n * @param {Parents | undefined} parent\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function listItem(node, parent, state, info) {\n const listItemIndent = checkListItemIndent(state)\n let bullet = state.bulletCurrent || checkBullet(state)\n\n // Add the marker value for ordered lists.\n if (parent && parent.type === 'list' && parent.ordered) {\n bullet =\n (typeof parent.start === 'number' && parent.start > -1\n ? parent.start\n : 1) +\n (state.options.incrementListMarker === false\n ? 0\n : parent.children.indexOf(node)) +\n bullet\n }\n\n let size = bullet.length + 1\n\n if (\n listItemIndent === 'tab' ||\n (listItemIndent === 'mixed' &&\n ((parent && parent.type === 'list' && parent.spread) || node.spread))\n ) {\n size = Math.ceil(size / 4) * 4\n }\n\n const tracker = state.createTracker(info)\n tracker.move(bullet + ' '.repeat(size - bullet.length))\n tracker.shift(size)\n const exit = state.enter('listItem')\n const value = state.indentLines(\n state.containerFlow(node, tracker.current()),\n map\n )\n exit()\n\n return value\n\n /** @type {Map} */\n function map(line, index, blank) {\n if (index) {\n return (blank ? '' : ' '.repeat(size)) + line\n }\n\n return (blank ? 
bullet : bullet + ' '.repeat(size - bullet.length)) + line\n }\n}\n","/**\n * @typedef {import('mdast').Paragraph} Paragraph\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {Paragraph} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function paragraph(node, _, state, info) {\n const exit = state.enter('paragraph')\n const subexit = state.enter('phrasing')\n const value = state.containerPhrasing(node, info)\n subexit()\n exit()\n return value\n}\n","/**\n * @typedef {import('mdast').PhrasingContent} PhrasingContent\n */\n\nimport {convert} from 'unist-util-is'\n\n/**\n * Check if the given value is *phrasing content*.\n *\n * > 👉 **Note**: Excludes `html`, which can be both phrasing or flow.\n *\n * @param node\n * Thing to check, typically `Node`.\n * @returns\n * Whether `value` is phrasing content.\n */\n\nexport const phrasing =\n /** @type {(node?: unknown) => node is PhrasingContent} */\n (\n convert([\n 'break',\n 'delete',\n 'emphasis',\n 'footnote',\n 'footnoteReference',\n 'image',\n 'imageReference',\n 'inlineCode',\n 'link',\n 'linkReference',\n 'strong',\n 'text'\n ])\n )\n","/**\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('mdast').Root} Root\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {phrasing} from 'mdast-util-phrasing'\n\n/**\n * @param {Root} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function root(node, _, state, info) {\n // Note: `html` nodes are ambiguous.\n const hasPhrasing = node.children.some(function (d) {\n return phrasing(d)\n })\n const fn = hasPhrasing ? 
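A sketch of the `list` and `listItem` handlers above with ordered-list options; the items are illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const list = {
  type: 'list',
  ordered: true,
  start: 1,
  spread: false,
  children: [
    {
      type: 'listItem',
      children: [{type: 'paragraph', children: [{type: 'text', value: 'alpha'}]}]
    },
    {
      type: 'listItem',
      children: [{type: 'paragraph', children: [{type: 'text', value: 'bravo'}]}]
    }
  ]
}

// Default: `1. alpha` then `2. bravo`.
console.log(toMarkdown(list))

// `incrementListMarker: false` repeats `1`, and `bulletOrdered: ')'`
// (validated by `checkBulletOrdered`) switches the marker: `1) alpha`.
console.log(toMarkdown(list, {bulletOrdered: ')', incrementListMarker: false}))
```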
state.containerPhrasing : state.containerFlow\n return fn.call(state, node, info)\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkStrong(state) {\n const marker = state.options.strong || '*'\n\n if (marker !== '*' && marker !== '_') {\n throw new Error(\n 'Cannot serialize strong with `' +\n marker +\n '` for `options.strong`, expected `*`, or `_`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('mdast').Strong} Strong\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkStrong} from '../util/check-strong.js'\n\nstrong.peek = strongPeek\n\n// To do: there are cases where emphasis cannot “form” depending on the\n// previous or next character of sequences.\n// There’s no way around that though, except for injecting zero-width stuff.\n// Do we need to safeguard against that?\n/**\n * @param {Strong} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function strong(node, _, state, info) {\n const marker = checkStrong(state)\n const exit = state.enter('strong')\n const tracker = state.createTracker(info)\n let value = tracker.move(marker + marker)\n value += tracker.move(\n state.containerPhrasing(node, {\n before: value,\n after: marker,\n ...tracker.current()\n })\n )\n value += tracker.move(marker + marker)\n exit()\n return value\n}\n\n/**\n * @param {Strong} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @returns {string}\n */\nfunction strongPeek(_, _1, state) {\n return state.options.strong || '*'\n}\n","/**\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('mdast').Text} Text\n * @typedef {import('../types.js').Info} Info\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {Text} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function text(node, _, state, info) {\n return state.safe(node.value, info)\n}\n","/**\n * @typedef {import('../types.js').Options} Options\n * @typedef {import('../types.js').State} State\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkRuleRepetition(state) {\n const repetition = state.options.ruleRepetition || 3\n\n if (repetition < 3) {\n throw new Error(\n 'Cannot serialize rules with repetition `' +\n repetition +\n '` for `options.ruleRepetition`, expected `3` or more'\n )\n }\n\n return repetition\n}\n","/**\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('mdast').ThematicBreak} ThematicBreak\n * @typedef {import('../types.js').State} State\n */\n\nimport {checkRuleRepetition} from '../util/check-rule-repetition.js'\nimport {checkRule} from '../util/check-rule.js'\n\n/**\n * @param {ThematicBreak} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @returns {string}\n */\nexport function thematicBreak(_, _1, state) {\n const value = (\n checkRule(state) + (state.options.ruleSpaces ? ' ' : '')\n ).repeat(checkRuleRepetition(state))\n\n return state.options.ruleSpaces ? 
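A sketch of the `strong` and `thematicBreak` handlers above and their options; the tree is illustrative.

```js
import {toMarkdown} from 'mdast-util-to-markdown'

const tree = {
  type: 'root',
  children: [
    {
      type: 'paragraph',
      children: [{type: 'strong', children: [{type: 'text', value: 'alpha'}]}]
    },
    {type: 'thematicBreak'}
  ]
}

// `strong: '_'` yields `__alpha__`; `rule: '-'`, `ruleRepetition: 5`, and
// `ruleSpaces: true` yield `- - - - -` for the thematic break.
console.log(
  toMarkdown(tree, {strong: '_', rule: '-', ruleRepetition: 5, ruleSpaces: true})
)
```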
value.slice(0, -1) : value\n}\n","import {blockquote} from './blockquote.js'\nimport {hardBreak} from './break.js'\nimport {code} from './code.js'\nimport {definition} from './definition.js'\nimport {emphasis} from './emphasis.js'\nimport {heading} from './heading.js'\nimport {html} from './html.js'\nimport {image} from './image.js'\nimport {imageReference} from './image-reference.js'\nimport {inlineCode} from './inline-code.js'\nimport {link} from './link.js'\nimport {linkReference} from './link-reference.js'\nimport {list} from './list.js'\nimport {listItem} from './list-item.js'\nimport {paragraph} from './paragraph.js'\nimport {root} from './root.js'\nimport {strong} from './strong.js'\nimport {text} from './text.js'\nimport {thematicBreak} from './thematic-break.js'\n\n/**\n * Default (CommonMark) handlers.\n */\nexport const handle = {\n blockquote,\n break: hardBreak,\n code,\n definition,\n emphasis,\n hardBreak,\n heading,\n html,\n image,\n imageReference,\n inlineCode,\n link,\n linkReference,\n list,\n listItem,\n paragraph,\n root,\n strong,\n text,\n thematicBreak\n}\n","/**\n * @typedef {import('mdast').InlineCode} InlineCode\n * @typedef {import('mdast').Table} Table\n * @typedef {import('mdast').TableCell} TableCell\n * @typedef {import('mdast').TableRow} TableRow\n *\n * @typedef {import('markdown-table').Options} MarkdownTableOptions\n *\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n *\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').State} State\n * @typedef {import('mdast-util-to-markdown').Info} Info\n */\n\n/**\n * @typedef Options\n * Configuration.\n * @property {boolean | null | undefined} [tableCellPadding=true]\n * Whether to add a space of padding between delimiters and cells (default:\n * `true`).\n * @property {boolean | null | undefined} [tablePipeAlign=true]\n * Whether to align the delimiters (default: `true`).\n * @property {MarkdownTableOptions['stringLength'] | null | undefined} [stringLength]\n * Function to detect the length of table cell content, used when aligning\n * the delimiters between cells (optional).\n */\n\nimport {ok as assert} from 'devlop'\nimport {markdownTable} from 'markdown-table'\nimport {defaultHandlers} from 'mdast-util-to-markdown'\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM tables in\n * markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown` to enable GFM tables.\n */\nexport function gfmTableFromMarkdown() {\n return {\n enter: {\n table: enterTable,\n tableData: enterCell,\n tableHeader: enterCell,\n tableRow: enterRow\n },\n exit: {\n codeText: exitCodeText,\n table: exitTable,\n tableData: exit,\n tableHeader: exit,\n tableRow: exit\n }\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterTable(token) {\n const align = token._align\n assert(align, 'expected `_align` on table')\n this.enter(\n {\n type: 'table',\n align: align.map(function (d) {\n return d === 'none' ? 
null : d\n }),\n children: []\n },\n token\n )\n this.data.inTable = true\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitTable(token) {\n this.exit(token)\n this.data.inTable = undefined\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterRow(token) {\n this.enter({type: 'tableRow', children: []}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exit(token) {\n this.exit(token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterCell(token) {\n this.enter({type: 'tableCell', children: []}, token)\n}\n\n// Overwrite the default code text data handler to unescape escaped pipes when\n// they are in tables.\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitCodeText(token) {\n let value = this.resume()\n\n if (this.data.inTable) {\n value = value.replace(/\\\\([\\\\|])/g, replace)\n }\n\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'inlineCode')\n node.value = value\n this.exit(token)\n}\n\n/**\n * @param {string} $0\n * @param {string} $1\n * @returns {string}\n */\nfunction replace($0, $1) {\n // Pipes work, backslashes don’t (but can’t escape pipes).\n return $1 === '|' ? $1 : $0\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM tables in\n * markdown.\n *\n * @param {Options | null | undefined} [options]\n * Configuration.\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM tables.\n */\nexport function gfmTableToMarkdown(options) {\n const settings = options || {}\n const padding = settings.tableCellPadding\n const alignDelimiters = settings.tablePipeAlign\n const stringLength = settings.stringLength\n const around = padding ? 
' ' : '|'\n\n return {\n unsafe: [\n {character: '\\r', inConstruct: 'tableCell'},\n {character: '\\n', inConstruct: 'tableCell'},\n // A pipe, when followed by a tab or space (padding), or a dash or colon\n // (unpadded delimiter row), could result in a table.\n {atBreak: true, character: '|', after: '[\\t :-]'},\n // A pipe in a cell must be encoded.\n {character: '|', inConstruct: 'tableCell'},\n // A colon must be followed by a dash, in which case it could start a\n // delimiter row.\n {atBreak: true, character: ':', after: '-'},\n // A delimiter row can also start with a dash, when followed by more\n // dashes, a colon, or a pipe.\n // This is a stricter version than the built in check for lists, thematic\n // breaks, and setex heading underlines though:\n // \n {atBreak: true, character: '-', after: '[:|-]'}\n ],\n handlers: {\n inlineCode: inlineCodeWithTable,\n table: handleTable,\n tableCell: handleTableCell,\n tableRow: handleTableRow\n }\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {Table} node\n */\n function handleTable(node, _, state, info) {\n return serializeData(handleTableAsData(node, state, info), node.align)\n }\n\n /**\n * This function isn’t really used normally, because we handle rows at the\n * table level.\n * But, if someone passes in a table row, this ensures we make somewhat sense.\n *\n * @type {ToMarkdownHandle}\n * @param {TableRow} node\n */\n function handleTableRow(node, _, state, info) {\n const row = handleTableRowAsData(node, state, info)\n const value = serializeData([row])\n // `markdown-table` will always add an align row\n return value.slice(0, value.indexOf('\\n'))\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {TableCell} node\n */\n function handleTableCell(node, _, state, info) {\n const exit = state.enter('tableCell')\n const subexit = state.enter('phrasing')\n const value = state.containerPhrasing(node, {\n ...info,\n before: around,\n after: around\n })\n subexit()\n exit()\n return value\n }\n\n /**\n * @param {Array>} matrix\n * @param {Array | null | undefined} [align]\n */\n function serializeData(matrix, align) {\n return markdownTable(matrix, {\n align,\n // @ts-expect-error: `markdown-table` types should support `null`.\n alignDelimiters,\n // @ts-expect-error: `markdown-table` types should support `null`.\n padding,\n // @ts-expect-error: `markdown-table` types should support `null`.\n stringLength\n })\n }\n\n /**\n * @param {Table} node\n * @param {State} state\n * @param {Info} info\n */\n function handleTableAsData(node, state, info) {\n const children = node.children\n let index = -1\n /** @type {Array>} */\n const result = []\n const subexit = state.enter('table')\n\n while (++index < children.length) {\n result[index] = handleTableRowAsData(children[index], state, info)\n }\n\n subexit()\n\n return result\n }\n\n /**\n * @param {TableRow} node\n * @param {State} state\n * @param {Info} info\n */\n function handleTableRowAsData(node, state, info) {\n const children = node.children\n let index = -1\n /** @type {Array} */\n const result = []\n const subexit = state.enter('tableRow')\n\n while (++index < children.length) {\n // Note: the positional info as used here is incorrect.\n // Making it correct would be impossible due to aligning cells?\n // And it would need copy/pasting `markdown-table` into this project.\n result[index] = handleTableCell(children[index], node, state, info)\n }\n\n subexit()\n\n return result\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {InlineCode} node\n */\n function 
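A sketch of `gfmTableToMarkdown` above; the `cell` helper and the data exist only for this example, and the output in the comment follows from the alignment logic in `markdown-table`.

```js
import {toMarkdown} from 'mdast-util-to-markdown'
import {gfmTableToMarkdown} from 'mdast-util-gfm-table'

// Local helper for this example: a table cell with one text child.
function cell(value) {
  return {type: 'tableCell', children: [{type: 'text', value}]}
}

const table = {
  type: 'table',
  align: ['left', 'right'],
  children: [
    {type: 'tableRow', children: [cell('Name'), cell('Amount')]},
    {type: 'tableRow', children: [cell('Alpha'), cell('1.23')]}
  ]
}

// | Name  | Amount |
// | :---- | -----: |
// | Alpha |   1.23 |
console.log(toMarkdown(table, {extensions: [gfmTableToMarkdown()]}))
```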
inlineCodeWithTable(node, parent, state) {\n let value = defaultHandlers.inlineCode(node, parent, state)\n\n if (state.stack.includes('tableCell')) {\n value = value.replace(/\\|/g, '\\\\$&')\n }\n\n return value\n }\n}\n","/**\n * @typedef {import('mdast').ListItem} ListItem\n * @typedef {import('mdast').Paragraph} Paragraph\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n */\n\nimport {ok as assert} from 'devlop'\nimport {defaultHandlers} from 'mdast-util-to-markdown'\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM task\n * list items in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown` to enable GFM task list items.\n */\nexport function gfmTaskListItemFromMarkdown() {\n return {\n exit: {\n taskListCheckValueChecked: exitCheck,\n taskListCheckValueUnchecked: exitCheck,\n paragraph: exitParagraphWithTaskListItem\n }\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM task list\n * items in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM task list items.\n */\nexport function gfmTaskListItemToMarkdown() {\n return {\n unsafe: [{atBreak: true, character: '-', after: '[:|-]'}],\n handlers: {listItem: listItemWithTaskListItem}\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitCheck(token) {\n // We’re always in a paragraph, in a list item.\n const node = this.stack[this.stack.length - 2]\n assert(node.type === 'listItem')\n node.checked = token.type === 'taskListCheckValueChecked'\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitParagraphWithTaskListItem(token) {\n const parent = this.stack[this.stack.length - 2]\n\n if (\n parent &&\n parent.type === 'listItem' &&\n typeof parent.checked === 'boolean'\n ) {\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'paragraph')\n const head = node.children[0]\n\n if (head && head.type === 'text') {\n const siblings = parent.children\n let index = -1\n /** @type {Paragraph | undefined} */\n let firstParaghraph\n\n while (++index < siblings.length) {\n const sibling = siblings[index]\n if (sibling.type === 'paragraph') {\n firstParaghraph = sibling\n break\n }\n }\n\n if (firstParaghraph === node) {\n // Must start with a space or a tab.\n head.value = head.value.slice(1)\n\n if (head.value.length === 0) {\n node.children.shift()\n } else if (\n node.position &&\n head.position &&\n typeof head.position.start.offset === 'number'\n ) {\n head.position.start.column++\n head.position.start.offset++\n node.position.start = Object.assign({}, head.position.start)\n }\n }\n }\n }\n\n this.exit(token)\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {ListItem} node\n */\nfunction listItemWithTaskListItem(node, parent, state, info) {\n const head = node.children[0]\n const checkable =\n typeof node.checked === 'boolean' && head && head.type === 'paragraph'\n const checkbox = '[' + (node.checked ? 
'x' : ' ') + '] '\n const tracker = state.createTracker(info)\n\n if (checkable) {\n tracker.move(checkbox)\n }\n\n let value = defaultHandlers.listItem(node, parent, state, {\n ...info,\n ...tracker.current()\n })\n\n if (checkable) {\n value = value.replace(/^(?:[*+-]|\\d+\\.)([\\r\\n]| {1,3})/, check)\n }\n\n return value\n\n /**\n * @param {string} $0\n * @returns {string}\n */\n function check($0) {\n return $0 + checkbox\n }\n}\n","/**\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n */\n\n/**\n * @typedef {import('mdast-util-gfm-table').Options} Options\n * Configuration.\n */\n\nimport {\n gfmAutolinkLiteralFromMarkdown,\n gfmAutolinkLiteralToMarkdown\n} from 'mdast-util-gfm-autolink-literal'\nimport {\n gfmFootnoteFromMarkdown,\n gfmFootnoteToMarkdown\n} from 'mdast-util-gfm-footnote'\nimport {\n gfmStrikethroughFromMarkdown,\n gfmStrikethroughToMarkdown\n} from 'mdast-util-gfm-strikethrough'\nimport {gfmTableFromMarkdown, gfmTableToMarkdown} from 'mdast-util-gfm-table'\nimport {\n gfmTaskListItemFromMarkdown,\n gfmTaskListItemToMarkdown\n} from 'mdast-util-gfm-task-list-item'\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM (autolink\n * literals, footnotes, strikethrough, tables, tasklists).\n *\n * @returns {Array}\n * Extension for `mdast-util-from-markdown` to enable GFM (autolink literals,\n * footnotes, strikethrough, tables, tasklists).\n */\nexport function gfmFromMarkdown() {\n return [\n gfmAutolinkLiteralFromMarkdown(),\n gfmFootnoteFromMarkdown(),\n gfmStrikethroughFromMarkdown(),\n gfmTableFromMarkdown(),\n gfmTaskListItemFromMarkdown()\n ]\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM (autolink\n * literals, footnotes, strikethrough, tables, tasklists).\n *\n * @param {Options | null | undefined} [options]\n * Configuration.\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM (autolink literals,\n * footnotes, strikethrough, tables, tasklists).\n */\nexport function gfmToMarkdown(options) {\n return {\n extensions: [\n gfmAutolinkLiteralToMarkdown(),\n gfmFootnoteToMarkdown(),\n gfmStrikethroughToMarkdown(),\n gfmTableToMarkdown(options),\n gfmTaskListItemToMarkdown()\n ]\n }\n}\n","/**\n * @typedef {import('micromark-util-types').Code} Code\n * @typedef {import('micromark-util-types').ConstructRecord} ConstructRecord\n * @typedef {import('micromark-util-types').Event} Event\n * @typedef {import('micromark-util-types').Extension} Extension\n * @typedef {import('micromark-util-types').Previous} Previous\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {\n asciiAlpha,\n asciiAlphanumeric,\n asciiControl,\n markdownLineEndingOrSpace,\n unicodePunctuation,\n unicodeWhitespace\n} from 'micromark-util-character'\nconst wwwPrefix = {\n tokenize: tokenizeWwwPrefix,\n partial: true\n}\nconst domain = {\n tokenize: tokenizeDomain,\n partial: true\n}\nconst path = {\n tokenize: tokenizePath,\n partial: true\n}\nconst trail = {\n tokenize: tokenizeTrail,\n partial: true\n}\nconst emailDomainDotTrail = {\n tokenize: tokenizeEmailDomainDotTrail,\n partial: true\n}\nconst wwwAutolink = {\n tokenize: tokenizeWwwAutolink,\n previous: previousWww\n}\nconst protocolAutolink = {\n tokenize: 
tokenizeProtocolAutolink,\n previous: previousProtocol\n}\nconst emailAutolink = {\n tokenize: tokenizeEmailAutolink,\n previous: previousEmail\n}\n\n/** @type {ConstructRecord} */\nconst text = {}\n\n/**\n * Create an extension for `micromark` to support GitHub autolink literal\n * syntax.\n *\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions` to enable GFM\n * autolink literal syntax.\n */\nexport function gfmAutolinkLiteral() {\n return {\n text\n }\n}\n\n/** @type {Code} */\nlet code = 48\n\n// Add alphanumerics.\nwhile (code < 123) {\n text[code] = emailAutolink\n code++\n if (code === 58) code = 65\n else if (code === 91) code = 97\n}\ntext[43] = emailAutolink\ntext[45] = emailAutolink\ntext[46] = emailAutolink\ntext[95] = emailAutolink\ntext[72] = [emailAutolink, protocolAutolink]\ntext[104] = [emailAutolink, protocolAutolink]\ntext[87] = [emailAutolink, wwwAutolink]\ntext[119] = [emailAutolink, wwwAutolink]\n\n// To do: perform email autolink literals on events, afterwards.\n// That’s where `markdown-rs` and `cmark-gfm` perform it.\n// It should look for `@`, then for atext backwards, and then for a label\n// forwards.\n// To do: `mailto:`, `xmpp:` protocol as prefix.\n\n/**\n * Email autolink literal.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^^^^^^^^^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeEmailAutolink(effects, ok, nok) {\n const self = this\n /** @type {boolean | undefined} */\n let dot\n /** @type {boolean} */\n let data\n return start\n\n /**\n * Start of email autolink literal.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n if (\n !gfmAtext(code) ||\n !previousEmail.call(self, self.previous) ||\n previousUnbalanced(self.events)\n ) {\n return nok(code)\n }\n effects.enter('literalAutolink')\n effects.enter('literalAutolinkEmail')\n return atext(code)\n }\n\n /**\n * In email atext.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function atext(code) {\n if (gfmAtext(code)) {\n effects.consume(code)\n return atext\n }\n if (code === 64) {\n effects.consume(code)\n return emailDomain\n }\n return nok(code)\n }\n\n /**\n * In email domain.\n *\n * The reference code is a bit overly complex as it handles the `@`, of which\n * there may be just one.\n * Source: \n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function emailDomain(code) {\n // Dot followed by alphanumerical (not `-` or `_`).\n if (code === 46) {\n return effects.check(\n emailDomainDotTrail,\n emailDomainAfter,\n emailDomainDot\n )(code)\n }\n\n // Alphanumerical, `-`, and `_`.\n if (code === 45 || code === 95 || asciiAlphanumeric(code)) {\n data = true\n effects.consume(code)\n return emailDomain\n }\n\n // To do: `/` if xmpp.\n\n // Note: normally we’d truncate trailing punctuation from the link.\n // However, email autolink literals cannot contain any of those markers,\n // except for `.`, but that can only occur if it isn’t trailing.\n // So we can ignore truncating!\n return emailDomainAfter(code)\n }\n\n /**\n * In email domain, on dot that is not a trail.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function emailDomainDot(code) {\n effects.consume(code)\n dot = true\n return emailDomain\n }\n\n /**\n * After email domain.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * 
```\n *\n * @type {State}\n */\n function emailDomainAfter(code) {\n // Domain must not be empty, must include a dot, and must end in alphabetical.\n // Source: .\n if (data && dot && asciiAlpha(self.previous)) {\n effects.exit('literalAutolinkEmail')\n effects.exit('literalAutolink')\n return ok(code)\n }\n return nok(code)\n }\n}\n\n/**\n * `www` autolink literal.\n *\n * ```markdown\n * > | a www.example.org b\n * ^^^^^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeWwwAutolink(effects, ok, nok) {\n const self = this\n return wwwStart\n\n /**\n * Start of www autolink literal.\n *\n * ```markdown\n * > | www.example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function wwwStart(code) {\n if (\n (code !== 87 && code !== 119) ||\n !previousWww.call(self, self.previous) ||\n previousUnbalanced(self.events)\n ) {\n return nok(code)\n }\n effects.enter('literalAutolink')\n effects.enter('literalAutolinkWww')\n // Note: we *check*, so we can discard the `www.` we parsed.\n // If it worked, we consider it as a part of the domain.\n return effects.check(\n wwwPrefix,\n effects.attempt(domain, effects.attempt(path, wwwAfter), nok),\n nok\n )(code)\n }\n\n /**\n * After a www autolink literal.\n *\n * ```markdown\n * > | www.example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function wwwAfter(code) {\n effects.exit('literalAutolinkWww')\n effects.exit('literalAutolink')\n return ok(code)\n }\n}\n\n/**\n * Protocol autolink literal.\n *\n * ```markdown\n * > | a https://example.org b\n * ^^^^^^^^^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeProtocolAutolink(effects, ok, nok) {\n const self = this\n let buffer = ''\n let seen = false\n return protocolStart\n\n /**\n * Start of protocol autolink literal.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function protocolStart(code) {\n if (\n (code === 72 || code === 104) &&\n previousProtocol.call(self, self.previous) &&\n !previousUnbalanced(self.events)\n ) {\n effects.enter('literalAutolink')\n effects.enter('literalAutolinkHttp')\n buffer += String.fromCodePoint(code)\n effects.consume(code)\n return protocolPrefixInside\n }\n return nok(code)\n }\n\n /**\n * In protocol.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^^^^^\n * ```\n *\n * @type {State}\n */\n function protocolPrefixInside(code) {\n // `5` is size of `https`\n if (asciiAlpha(code) && buffer.length < 5) {\n // @ts-expect-error: definitely number.\n buffer += String.fromCodePoint(code)\n effects.consume(code)\n return protocolPrefixInside\n }\n if (code === 58) {\n const protocol = buffer.toLowerCase()\n if (protocol === 'http' || protocol === 'https') {\n effects.consume(code)\n return protocolSlashesInside\n }\n }\n return nok(code)\n }\n\n /**\n * In slashes.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^^\n * ```\n *\n * @type {State}\n */\n function protocolSlashesInside(code) {\n if (code === 47) {\n effects.consume(code)\n if (seen) {\n return afterProtocol\n }\n seen = true\n return protocolSlashesInside\n }\n return nok(code)\n }\n\n /**\n * After protocol, before domain.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function afterProtocol(code) {\n // To do: this is different from `markdown-rs`:\n // https://github.com/wooorm/markdown-rs/blob/b3a921c761309ae00a51fe348d8a43adbc54b518/src/construct/gfm_autolink_literal.rs#L172-L182\n 
return code === null ||\n asciiControl(code) ||\n markdownLineEndingOrSpace(code) ||\n unicodeWhitespace(code) ||\n unicodePunctuation(code)\n ? nok(code)\n : effects.attempt(domain, effects.attempt(path, protocolAfter), nok)(code)\n }\n\n /**\n * After a protocol autolink literal.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function protocolAfter(code) {\n effects.exit('literalAutolinkHttp')\n effects.exit('literalAutolink')\n return ok(code)\n }\n}\n\n/**\n * `www` prefix.\n *\n * ```markdown\n * > | a www.example.org b\n * ^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeWwwPrefix(effects, ok, nok) {\n let size = 0\n return wwwPrefixInside\n\n /**\n * In www prefix.\n *\n * ```markdown\n * > | www.example.com\n * ^^^^\n * ```\n *\n * @type {State}\n */\n function wwwPrefixInside(code) {\n if ((code === 87 || code === 119) && size < 3) {\n size++\n effects.consume(code)\n return wwwPrefixInside\n }\n if (code === 46 && size === 3) {\n effects.consume(code)\n return wwwPrefixAfter\n }\n return nok(code)\n }\n\n /**\n * After www prefix.\n *\n * ```markdown\n * > | www.example.com\n * ^\n * ```\n *\n * @type {State}\n */\n function wwwPrefixAfter(code) {\n // If there is *anything*, we can link.\n return code === null ? nok(code) : ok(code)\n }\n}\n\n/**\n * Domain.\n *\n * ```markdown\n * > | a https://example.org b\n * ^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeDomain(effects, ok, nok) {\n /** @type {boolean | undefined} */\n let underscoreInLastSegment\n /** @type {boolean | undefined} */\n let underscoreInLastLastSegment\n /** @type {boolean | undefined} */\n let seen\n return domainInside\n\n /**\n * In domain.\n *\n * ```markdown\n * > | https://example.com/a\n * ^^^^^^^^^^^\n * ```\n *\n * @type {State}\n */\n function domainInside(code) {\n // Check whether this marker, which is a trailing punctuation\n // marker, optionally followed by more trailing markers, and then\n // followed by an end.\n if (code === 46 || code === 95) {\n return effects.check(trail, domainAfter, domainAtPunctuation)(code)\n }\n\n // GH documents that only alphanumerics (other than `-`, `.`, and `_`) can\n // occur, which sounds like ASCII only, but they also support `www.點看.com`,\n // so that’s Unicode.\n // Instead of some new production for Unicode alphanumerics, markdown\n // already has that for Unicode punctuation and whitespace, so use those.\n // Source: .\n if (\n code === null ||\n markdownLineEndingOrSpace(code) ||\n unicodeWhitespace(code) ||\n (code !== 45 && unicodePunctuation(code))\n ) {\n return domainAfter(code)\n }\n seen = true\n effects.consume(code)\n return domainInside\n }\n\n /**\n * In domain, at potential trailing punctuation, that was not trailing.\n *\n * ```markdown\n * > | https://example.com\n * ^\n * ```\n *\n * @type {State}\n */\n function domainAtPunctuation(code) {\n // There is an underscore in the last segment of the domain\n if (code === 95) {\n underscoreInLastSegment = true\n }\n // Otherwise, it’s a `.`: save the last segment underscore in the\n // penultimate segment slot.\n else {\n underscoreInLastLastSegment = underscoreInLastSegment\n underscoreInLastSegment = undefined\n }\n effects.consume(code)\n return domainInside\n }\n\n /**\n * After domain.\n *\n * ```markdown\n * > | https://example.com/a\n * ^\n * ```\n *\n * @type {State} */\n function domainAfter(code) {\n // Note: that’s GH says a dot is needed, but 
it’s not true:\n // \n if (underscoreInLastLastSegment || underscoreInLastSegment || !seen) {\n return nok(code)\n }\n return ok(code)\n }\n}\n\n/**\n * Path.\n *\n * ```markdown\n * > | a https://example.org/stuff b\n * ^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizePath(effects, ok) {\n let sizeOpen = 0\n let sizeClose = 0\n return pathInside\n\n /**\n * In path.\n *\n * ```markdown\n * > | https://example.com/a\n * ^^\n * ```\n *\n * @type {State}\n */\n function pathInside(code) {\n if (code === 40) {\n sizeOpen++\n effects.consume(code)\n return pathInside\n }\n\n // To do: `markdown-rs` also needs this.\n // If this is a paren, and there are less closings than openings,\n // we don’t check for a trail.\n if (code === 41 && sizeClose < sizeOpen) {\n return pathAtPunctuation(code)\n }\n\n // Check whether this trailing punctuation marker is optionally\n // followed by more trailing markers, and then followed\n // by an end.\n if (\n code === 33 ||\n code === 34 ||\n code === 38 ||\n code === 39 ||\n code === 41 ||\n code === 42 ||\n code === 44 ||\n code === 46 ||\n code === 58 ||\n code === 59 ||\n code === 60 ||\n code === 63 ||\n code === 93 ||\n code === 95 ||\n code === 126\n ) {\n return effects.check(trail, ok, pathAtPunctuation)(code)\n }\n if (\n code === null ||\n markdownLineEndingOrSpace(code) ||\n unicodeWhitespace(code)\n ) {\n return ok(code)\n }\n effects.consume(code)\n return pathInside\n }\n\n /**\n * In path, at potential trailing punctuation, that was not trailing.\n *\n * ```markdown\n * > | https://example.com/a\"b\n * ^\n * ```\n *\n * @type {State}\n */\n function pathAtPunctuation(code) {\n // Count closing parens.\n if (code === 41) {\n sizeClose++\n }\n effects.consume(code)\n return pathInside\n }\n}\n\n/**\n * Trail.\n *\n * This calls `ok` if this *is* the trail, followed by an end, which means\n * the entire trail is not part of the link.\n * It calls `nok` if this *is* part of the link.\n *\n * ```markdown\n * > | https://example.com\").\n * ^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeTrail(effects, ok, nok) {\n return trail\n\n /**\n * In trail of domain or path.\n *\n * ```markdown\n * > | https://example.com\").\n * ^\n * ```\n *\n * @type {State}\n */\n function trail(code) {\n // Regular trailing punctuation.\n if (\n code === 33 ||\n code === 34 ||\n code === 39 ||\n code === 41 ||\n code === 42 ||\n code === 44 ||\n code === 46 ||\n code === 58 ||\n code === 59 ||\n code === 63 ||\n code === 95 ||\n code === 126\n ) {\n effects.consume(code)\n return trail\n }\n\n // `&` followed by one or more alphabeticals and then a `;`, is\n // as a whole considered as trailing punctuation.\n // In all other cases, it is considered as continuation of the URL.\n if (code === 38) {\n effects.consume(code)\n return trailCharRefStart\n }\n\n // Needed because we allow literals after `[`, as we fix:\n // .\n // Check that it is not followed by `(` or `[`.\n if (code === 93) {\n effects.consume(code)\n return trailBracketAfter\n }\n if (\n // `<` is an end.\n code === 60 ||\n // So is whitespace.\n code === null ||\n markdownLineEndingOrSpace(code) ||\n unicodeWhitespace(code)\n ) {\n return ok(code)\n }\n return nok(code)\n }\n\n /**\n * In trail, after `]`.\n *\n * > 👉 **Note**: this deviates from `cmark-gfm` to fix a bug.\n * > See end of for more.\n *\n * ```markdown\n * > | https://example.com](\n * ^\n * ```\n *\n * @type {State}\n */\n function trailBracketAfter(code) 
{\n // Whitespace or something that could start a resource or reference is the end.\n // Switch back to trail otherwise.\n if (\n code === null ||\n code === 40 ||\n code === 91 ||\n markdownLineEndingOrSpace(code) ||\n unicodeWhitespace(code)\n ) {\n return ok(code)\n }\n return trail(code)\n }\n\n /**\n * In character-reference like trail, after `&`.\n *\n * ```markdown\n * > | https://example.com&).\n * ^\n * ```\n *\n * @type {State}\n */\n function trailCharRefStart(code) {\n // When non-alpha, it’s not a trail.\n return asciiAlpha(code) ? trailCharRefInside(code) : nok(code)\n }\n\n /**\n * In character-reference like trail.\n *\n * ```markdown\n * > | https://example.com&).\n * ^\n * ```\n *\n * @type {State}\n */\n function trailCharRefInside(code) {\n // Switch back to trail if this is well-formed.\n if (code === 59) {\n effects.consume(code)\n return trail\n }\n if (asciiAlpha(code)) {\n effects.consume(code)\n return trailCharRefInside\n }\n\n // It’s not a trail.\n return nok(code)\n }\n}\n\n/**\n * Dot in email domain trail.\n *\n * This calls `ok` if this *is* the trail, followed by an end, which means\n * the trail is not part of the link.\n * It calls `nok` if this *is* part of the link.\n *\n * ```markdown\n * > | contact@example.org.\n * ^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeEmailDomainDotTrail(effects, ok, nok) {\n return start\n\n /**\n * Dot.\n *\n * ```markdown\n * > | contact@example.org.\n * ^ ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n // Must be dot.\n effects.consume(code)\n return after\n }\n\n /**\n * After dot.\n *\n * ```markdown\n * > | contact@example.org.\n * ^ ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n // Not a trail if alphanumeric.\n return asciiAlphanumeric(code) ? 
nok(code) : ok(code)\n }\n}\n\n/**\n * See:\n * .\n *\n * @type {Previous}\n */\nfunction previousWww(code) {\n return (\n code === null ||\n code === 40 ||\n code === 42 ||\n code === 95 ||\n code === 91 ||\n code === 93 ||\n code === 126 ||\n markdownLineEndingOrSpace(code)\n )\n}\n\n/**\n * See:\n * .\n *\n * @type {Previous}\n */\nfunction previousProtocol(code) {\n return !asciiAlpha(code)\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Previous}\n */\nfunction previousEmail(code) {\n // Do not allow a slash “inside” atext.\n // The reference code is a bit weird, but that’s what it results in.\n // Source: .\n // Other than slash, every preceding character is allowed.\n return !(code === 47 || gfmAtext(code))\n}\n\n/**\n * @param {Code} code\n * @returns {boolean}\n */\nfunction gfmAtext(code) {\n return (\n code === 43 ||\n code === 45 ||\n code === 46 ||\n code === 95 ||\n asciiAlphanumeric(code)\n )\n}\n\n/**\n * @param {Array} events\n * @returns {boolean}\n */\nfunction previousUnbalanced(events) {\n let index = events.length\n let result = false\n while (index--) {\n const token = events[index][1]\n if (\n (token.type === 'labelLink' || token.type === 'labelImage') &&\n !token._balanced\n ) {\n result = true\n break\n }\n\n // If we’ve seen this token, and it was marked as not having any unbalanced\n // bracket before it, we can exit.\n if (token._gfmAutolinkLiteralWalkedInto) {\n result = false\n break\n }\n }\n if (events.length > 0 && !result) {\n // Mark the last token as “walked into” w/o finding\n // anything.\n events[events.length - 1][1]._gfmAutolinkLiteralWalkedInto = true\n }\n return result\n}\n","/**\n * @typedef {import('micromark-util-types').Event} Event\n * @typedef {import('micromark-util-types').Exiter} Exiter\n * @typedef {import('micromark-util-types').Extension} Extension\n * @typedef {import('micromark-util-types').Resolver} Resolver\n * @typedef {import('micromark-util-types').State} State\n * @typedef {import('micromark-util-types').Token} Token\n * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext\n * @typedef {import('micromark-util-types').Tokenizer} Tokenizer\n */\n\nimport {blankLine} from 'micromark-core-commonmark'\nimport {factorySpace} from 'micromark-factory-space'\nimport {markdownLineEndingOrSpace} from 'micromark-util-character'\nimport {normalizeIdentifier} from 'micromark-util-normalize-identifier'\nconst indent = {\n tokenize: tokenizeIndent,\n partial: true\n}\n\n// To do: micromark should support a `_hiddenGfmFootnoteSupport`, which only\n// affects label start (image).\n// That will let us drop `tokenizePotentialGfmFootnote*`.\n// It currently has a `_hiddenFootnoteSupport`, which affects that and more.\n// That can be removed when `micromark-extension-footnote` is archived.\n\n/**\n * Create an extension for `micromark` to enable GFM footnote syntax.\n *\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions` to\n * enable GFM footnote syntax.\n */\nexport function gfmFootnote() {\n /** @type {Extension} */\n return {\n document: {\n [91]: {\n tokenize: tokenizeDefinitionStart,\n continuation: {\n tokenize: tokenizeDefinitionContinuation\n },\n exit: gfmFootnoteDefinitionEnd\n }\n },\n text: {\n [91]: {\n tokenize: tokenizeGfmFootnoteCall\n },\n [93]: {\n add: 'after',\n tokenize: tokenizePotentialGfmFootnoteCall,\n resolveTo: resolveToPotentialGfmFootnoteCall\n }\n }\n }\n}\n\n// To do: remove after micromark update.\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n 
*/\nfunction tokenizePotentialGfmFootnoteCall(effects, ok, nok) {\n const self = this\n let index = self.events.length\n const defined = self.parser.gfmFootnotes || (self.parser.gfmFootnotes = [])\n /** @type {Token} */\n let labelStart\n\n // Find an opening.\n while (index--) {\n const token = self.events[index][1]\n if (token.type === 'labelImage') {\n labelStart = token\n break\n }\n\n // Exit if we’ve walked far enough.\n if (\n token.type === 'gfmFootnoteCall' ||\n token.type === 'labelLink' ||\n token.type === 'label' ||\n token.type === 'image' ||\n token.type === 'link'\n ) {\n break\n }\n }\n return start\n\n /**\n * @type {State}\n */\n function start(code) {\n if (!labelStart || !labelStart._balanced) {\n return nok(code)\n }\n const id = normalizeIdentifier(\n self.sliceSerialize({\n start: labelStart.end,\n end: self.now()\n })\n )\n if (id.codePointAt(0) !== 94 || !defined.includes(id.slice(1))) {\n return nok(code)\n }\n effects.enter('gfmFootnoteCallLabelMarker')\n effects.consume(code)\n effects.exit('gfmFootnoteCallLabelMarker')\n return ok(code)\n }\n}\n\n// To do: remove after micromark update.\n/** @type {Resolver} */\nfunction resolveToPotentialGfmFootnoteCall(events, context) {\n let index = events.length\n /** @type {Token | undefined} */\n let labelStart\n\n // Find an opening.\n while (index--) {\n if (\n events[index][1].type === 'labelImage' &&\n events[index][0] === 'enter'\n ) {\n labelStart = events[index][1]\n break\n }\n }\n // Change the `labelImageMarker` to a `data`.\n events[index + 1][1].type = 'data'\n events[index + 3][1].type = 'gfmFootnoteCallLabelMarker'\n\n // The whole (without `!`):\n /** @type {Token} */\n const call = {\n type: 'gfmFootnoteCall',\n start: Object.assign({}, events[index + 3][1].start),\n end: Object.assign({}, events[events.length - 1][1].end)\n }\n // The `^` marker\n /** @type {Token} */\n const marker = {\n type: 'gfmFootnoteCallMarker',\n start: Object.assign({}, events[index + 3][1].end),\n end: Object.assign({}, events[index + 3][1].end)\n }\n // Increment the end 1 character.\n marker.end.column++\n marker.end.offset++\n marker.end._bufferIndex++\n /** @type {Token} */\n const string = {\n type: 'gfmFootnoteCallString',\n start: Object.assign({}, marker.end),\n end: Object.assign({}, events[events.length - 1][1].start)\n }\n /** @type {Token} */\n const chunk = {\n type: 'chunkString',\n contentType: 'string',\n start: Object.assign({}, string.start),\n end: Object.assign({}, string.end)\n }\n\n /** @type {Array