/i.test(cap[0])) {
+ this.lexer.state.inLink = false;
+ }
+ if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
+ this.lexer.state.inRawBlock = true;
+ }
+ else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
+ this.lexer.state.inRawBlock = false;
+ }
+ return {
+ type: 'html',
+ raw: cap[0],
+ inLink: this.lexer.state.inLink,
+ inRawBlock: this.lexer.state.inRawBlock,
+ block: false,
+ text: cap[0],
+ };
+ }
+ }
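+ // Inline link tokenizer: handles `[text](href "title")`. CommonMark only accepts an
+ // angle-bracketed destination when the brackets are balanced, and findClosingBracket
+ // lets nested parentheses inside the destination resolve correctly.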
+ link(src) {
+ const cap = this.rules.inline.link.exec(src);
+ if (cap) {
+ const trimmedUrl = cap[2].trim();
+ if (!this.options.pedantic && /^</.test(trimmedUrl)) {
+ // commonmark requires matching angle brackets
+ if (!(/>$/.test(trimmedUrl))) {
+ return;
+ }
+ // ending angle bracket cannot be escaped
+ const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\');
+ if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
+ return;
+ }
+ }
+ else {
+ // find closing parenthesis
+ const lastParenIndex = findClosingBracket(cap[2], '()');
+ if (lastParenIndex > -1) {
+ const start = cap[0].indexOf('!') === 0 ? 5 : 4;
+ const linkLen = start + cap[1].length + lastParenIndex;
+ cap[2] = cap[2].substring(0, lastParenIndex);
+ cap[0] = cap[0].substring(0, linkLen).trim();
+ cap[3] = '';
+ }
+ }
+ let href = cap[2];
+ let title = '';
+ if (this.options.pedantic) {
+ // split pedantic href and title
+ const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href);
+ if (link) {
+ href = link[1];
+ title = link[3];
+ }
+ }
+ else {
+ title = cap[3] ? cap[3].slice(1, -1) : '';
+ }
+ href = href.trim();
+ if (/^</.test(href)) {
+ if (this.options.pedantic && !(/>$/.test(trimmedUrl))) {
+ // pedantic allows starting angle bracket without ending angle bracket
+ href = href.slice(1);
+ }
+ else {
+ href = href.slice(1, -1);
+ }
+ }
+ return outputLink(cap, {
+ href: href ? href.replace(this.rules.inline.anyPunctuation, '$1') : href,
+ title: title ? title.replace(this.rules.inline.anyPunctuation, '$1') : title,
+ }, cap[0], this.lexer);
+ }
+ }
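+ // Reference-style links: `[text][label]` and shortcut `[text]` are resolved against
+ // the link definitions collected by the block lexer (this.tokens.links). Unknown
+ // labels fall back to a single-character text token so scanning can continue.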
+ reflink(src, links) {
+ let cap;
+ if ((cap = this.rules.inline.reflink.exec(src))
+ || (cap = this.rules.inline.nolink.exec(src))) {
+ const linkString = (cap[2] || cap[1]).replace(/\s+/g, ' ');
+ const link = links[linkString.toLowerCase()];
+ if (!link) {
+ const text = cap[0].charAt(0);
+ return {
+ type: 'text',
+ raw: text,
+ text,
+ };
+ }
+ return outputLink(cap, link, cap[0], this.lexer);
+ }
+ }
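+ // Emphasis/strong per the CommonMark delimiter-run rules: a left-flanking run is
+ // matched in `src`, then right-flanking runs are scanned in `maskedSrc` (links,
+ // code spans and escapes already masked out) until the run lengths balance.
+ // The smallest matched run decides the token: odd length -> em, even -> strong.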
+ emStrong(src, maskedSrc, prevChar = '') {
+ let match = this.rules.inline.emStrongLDelim.exec(src);
+ if (!match)
+ return;
+ // _ can't be between two alphanumerics. \p{L}\p{N} includes non-english alphabet/numbers as well
+ if (match[3] && prevChar.match(/[\p{L}\p{N}]/u))
+ return;
+ const nextChar = match[1] || match[2] || '';
+ if (!nextChar || !prevChar || this.rules.inline.punctuation.exec(prevChar)) {
+ // unicode Regex counts emoji as 1 char; spread into array for proper count (used multiple times below)
+ const lLength = [...match[0]].length - 1;
+ let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;
+ const endReg = match[0][0] === '*' ? this.rules.inline.emStrongRDelimAst : this.rules.inline.emStrongRDelimUnd;
+ endReg.lastIndex = 0;
+ // Clip maskedSrc to same section of string as src (move to lexer?)
+ maskedSrc = maskedSrc.slice(-1 * src.length + lLength);
+ while ((match = endReg.exec(maskedSrc)) != null) {
+ rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];
+ if (!rDelim)
+ continue; // skip single * in __abc*abc__
+ rLength = [...rDelim].length;
+ if (match[3] || match[4]) { // found another Left Delim
+ delimTotal += rLength;
+ continue;
+ }
+ else if (match[5] || match[6]) { // either Left or Right Delim
+ if (lLength % 3 && !((lLength + rLength) % 3)) {
+ midDelimTotal += rLength;
+ continue; // CommonMark Emphasis Rules 9-10
+ }
+ }
+ delimTotal -= rLength;
+ if (delimTotal > 0)
+ continue; // Haven't found enough closing delimiters
+ // Remove extra characters. *a*** -> *a*
+ rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal);
+ // char length can be >1 for unicode characters;
+ const lastCharLength = [...match[0]][0].length;
+ const raw = src.slice(0, lLength + match.index + lastCharLength + rLength);
+ // Create `em` if smallest delimiter has odd char count. *a***
+ if (Math.min(lLength, rLength) % 2) {
+ const text = raw.slice(1, -1);
+ return {
+ type: 'em',
+ raw,
+ text,
+ tokens: this.lexer.inlineTokens(text),
+ };
+ }
+ // Create 'strong' if smallest delimiter has even char count. **a***
+ const text = raw.slice(2, -2);
+ return {
+ type: 'strong',
+ raw,
+ text,
+ tokens: this.lexer.inlineTokens(text),
+ };
+ }
+ }
+ }
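+ // Code spans: newlines collapse to spaces, and one leading/trailing space is
+ // stripped when both ends are spaces and the content is not all spaces (CommonMark).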
+ codespan(src) {
+ const cap = this.rules.inline.code.exec(src);
+ if (cap) {
+ let text = cap[2].replace(/\n/g, ' ');
+ const hasNonSpaceChars = /[^ ]/.test(text);
+ const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text);
+ if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
+ text = text.substring(1, text.length - 1);
+ }
+ text = escape$1(text, true);
+ return {
+ type: 'codespan',
+ raw: cap[0],
+ text,
+ };
+ }
+ }
+ br(src) {
+ const cap = this.rules.inline.br.exec(src);
+ if (cap) {
+ return {
+ type: 'br',
+ raw: cap[0],
+ };
+ }
+ }
+ del(src) {
+ const cap = this.rules.inline.del.exec(src);
+ if (cap) {
+ return {
+ type: 'del',
+ raw: cap[0],
+ text: cap[2],
+ tokens: this.lexer.inlineTokens(cap[2]),
+ };
+ }
+ }
+ autolink(src) {
+ const cap = this.rules.inline.autolink.exec(src);
+ if (cap) {
+ let text, href;
+ if (cap[2] === '@') {
+ text = escape$1(cap[1]);
+ href = 'mailto:' + text;
+ }
+ else {
+ text = escape$1(cap[1]);
+ href = text;
+ }
+ return {
+ type: 'link',
+ raw: cap[0],
+ text,
+ href,
+ tokens: [
+ {
+ type: 'text',
+ raw: text,
+ text,
+ },
+ ],
+ };
+ }
+ }
+ url(src) {
+ let cap;
+ if (cap = this.rules.inline.url.exec(src)) {
+ let text, href;
+ if (cap[2] === '@') {
+ text = escape$1(cap[0]);
+ href = 'mailto:' + text;
+ }
+ else {
+ // do extended autolink path validation
+ let prevCapZero;
+ do {
+ prevCapZero = cap[0];
+ cap[0] = this.rules.inline._backpedal.exec(cap[0])?.[0] ?? '';
+ } while (prevCapZero !== cap[0]);
+ text = escape$1(cap[0]);
+ if (cap[1] === 'www.') {
+ href = 'http://' + cap[0];
+ }
+ else {
+ href = cap[0];
+ }
+ }
+ return {
+ type: 'link',
+ raw: cap[0],
+ text,
+ href,
+ tokens: [
+ {
+ type: 'text',
+ raw: text,
+ text,
+ },
+ ],
+ };
+ }
+ }
+ inlineText(src) {
+ const cap = this.rules.inline.text.exec(src);
+ if (cap) {
+ let text;
+ if (this.lexer.state.inRawBlock) {
+ text = cap[0];
+ }
+ else {
+ text = escape$1(cap[0]);
+ }
+ return {
+ type: 'text',
+ raw: cap[0],
+ text,
+ };
+ }
+ }
+}
+
+/**
+ * Block-Level Grammar
+ */
+const newline = /^(?: *(?:\n|$))+/;
+const blockCode = /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/;
+const fences = /^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/;
+const hr = /^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/;
+const heading = /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/;
+const bullet = /(?:[*+-]|\d{1,9}[.)])/;
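+// The composite rules below are assembled with edit(): it takes a regex template,
+// substitutes named placeholders (bull, label, tag, ...) via .replace(), and
+// returns the finished RegExp from .getRegex().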
+const lheading = edit(/^(?!bull |blockCode|fences|blockquote|heading|html)((?:.|\n(?!\s*?\n|bull |blockCode|fences|blockquote|heading|html))+?)\n {0,3}(=+|-+) *(?:\n+|$)/)
+ .replace(/bull/g, bullet) // lists can interrupt
+ .replace(/blockCode/g, / {4}/) // indented code blocks can interrupt
+ .replace(/fences/g, / {0,3}(?:`{3,}|~{3,})/) // fenced code blocks can interrupt
+ .replace(/blockquote/g, / {0,3}>/) // blockquote can interrupt
+ .replace(/heading/g, / {0,3}#{1,6}/) // ATX heading can interrupt
+ .replace(/html/g, / {0,3}<[^\n>]+>\n/) // block html can interrupt
+ .getRegex();
+const _paragraph = /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/;
+const blockText = /^[^\n]+/;
+const _blockLabel = /(?!\s*\])(?:\\.|[^\[\]\\])+/;
+const def = edit(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/)
+ .replace('label', _blockLabel)
+ .replace('title', /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/)
+ .getRegex();
+const list = edit(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/)
+ .replace(/bull/g, bullet)
+ .getRegex();
+const _tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+ + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
+ + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
+ + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
+ + '|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title'
+ + '|tr|track|ul';
+const _comment = /<!--(?:-?>|[\s\S]*?(?:-->|$))/;
+const html = edit('^ {0,3}(?:' // optional indentation
+ + '<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
+ + '|comment[^\\n]*(\\n+|$)' // (2)
+ + '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
+ + '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
+ + '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
+ + '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
+ + '|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
+ + '|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
+ + ')', 'i')
+ .replace('comment', _comment)
+ .replace('tag', _tag)
+ .replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
+ .getRegex();
+const paragraph = edit(_paragraph)
+ .replace('hr', hr)
+ .replace('heading', ' {0,3}#{1,6}(?:\\s|$)')
+ .replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
+ .replace('|table', '')
+ .replace('blockquote', ' {0,3}>')
+ .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
+ .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
+ .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
+ .replace('tag', _tag) // pars can be interrupted by type (6) html blocks
+ .getRegex();
+const blockquote = edit(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/)
+ .replace('paragraph', paragraph)
+ .getRegex();
+/**
+ * Normal Block Grammar
+ */
+const blockNormal = {
+ blockquote,
+ code: blockCode,
+ def,
+ fences,
+ heading,
+ hr,
+ html,
+ lheading,
+ list,
+ newline,
+ paragraph,
+ table: noopTest,
+ text: blockText,
+};
+/**
+ * GFM Block Grammar
+ */
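+// A table the GFM grammar below is meant to match, e.g.:
+//   | a | b |
+//   |---|:-:|
+//   | 1 | 2 |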
+const gfmTable = edit('^ *([^\\n ].*)\\n' // Header
+ + ' {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)' // Align
+ + '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)') // Cells
+ .replace('hr', hr)
+ .replace('heading', ' {0,3}#{1,6}(?:\\s|$)')
+ .replace('blockquote', ' {0,3}>')
+ .replace('code', ' {4}[^\\n]')
+ .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
+ .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
+ .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
+ .replace('tag', _tag) // tables can be interrupted by type (6) html blocks
+ .getRegex();
+const blockGfm = {
+ ...blockNormal,
+ table: gfmTable,
+ paragraph: edit(_paragraph)
+ .replace('hr', hr)
+ .replace('heading', ' {0,3}#{1,6}(?:\\s|$)')
+ .replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
+ .replace('table', gfmTable) // interrupt paragraphs with table
+ .replace('blockquote', ' {0,3}>')
+ .replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
+ .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
+ .replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
+ .replace('tag', _tag) // pars can be interrupted by type (6) html blocks
+ .getRegex(),
+};
+/**
+ * Pedantic grammar (original John Gruber's loose markdown specification)
+ */
+const blockPedantic = {
+ ...blockNormal,
+ html: edit('^ *(?:comment *(?:\\n|\\s*$)'
+ + '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
+ + '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))')
+ .replace('comment', _comment)
+ .replace(/tag/g, '(?!(?:'
+ + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
+ + '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
+ + '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b')
+ .getRegex(),
+ def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,
+ heading: /^(#{1,6})(.*)(?:\n+|$)/,
+ fences: noopTest, // fences not supported
+ lheading: /^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,
+ paragraph: edit(_paragraph)
+ .replace('hr', hr)
+ .replace('heading', ' *#{1,6} *[^\n]')
+ .replace('lheading', lheading)
+ .replace('|table', '')
+ .replace('blockquote', ' {0,3}>')
+ .replace('|fences', '')
+ .replace('|list', '')
+ .replace('|html', '')
+ .replace('|tag', '')
+ .getRegex(),
+};
+/**
+ * Inline-Level Grammar
+ */
+const escape = /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/;
+const inlineCode = /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/;
+const br = /^( {2,}|\\)\n(?!\s*$)/;
+const inlineText = /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/;
+// list of unicode punctuation marks, plus any missing ascii punctuation characters
+const _punctuation = '\\p{P}\\p{S}';
+const punctuation = edit(/^((?![*_])[\spunctuation])/, 'u')
+ .replace(/punctuation/g, _punctuation).getRegex();
+// sequences em should skip over [title](link), `code`, <html>
+const blockSkip = /\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g;
+const emStrongLDelim = edit(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/, 'u')
+ .replace(/punct/g, _punctuation)
+ .getRegex();
+const emStrongRDelimAst = edit('^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)' // Skip orphan inside strong
+ + '|[^*]+(?=[^*])' // Consume to delim
+ + '|(?!\\*)[punct](\\*+)(?=[\\s]|$)' // (1) #*** can only be a Right Delimiter
+ + '|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)' // (2) a***#, a*** can only be a Right Delimiter
+ + '|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])' // (3) #***a, ***a can only be Left Delimiter
+ + '|[\\s](\\*+)(?!\\*)(?=[punct])' // (4) ***# can only be Left Delimiter
+ + '|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])' // (5) #***# can be either Left or Right Delimiter
+ + '|[^punct\\s](\\*+)(?=[^punct\\s])', 'gu') // (6) a***a can be either Left or Right Delimiter
+ .replace(/punct/g, _punctuation)
+ .getRegex();
+// (6) Not allowed for _
+const emStrongRDelimUnd = edit('^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)' // Skip orphan inside strong
+ + '|[^_]+(?=[^_])' // Consume to delim
+ + '|(?!_)[punct](_+)(?=[\\s]|$)' // (1) #___ can only be a Right Delimiter
+ + '|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)' // (2) a___#, a___ can only be a Right Delimiter
+ + '|(?!_)[punct\\s](_+)(?=[^punct\\s])' // (3) #___a, ___a can only be Left Delimiter
+ + '|[\\s](_+)(?!_)(?=[punct])' // (4) ___# can only be Left Delimiter
+ + '|(?!_)[punct](_+)(?!_)(?=[punct])', 'gu') // (5) #___# can be either Left or Right Delimiter
+ .replace(/punct/g, _punctuation)
+ .getRegex();
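+// The numbered alternatives above fill match[1]..match[6]; _Tokenizer.emStrong reads
+// them to classify each delimiter run as left-only, right-only, or either.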
+const anyPunctuation = edit(/\\([punct])/, 'gu')
+ .replace(/punct/g, _punctuation)
+ .getRegex();
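+// anyPunctuation matches one backslash-escaped punctuation character. It is used to
+// unescape link hrefs/titles above and to mask escapes (as '++') before em/strong scanning.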
+const autolink = edit(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/)
+ .replace('scheme', /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/)
+ .replace('email', /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/)
+ .getRegex();
+const _inlineComment = edit(_comment).replace('(?:-->|$)', '-->').getRegex();
+const tag = edit('^comment'
+ + '|^</[a-zA-Z][\\w:-]*\\s*>' // self-closing tag
+ + '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
+ + '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
+ + '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
+ + '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>') // CDATA section
+ .replace('comment', _inlineComment)
+ .replace('attribute', /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/)
+ .getRegex();
+const _inlineLabel = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;
+const link = edit(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/)
+ .replace('label', _inlineLabel)
+ .replace('href', /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/)
+ .replace('title', /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/)
+ .getRegex();
+const reflink = edit(/^!?\[(label)\]\[(ref)\]/)
+ .replace('label', _inlineLabel)
+ .replace('ref', _blockLabel)
+ .getRegex();
+const nolink = edit(/^!?\[(ref)\](?:\[\])?/)
+ .replace('ref', _blockLabel)
+ .getRegex();
+const reflinkSearch = edit('reflink|nolink(?!\\()', 'g')
+ .replace('reflink', reflink)
+ .replace('nolink', nolink)
+ .getRegex();
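+// reflinkSearch is only used by the lexer to locate reference links so they can be
+// masked out of the string handed to the em/strong delimiter scan.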
+/**
+ * Normal Inline Grammar
+ */
+const inlineNormal = {
+ _backpedal: noopTest, // only used for GFM url
+ anyPunctuation,
+ autolink,
+ blockSkip,
+ br,
+ code: inlineCode,
+ del: noopTest,
+ emStrongLDelim,
+ emStrongRDelimAst,
+ emStrongRDelimUnd,
+ escape,
+ link,
+ nolink,
+ punctuation,
+ reflink,
+ reflinkSearch,
+ tag,
+ text: inlineText,
+ url: noopTest,
+};
+/**
+ * Pedantic Inline Grammar
+ */
+const inlinePedantic = {
+ ...inlineNormal,
+ link: edit(/^!?\[(label)\]\((.*?)\)/)
+ .replace('label', _inlineLabel)
+ .getRegex(),
+ reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/)
+ .replace('label', _inlineLabel)
+ .getRegex(),
+};
+/**
+ * GFM Inline Grammar
+ */
+const inlineGfm = {
+ ...inlineNormal,
+ escape: edit(escape).replace('])', '~|])').getRegex(),
+ url: edit(/^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/, 'i')
+ .replace('email', /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/)
+ .getRegex(),
+ _backpedal: /(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/,
+ del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,
+ text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/,
+};
+/**
+ * GFM + Line Breaks Inline Grammar
+ */
+const inlineBreaks = {
+ ...inlineGfm,
+ br: edit(br).replace('{2,}', '*').getRegex(),
+ text: edit(inlineGfm.text)
+ .replace('\\b_', '\\b_| {2,}\\n')
+ .replace(/\{2,\}/g, '*')
+ .getRegex(),
+};
+/**
+ * exports
+ */
+const block = {
+ normal: blockNormal,
+ gfm: blockGfm,
+ pedantic: blockPedantic,
+};
+const inline = {
+ normal: inlineNormal,
+ gfm: inlineGfm,
+ breaks: inlineBreaks,
+ pedantic: inlinePedantic,
+};
+
+/**
+ * Block Lexer
+ */
+class _Lexer {
+ tokens;
+ options;
+ state;
+ tokenizer;
+ inlineQueue;
+ constructor(options) {
+ // TokenList cannot be created in one go
+ this.tokens = [];
+ this.tokens.links = Object.create(null);
+ this.options = options || _defaults;
+ this.options.tokenizer = this.options.tokenizer || new _Tokenizer();
+ this.tokenizer = this.options.tokenizer;
+ this.tokenizer.options = this.options;
+ this.tokenizer.lexer = this;
+ this.inlineQueue = [];
+ this.state = {
+ inLink: false,
+ inRawBlock: false,
+ top: true,
+ };
+ const rules = {
+ block: block.normal,
+ inline: inline.normal,
+ };
+ if (this.options.pedantic) {
+ rules.block = block.pedantic;
+ rules.inline = inline.pedantic;
+ }
+ else if (this.options.gfm) {
+ rules.block = block.gfm;
+ if (this.options.breaks) {
+ rules.inline = inline.breaks;
+ }
+ else {
+ rules.inline = inline.gfm;
+ }
+ }
+ this.tokenizer.rules = rules;
+ }
+ /**
+ * Expose Rules
+ */
+ static get rules() {
+ return {
+ block,
+ inline,
+ };
+ }
+ /**
+ * Static Lex Method
+ */
+ static lex(src, options) {
+ const lexer = new _Lexer(options);
+ return lexer.lex(src);
+ }
+ /**
+ * Static Lex Inline Method
+ */
+ static lexInline(src, options) {
+ const lexer = new _Lexer(options);
+ return lexer.inlineTokens(src);
+ }
+ /**
+ * Preprocessing
+ */
+ lex(src) {
+ src = src.replace(/\r\n|\r/g, '\n');
+ this.blockTokens(src, this.tokens);
+ for (let i = 0; i < this.inlineQueue.length; i++) {
+ const next = this.inlineQueue[i];
+ this.inlineTokens(next.src, next.tokens);
+ }
+ this.inlineQueue = [];
+ return this.tokens;
+ }
+ /**
+ * Lexing
+ */
+ blockTokens(src, tokens = [], lastParagraphClipped = false) {
+ if (this.options.pedantic) {
+ src = src.replace(/\t/g, '    ').replace(/^ +$/gm, '');
+ }
+ else {
+ src = src.replace(/^( *)(\t+)/gm, (_, leading, tabs) => {
+ return leading + '    '.repeat(tabs.length);
+ });
+ }
+ let token;
+ let lastToken;
+ let cutSrc;
+ while (src) {
+ if (this.options.extensions
+ && this.options.extensions.block
+ && this.options.extensions.block.some((extTokenizer) => {
+ if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ return true;
+ }
+ return false;
+ })) {
+ continue;
+ }
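+ // Custom block-level extension tokenizers run first on every pass, so they can
+ // claim the source ahead of the built-in rules below.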
+ // newline
+ if (token = this.tokenizer.space(src)) {
+ src = src.substring(token.raw.length);
+ if (token.raw.length === 1 && tokens.length > 0) {
+ // if there's a single \n as a spacer, it's terminating the last line,
+ // so move it there so that we don't get unnecessary paragraph tags
+ tokens[tokens.length - 1].raw += '\n';
+ }
+ else {
+ tokens.push(token);
+ }
+ continue;
+ }
+ // code
+ if (token = this.tokenizer.code(src)) {
+ src = src.substring(token.raw.length);
+ lastToken = tokens[tokens.length - 1];
+ // An indented code block cannot interrupt a paragraph.
+ if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
+ lastToken.raw += '\n' + token.raw;
+ lastToken.text += '\n' + token.text;
+ this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
+ }
+ else {
+ tokens.push(token);
+ }
+ continue;
+ }
+ // fences
+ if (token = this.tokenizer.fences(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // heading
+ if (token = this.tokenizer.heading(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // hr
+ if (token = this.tokenizer.hr(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // blockquote
+ if (token = this.tokenizer.blockquote(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // list
+ if (token = this.tokenizer.list(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // html
+ if (token = this.tokenizer.html(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // def
+ if (token = this.tokenizer.def(src)) {
+ src = src.substring(token.raw.length);
+ lastToken = tokens[tokens.length - 1];
+ if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
+ lastToken.raw += '\n' + token.raw;
+ lastToken.text += '\n' + token.raw;
+ this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
+ }
+ else if (!this.tokens.links[token.tag]) {
+ this.tokens.links[token.tag] = {
+ href: token.href,
+ title: token.title,
+ };
+ }
+ continue;
+ }
+ // table (gfm)
+ if (token = this.tokenizer.table(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // lheading
+ if (token = this.tokenizer.lheading(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // top-level paragraph
+ // prevent paragraph consuming extensions by clipping 'src' to extension start
+ cutSrc = src;
+ if (this.options.extensions && this.options.extensions.startBlock) {
+ let startIndex = Infinity;
+ const tempSrc = src.slice(1);
+ let tempStart;
+ this.options.extensions.startBlock.forEach((getStartIndex) => {
+ tempStart = getStartIndex.call({ lexer: this }, tempSrc);
+ if (typeof tempStart === 'number' && tempStart >= 0) {
+ startIndex = Math.min(startIndex, tempStart);
+ }
+ });
+ if (startIndex < Infinity && startIndex >= 0) {
+ cutSrc = src.substring(0, startIndex + 1);
+ }
+ }
+ if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) {
+ lastToken = tokens[tokens.length - 1];
+ if (lastParagraphClipped && lastToken?.type === 'paragraph') {
+ lastToken.raw += '\n' + token.raw;
+ lastToken.text += '\n' + token.text;
+ this.inlineQueue.pop();
+ this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
+ }
+ else {
+ tokens.push(token);
+ }
+ lastParagraphClipped = (cutSrc.length !== src.length);
+ src = src.substring(token.raw.length);
+ continue;
+ }
+ // text
+ if (token = this.tokenizer.text(src)) {
+ src = src.substring(token.raw.length);
+ lastToken = tokens[tokens.length - 1];
+ if (lastToken && lastToken.type === 'text') {
+ lastToken.raw += '\n' + token.raw;
+ lastToken.text += '\n' + token.text;
+ this.inlineQueue.pop();
+ this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
+ }
+ else {
+ tokens.push(token);
+ }
+ continue;
+ }
+ if (src) {
+ const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
+ if (this.options.silent) {
+ console.error(errMsg);
+ break;
+ }
+ else {
+ throw new Error(errMsg);
+ }
+ }
+ }
+ this.state.top = true;
+ return tokens;
+ }
+ inline(src, tokens = []) {
+ this.inlineQueue.push({ src, tokens });
+ return tokens;
+ }
+ /**
+ * Lexing/Compiling
+ */
+ inlineTokens(src, tokens = []) {
+ let token, lastToken, cutSrc;
+ // String with links masked to avoid interference with em and strong
+ let maskedSrc = src;
+ let match;
+ let keepPrevChar, prevChar;
+ // Mask out reflinks
+ if (this.tokens.links) {
+ const links = Object.keys(this.tokens.links);
+ if (links.length > 0) {
+ while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) {
+ if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) {
+ maskedSrc = maskedSrc.slice(0, match.index) + '[' + 'a'.repeat(match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex);
+ }
+ }
+ }
+ }
+ // Mask out other blocks
+ while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) {
+ maskedSrc = maskedSrc.slice(0, match.index) + '[' + 'a'.repeat(match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);
+ }
+ // Mask out escaped characters
+ while ((match = this.tokenizer.rules.inline.anyPunctuation.exec(maskedSrc)) != null) {
+ maskedSrc = maskedSrc.slice(0, match.index) + '++' + maskedSrc.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);
+ }
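+ // Each mask above replaces the match with filler of the same length, so indices
+ // found in maskedSrc by the em/strong scan still line up with positions in src.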
+ while (src) {
+ if (!keepPrevChar) {
+ prevChar = '';
+ }
+ keepPrevChar = false;
+ // extensions
+ if (this.options.extensions
+ && this.options.extensions.inline
+ && this.options.extensions.inline.some((extTokenizer) => {
+ if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ return true;
+ }
+ return false;
+ })) {
+ continue;
+ }
+ // escape
+ if (token = this.tokenizer.escape(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // tag
+ if (token = this.tokenizer.tag(src)) {
+ src = src.substring(token.raw.length);
+ lastToken = tokens[tokens.length - 1];
+ if (lastToken && token.type === 'text' && lastToken.type === 'text') {
+ lastToken.raw += token.raw;
+ lastToken.text += token.text;
+ }
+ else {
+ tokens.push(token);
+ }
+ continue;
+ }
+ // link
+ if (token = this.tokenizer.link(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // reflink, nolink
+ if (token = this.tokenizer.reflink(src, this.tokens.links)) {
+ src = src.substring(token.raw.length);
+ lastToken = tokens[tokens.length - 1];
+ if (lastToken && token.type === 'text' && lastToken.type === 'text') {
+ lastToken.raw += token.raw;
+ lastToken.text += token.text;
+ }
+ else {
+ tokens.push(token);
+ }
+ continue;
+ }
+ // em & strong
+ if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // code
+ if (token = this.tokenizer.codespan(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // br
+ if (token = this.tokenizer.br(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // del (gfm)
+ if (token = this.tokenizer.del(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // autolink
+ if (token = this.tokenizer.autolink(src)) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // url (gfm)
+ if (!this.state.inLink && (token = this.tokenizer.url(src))) {
+ src = src.substring(token.raw.length);
+ tokens.push(token);
+ continue;
+ }
+ // text
+ // prevent inlineText consuming extensions by clipping 'src' to extension start
+ cutSrc = src;
+ if (this.options.extensions && this.options.extensions.startInline) {
+ let startIndex = Infinity;
+ const tempSrc = src.slice(1);
+ let tempStart;
+ this.options.extensions.startInline.forEach((getStartIndex) => {
+ tempStart = getStartIndex.call({ lexer: this }, tempSrc);
+ if (typeof tempStart === 'number' && tempStart >= 0) {
+ startIndex = Math.min(startIndex, tempStart);
+ }
+ });
+ if (startIndex < Infinity && startIndex >= 0) {
+ cutSrc = src.substring(0, startIndex + 1);
+ }
+ }
+ if (token = this.tokenizer.inlineText(cutSrc)) {
+ src = src.substring(token.raw.length);
+ if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
+ prevChar = token.raw.slice(-1);
+ }
+ keepPrevChar = true;
+ lastToken = tokens[tokens.length - 1];
+ if (lastToken && lastToken.type === 'text') {
+ lastToken.raw += token.raw;
+ lastToken.text += token.text;
+ }
+ else {
+ tokens.push(token);
+ }
+ continue;
+ }
+ if (src) {
+ const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
+ if (this.options.silent) {
+ console.error(errMsg);
+ break;
+ }
+ else {
+ throw new Error(errMsg);
+ }
+ }
+ }
+ return tokens;
+ }
+}
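+// A rough usage sketch (with the library's default options): new _Lexer().lex('# hi\n\n**bold** text')
+// yields a heading token followed by a paragraph token whose `tokens` array holds the
+// inline 'strong' and 'text' tokens produced by inlineTokens via the inline queue.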
+
+/**
+ * Renderer
+ */
+class _Renderer {
+ options;
+ parser; // set by the parser
+ constructor(options) {
+ this.options = options || _defaults;
+ }
+ space(token) {
+ return '';
+ }
+ code({ text, lang, escaped }) {
+ const langString = (lang || '').match(/^\S*/)?.[0];
+ const code = text.replace(/\n$/, '') + '\n';
+ if (!langString) {
+ return '<pre><code>'
+ + (escaped ? code : escape$1(code, true))
+ + '</code></pre>\n';
+ }
+ return '<pre><code class="language-'
+ + escape$1(langString)
+ + '">'
+ + (escaped ? code : escape$1(code, true))
+ + '</code></pre>\n';
+ }
+ blockquote({ tokens }) {
+ const body = this.parser.parse(tokens);
+ return `<blockquote>\n${body}</blockquote>\n`;
+ }
+ html({ text }) {
+ return text;
+ }
+ heading({ tokens, depth }) {
+ return `<h${depth}>${this.parser.parseInline(tokens)}</h${depth}>\n`;
+ }
+ hr(token) {
+ return '<hr>\n';
+ }
+ list(token) {
+ const ordered = token.ordered;
+ const start = token.start;
+ let body = '';
+ for (let j = 0; j < token.items.length; j++) {
+ const item = token.items[j];
+ body += this.listitem(item);
+ }
+ const type = ordered ? 'ol' : 'ul';
+ const startAttr = (ordered && start !== 1) ? (' start="' + start + '"') : '';
+ return '<' + type + startAttr + '>\n' + body + '</' + type + '>\n';
+ }
+ listitem(item) {
+ let itemBody = '';
+ if (item.task) {
+ const checkbox = this.checkbox({ checked: !!item.checked });
+ if (item.loose) {
+ if (item.tokens.length > 0 && item.tokens[0].type === 'paragraph') {
+ item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
+ if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
+ item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
+ }
+ }
+ else {
+ item.tokens.unshift({
+ type: 'text',
+ raw: checkbox + ' ',
+ text: checkbox + ' ',
+ });
+ }
+ }
+ else {
+ itemBody += checkbox + ' ';
+ }
+ }
+ itemBody += this.parser.parse(item.tokens, !!item.loose);
+ return `<li>${itemBody}</li>\n`;
+ }
+ checkbox({ checked }) {
+ return '<input '
+ + (checked ? 'checked="" ' : '')
+ + 'disabled="" type="checkbox">';
+ }
+ paragraph({ tokens }) {
+ return `<p>${this.parser.parseInline(tokens)}</p>\n`;
+ }
+ table(token) {
+ let header = '';
+ // header
+ let cell = '';
+ for (let j = 0; j < token.header.length; j++) {
+ cell += this.tablecell(token.header[j]);
+ }
+ header += this.tablerow({ text: cell });
+ let body = '';
+ for (let j = 0; j < token.rows.length; j++) {
+ const row = token.rows[j];
+ cell = '';
+ for (let k = 0; k < row.length; k++) {
+ cell += this.tablecell(row[k]);
+ }
+ body += this.tablerow({ text: cell });
+ }
+ if (body)
+ body = `<tbody>${body}</tbody>`;
+ return '