@@ -1,4 +1,4 @@
/*! markdown-it 12.3.2 https://github.com/markdown-it/markdown-it @license MIT */
/*! markdown-it 13.0.0 https://github.com/markdown-it/markdown-it @license MIT */
( function ( global , factory ) {
typeof exports === "object" && typeof module !== "undefined" ? module . exports = factory ( ) : typeof define === "function" && define . amd ? define ( factory ) : ( global = typeof globalThis !== "undefined" ? globalThis : global || self ,
global . markdownit = factory ( ) ) ;
@@ -3662,13 +3662,13 @@
}
} ;
var arrayReplaceAt = utils . arrayReplaceAt ;
function isLinkOpen ( str ) {
function isLinkOpen$1 ( str ) {
return /^<a[>\s]/i . test ( str ) ;
}
function isLinkClose ( str ) {
function isLinkClose$1 ( str ) {
return /^<\/a\s*>/i . test ( str ) ;
}
var linkify = function linkify ( state ) {
var linkify$1 = function linkify ( state ) {
var i , j , l , tokens , token , currentToken , nodes , ln , text , pos , lastPos , level , htmlLinkLevel , url , fullUrl , urlText , blockTokens = state . tokens , links ;
if ( ! state . md . options . linkify ) {
return ;
@@ -3693,10 +3693,10 @@
}
// Skip content of html tag links
if ( currentToken . type === "html_inline" ) {
if ( isLinkOpen ( currentToken . content ) && htmlLinkLevel > 0 ) {
if ( isLinkOpen$1 ( currentToken . content ) && htmlLinkLevel > 0 ) {
htmlLinkLevel -- ;
}
if ( isLinkClose ( currentToken . content ) ) {
if ( isLinkClose$1 ( currentToken . content ) ) {
htmlLinkLevel ++ ;
}
}
@@ -3710,6 +3710,12 @@
nodes = [ ] ;
level = currentToken . level ;
lastPos = 0 ;
// forbid escape sequence at the start of the string,
// this avoids http\://example.com/ from being linkified as
// http:<a href="//example.com/">//example.com/</a>
if ( links . length > 0 && links [ 0 ] . index === 0 && i > 0 && tokens [ i - 1 ] . type === "text_special" ) {
links = links . slice ( 1 ) ;
}
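// A quick sketch of the observable change (markdown-it public API; the
// example input is hypothetical):
//   markdownit({ linkify: true }).render("http\\://example.com/")
//   // -> "<p>http://example.com/</p>\n" -- escaped scheme, no <a> emitted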
for ( ln = 0 ; ln < links . length ; ln ++ ) {
url = links [ ln ] . url ;
fullUrl = state . md . normalizeLink ( url ) ;
@@ -3767,16 +3773,15 @@
// Simple typographic replacements
// TODO:
// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
// - miltiplication 2 x 4 -> 2 × 4
// - multiplications 2 x 4 -> 2 × 4
var RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/ ;
// Workaround for phantomjs - need regex without /g flag,
// or root check will fail every second time
var SCOPED_ABBR_TEST_RE = /\((c|tm|r|p)\)/i ;
var SCOPED_ABBR_RE = /\((c|tm|r|p)\)/gi ;
var SCOPED_ABBR_TEST_RE = /\((c|tm|r)\)/i ;
var SCOPED_ABBR_RE = /\((c|tm|r)\)/gi ;
var SCOPED_ABBR = {
c : "\xa9" ,
r : "\xae" ,
p : "\xa7" ,
tm : "\u2122"
} ;
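// Illustration (sketch; the replacements rule only runs with the typographer
// option enabled): "(c) (tm) (r)" -> "© ™ ®", while "(p)" is now left
// untouched because the "§" replacement was dropped.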
function replaceFn ( match , name ) {
@@ -3838,7 +3843,7 @@
var QUOTE_RE = /['"]/g ;
var APOSTROPHE = "\u2019" ;
/* ’ */ function replaceAt ( str , index , ch ) {
return str . substr ( 0 , index ) + ch + str . substr ( index + 1 ) ;
return str . slice ( 0 , index ) + ch + str . slice ( index + 1 ) ;
}
function process_inlines ( tokens , state ) {
var i , token , text , t , pos , max , thisLevel , item , lastChar , nextChar , isLastPunctChar , isNextPunctChar , isLastWhiteSpace , isNextWhiteSpace , canOpen , canClose , j , isSingle , stack , openQuote , closeQuote ;
@@ -3997,6 +4002,34 @@
process_inlines ( state . tokens [ blkIdx ] . children , state ) ;
}
} ;
// Join raw text tokens with the rest of the text
var text_join = function text_join ( state ) {
var j , l , tokens , curr , max , last , blockTokens = state . tokens ;
for ( j = 0 , l = blockTokens . length ; j < l ; j ++ ) {
if ( blockTokens [ j ] . type !== "inline" ) continue ;
tokens = blockTokens [ j ] . children ;
max = tokens . length ;
for ( curr = 0 ; curr < max ; curr ++ ) {
if ( tokens [ curr ] . type === "text_special" ) {
tokens [ curr ] . type = "text" ;
}
}
for ( curr = last = 0 ; curr < max ; curr ++ ) {
if ( tokens [ curr ] . type === "text" && curr + 1 < max && tokens [ curr + 1 ] . type === "text" ) {
// collapse two adjacent text nodes
tokens [ curr + 1 ] . content = tokens [ curr ] . content + tokens [ curr + 1 ] . content ;
} else {
if ( curr !== last ) {
tokens [ last ] = tokens [ curr ] ;
}
last ++ ;
}
}
if ( curr !== last ) {
tokens . length = last ;
}
}
} ;
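// Rough sketch of this pass on a hypothetical inline token stream:
//   before: [ { type: "text", content: "foo" },
//             { type: "text_special", content: "*", info: "escape" } ]
//   after:  [ { type: "text", content: "foo*" } ]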
// Token class
/**
* class Token
@@ -4160,7 +4193,10 @@
// re-export Token class to use in core rules
StateCore . prototype . Token = token ;
var state_core = StateCore ;
var _rules$2 = [ [ "normalize" , normalize ] , [ "block" , block ] , [ "inline" , inline ] , [ "linkify" , linkify ] , [ "replacements" , replacements ] , [ "smartquotes" , smartquotes ] ] ;
var _rules$2 = [ [ "normalize" , normalize ] , [ "block" , block ] , [ "inline" , inline ] , [ "linkify" , linkify$1 ] , [ "replacements" , replacements ] , [ "smartquotes" , smartquotes ] ,
// `text_join` finds `text_special` tokens (for escape sequences)
// and joins them with the rest of the text
[ "text_join" , text_join ] ] ;
/**
* new Core ( )
**/ function Core ( ) {
@@ -4190,7 +4226,7 @@
var isSpace$a = utils . isSpace ;
function getLine ( state , line ) {
var pos = state . bMarks [ line ] + state . tShift [ line ] , max = state . eMarks [ line ] ;
return state . src . substr ( pos , max - pos ) ;
return state . src . slice ( pos , max ) ;
}
function escapedSplit ( str ) {
var result = [ ] , pos = 0 , max = str . length , ch , isEscaped = false , lastPos = 0 , current = "" ;
@@ -5750,6 +5786,44 @@
state . pos = pos ;
return true ;
} ;
// Process links like https://example.org/
// RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
var SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i ;
var linkify = function linkify ( state , silent ) {
var pos , max , match , proto , link , url , fullUrl , token ;
if ( ! state . md . options . linkify ) return false ;
if ( state . linkLevel > 0 ) return false ;
pos = state . pos ;
max = state . posMax ;
if ( pos + 3 > max ) return false ;
if ( state . src . charCodeAt ( pos ) !== 58 /* : */ ) return false ;
if ( state . src . charCodeAt ( pos + 1 ) !== 47 /* / */ ) return false ;
if ( state . src . charCodeAt ( pos + 2 ) !== 47 /* / */ ) return false ;
match = state . pending . match ( SCHEME_RE ) ;
if ( ! match ) return false ;
proto = match [ 1 ] ;
link = state . md . linkify . matchAtStart ( state . src . slice ( pos - proto . length ) ) ;
if ( ! link ) return false ;
url = link . url ;
// disallow '*' at the end of the link (conflicts with emphasis)
url = url . replace ( /\*+$/ , "" ) ;
fullUrl = state . md . normalizeLink ( url ) ;
if ( ! state . md . validateLink ( fullUrl ) ) return false ;
if ( ! silent ) {
state . pending = state . pending . slice ( 0 , - proto . length ) ;
token = state . push ( "link_open" , "a" , 1 ) ;
token . attrs = [ [ "href" , fullUrl ] ] ;
token . markup = "linkify" ;
token . info = "auto" ;
token = state . push ( "text" , "" , 0 ) ;
token . content = state . md . normalizeLinkText ( url ) ;
token = state . push ( "link_close" , "a" , - 1 ) ;
token . markup = "linkify" ;
token . info = "auto" ;
}
state . pos += url . length - proto . length ;
return true ;
} ;
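// Usage sketch (assumes { linkify: true }): the rule fires when the parser
// reaches "://" and the pending text ends in a scheme, e.g.
//   markdownit({ linkify: true }).render("see https://example.org/")
//   // -> '<p>see <a href="https://example.org/">https://example.org/</a></p>\n'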
var isSpace$3 = utils . isSpace ;
var newline = function newline ( state , silent ) {
var pmax , max , ws , pos = state . pos ;
@@ -5795,41 +5869,46 @@
ESCAPED [ ch . charCodeAt ( 0 ) ] = 1 ;
} ) ) ;
var _escape = function escape ( state , silent ) {
var ch , pos = state . pos , max = state . posMax ;
if ( state . src . charCodeAt ( pos ) !== 92 /* \ */ ) {
return false ;
}
var ch1 , ch2 , origStr , escapedStr , token , pos = state . pos , max = state . posMax ;
if ( state . src . charCodeAt ( pos ) !== 92 /* \ */ ) return false ;
pos ++ ;
if ( pos < max ) {
ch = state . src . charCodeAt ( pos ) ;
if ( ch < 256 && ESCAPED [ ch ] !== 0 ) {
if ( ! silent ) {
state . pending += state . src [ pos ] ;
}
state . pos += 2 ;
return true ;
// '\' at the end of the inline block
if ( pos >= max ) return false ;
ch1 = state . src . charCodeAt ( pos ) ;
if ( ch1 === 10 ) {
if ( ! silent ) {
state . push ( "hardbreak" , "br" , 0 ) ;
}
if ( ch === 10 ) {
if ( ! silent ) {
state . push ( "hardbreak" , "br" , 0 ) ;
}
pos ++ ;
// skip leading whitespaces from next line
while ( pos < max ) {
ch1 = state . src . charCodeAt ( pos ) ;
if ( ! isSpace$2 ( ch1 ) ) break ;
pos ++ ;
}
state . pos = pos ;
return true ;
}
escapedStr = state . src [ pos ] ;
if ( ch1 >= 55296 && ch1 <= 56319 && pos + 1 < max ) {
ch2 = state . src . charCodeAt ( pos + 1 ) ;
if ( ch2 >= 56320 && ch2 <= 57343 ) {
escapedStr += state . src [ pos + 1 ] ;
pos ++ ;
// skip leading whitespaces from next line
while ( pos < max ) {
ch = state . src . charCodeAt ( pos ) ;
if ( ! isSpace$2 ( ch ) ) {
break ;
}
pos ++ ;
}
state . pos = pos ;
return true ;
}
}
origStr = "\\" + escapedStr ;
if ( ! silent ) {
state . pending += "\\" ;
token = state . push ( "text_special" , "" , 0 ) ;
if ( ch1 < 256 && ESCAPED [ ch1 ] !== 0 ) {
token . content = escapedStr ;
} else {
token . content = origStr ;
}
token . markup = origStr ;
token . info = "escape" ;
}
state . pos ++ ;
state . pos = pos + 1 ;
return true ;
} ;
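// Sketch: an escaped "\*" now becomes a token roughly like
//   { type: "text_special", content: "*", markup: "\\*", info: "escape" }
// instead of raw pending text, letting core rules (linkify, smartquotes)
// tell escaped characters apart from literal ones until text_join merges them.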
// Parse backticks
@@ -6184,7 +6263,9 @@
if ( title ) {
attrs . push ( [ "title" , title ] ) ;
}
state . linkLevel ++ ;
state . md . inline . tokenize ( state ) ;
state . linkLevel -- ;
token = state . push ( "link_close" , "a" , - 1 ) ;
}
state . pos = pos ;
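// Sketch: the linkLevel counter suppresses nested autolinks, e.g. in
//   [https://a.example/](https://b.example/)
// the label is tokenized with state.linkLevel > 0, so the inline linkify
// rule bails and only the markdown link's <a> is produced.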
@@ -6371,6 +6452,12 @@
return false ;
} ;
var HTML_TAG_RE = html_re . HTML_TAG_RE ;
function isLinkOpen ( str ) {
return /^<a[>\s]/i . test ( str ) ;
}
function isLinkClose ( str ) {
return /^<\/a\s*>/i . test ( str ) ;
}
function isLetter ( ch ) {
/*eslint no-bitwise:0*/
var lc = ch | 32 ;
@@ -6399,6 +6486,8 @@
if ( ! silent ) {
token = state . push ( "html_inline" , "" , 0 ) ;
token . content = state . src . slice ( pos , pos + match [ 0 ] . length ) ;
if ( isLinkOpen ( token . content ) ) state . linkLevel ++ ;
if ( isLinkClose ( token . content ) ) state . linkLevel -- ;
}
state . pos += match [ 0 ] . length ;
return true ;
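// Sketch: raw HTML anchors are tracked the same way, so in
//   <a href="https://a.example/">https://a.example/</a>
// the inner URL is not linkified a second time while linkLevel > 0.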
@@ -6409,40 +6498,39 @@
var DIGITAL_RE = /^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i ;
var NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i ;
var entity = function entity ( state , silent ) {
var ch , code , match , pos = state . pos , max = state . posMax ;
if ( state . src . charCodeAt ( pos ) !== 38 /* & */ ) {
return false ;
}
if ( pos + 1 < max ) {
ch = state . src . charCodeAt ( pos + 1 ) ;
if ( ch === 35 /* # */ ) {
match = state . src . slice ( pos ) . match ( DIGITAL_RE ) ;
if ( match ) {
var ch , code , match , token , pos = state . pos , max = state . posMax ;
if ( state . src . charCodeAt ( pos ) !== 38 /* & */ ) return false ;
if ( pos + 1 >= max ) return false ;
ch = state . src . charCodeAt ( pos + 1 ) ;
if ( ch === 35 /* # */ ) {
match = state . src . slice ( pos ) . match ( DIGITAL_RE ) ;
if ( match ) {
if ( ! silent ) {
code = match [ 1 ] [ 0 ] . toLowerCase ( ) === "x" ? parseInt ( match [ 1 ] . slice ( 1 ) , 16 ) : parseInt ( match [ 1 ] , 10 ) ;
token = state . push ( "text_special" , "" , 0 ) ;
token . content = isValidEntityCode ( code ) ? fromCodePoint ( code ) : fromCodePoint ( 65533 ) ;
token . markup = match [ 0 ] ;
token . info = "entity" ;
}
state . pos += match [ 0 ] . length ;
return true ;
}
} else {
match = state . src . slice ( pos ) . match ( NAMED_RE ) ;
if ( match ) {
if ( has ( entities , match [ 1 ] ) ) {
if ( ! silent ) {
code = match [ 1 ] [ 0 ] . toLowerCase ( ) === "x" ? parseInt ( match [ 1 ] . slice ( 1 ) , 16 ) : parseInt ( match [ 1 ] , 10 ) ;
state . pending += isValidEntityCode ( code ) ? fromCodePoint ( code ) : fromCodePoint ( 65533 ) ;
token = state . push ( "text_special" , "" , 0 ) ;
token . content = entities [ match [ 1 ] ] ;
token . markup = match [ 0 ] ;
token . info = "entity" ;
}
state . pos += match [ 0 ] . length ;
return true ;
}
} else {
match = state . src . slice ( pos ) . match ( NAMED_RE ) ;
if ( match ) {
if ( has ( entities , match [ 1 ] ) ) {
if ( ! silent ) {
state . pending += entities [ match [ 1 ] ] ;
}
state . pos += match [ 0 ] . length ;
return true ;
}
}
}
}
if ( ! silent ) {
state . pending += "&" ;
}
state . pos ++ ;
return true ;
return false ;
} ;
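// Sketch: "&amp;" or "&#x26;" now emit a token roughly like
//   { type: "text_special", content: "&", markup: "&amp;", info: "entity" }
// and an out-of-range numeric reference degrades to U+FFFD via
// fromCodePoint(65533), as above.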
// For each opening emphasis-like marker find a matching closing one
function processDelimiters ( state , delimiters ) {
@@ -6537,7 +6625,7 @@
}
} ;
// Clean up tokens after emphasis and strikethrough postprocessing:
var text_collapse = function text_collapse ( state ) {
var fragments_join = function fragments_join ( state ) {
var curr , last , level = 0 , tokens = state . tokens , max = state . tokens . length ;
for ( curr = last = 0 ; curr < max ; curr ++ ) {
// re-calculate levels after emphasis/strikethrough turns some text nodes
@@ -6585,6 +6673,9 @@
// backtick length => last seen position
this . backticks = { } ;
this . backticksScanned = false ;
// Counter used to disable inline linkify-it execution
// inside <a> and markdown links
this . linkLevel = 0 ;
}
// Flush pending text
@@ -6677,8 +6768,16 @@
var state_inline = StateInline ;
////////////////////////////////////////////////////////////////////////////////
// Parser rules
var _rules = [ [ "text" , text ] , [ "newline" , newline ] , [ "escape" , _escape ] , [ "backticks" , backticks ] , [ "strikethrough" , strikethrough . tokenize ] , [ "emphasis" , emphasis . tokenize ] , [ "link" , link ] , [ "image" , image ] , [ "autolink" , autolink ] , [ "html_inline" , html_inline ] , [ "entity" , entity ] ] ;
var _rules2 = [ [ "balance_pairs" , balance_pairs ] , [ "strikethrough" , strikethrough . postProcess ] , [ "emphasis" , emphasis . postProcess ] , [ "text_collapse" , text_collapse ] ] ;
var _rules = [ [ "text" , text ] , [ "linkify" , linkify ] , [ "newline" , newline ] , [ "escape" , _escape ] , [ "backticks" , backticks ] , [ "strikethrough" , strikethrough . tokenize ] , [ "emphasis" , emphasis . tokenize ] , [ "link" , link ] , [ "image" , image ] , [ "autolink" , autolink ] , [ "html_inline" , html_inline ] , [ "entity" , entity ] ] ;
// `rule2` ruleset was created specifically for emphasis/strikethrough
// post-processing and may be changed in the future.
// Don't use this for anything except pairs (plugins working with `balance_pairs`).
var _ rules2 = [ [ "balance_pairs" , balance_pairs ] , [ "strikethrough" , strikethrough . postProcess ] , [ "emphasis" , emphasis . postProcess ] ,
// rules for pairs separate '**' into its own text tokens, which may be left unused,
// rule below merges unused segments back with the rest of the text
[ "fragments_join" , fragments_join ] ] ;
/**
* new ParserInline ( )
**/ function ParserInline ( ) {
@@ -6785,6 +6884,7 @@
var parser_inline = ParserInline ;
var re = function ( opts ) {
var re = { } ;
opts = opts || { } ;
// Use direct extract instead of `regenerate` to reduce browserified size
re . src_Any = regex$3 . source ;
re . src_Cc = regex$2 . source ;
@@ -6808,8 +6908,8 @@
// Prohibit any of "@/[]()" in user/pass to avoid wrong domain fetch.
re . src_auth = "(?:(?:(?!" + re . src_ZCc + "|[@/\\[\\]()]).)+@)?" ;
re . src_port = "(?::(?:6(?:[0-4]\\d{3}|5(?:[0-4]\\d{2}|5(?:[0-2]\\d|3[0-5])))|[1-5]?\\d{1,4}))?" ;
re . src_host_terminator = "(?=$|" + text_separators + "|" + re . src_ZPCc + ")(?!-|_|:\\d|\\.-|\\.(?!$|" + re . src_ZPCc + "))" ;
re . src_path = "(?:" + "[/?#]" + "(?:" + "(?!" + re . src_ZCc + "|" + text_separators + "|[()[\\]{}.,\"'?!\\-;]).|" + "\\[(?:(?!" + re . src_ZCc + "|\\]).)*\\]|" + "\\((?:(?!" + re . src_ZCc + "|[)]).)*\\)|" + "\\{(?:(?!" + re . src_ZCc + "|[}]).)*\\}|" + '\\"(?:(?!' + re . src_ZCc + '|["]).)+\\"|' + "\\'(?:(?!" + re . src_ZCc + "|[']).)+\\'|" + "\\'(?=" + re . src_pseudo_letter + "|[-]).|" + // allow `I'm_king` if no pair found
re . src_host_terminator = "(?=$|" + text_separators + "|" + re . src_ZPCc + ")" + "(?!" + ( opts [ "---" ] ? "-(?!--)|" : "-|" ) + "_|:\\d|\\.-|\\.(?!$|" + re . src_ZPCc + "))" ;
re . src_path = "(?:" + "[/?#]" + "(?:" + "(?!" + re . src_ZCc + "|" + text_separators + "|[()[\\]{}.,\"'?!\\-;]).|" + "\\[(?:(?!" + re . src_ZCc + "|\\]).)*\\]|" + "\\((?:(?!" + re . src_ZCc + "|[)]).)*\\)|" + "\\{(?:(?!" + re . src_ZCc + "|[}]).)*\\}|" + '\\"(?:(?!' + re . src_ZCc + '|["]).)+\\"|' + "\\'(?:(?!" + re . src_ZCc + "|[']).)+\\'|" + "\\'(?=" + re . src_pseudo_letter + "|[-])|" + // allow `I'm_king` if no pair found
"\\.{2,}[a-zA-Z0-9%/&]|" + // google has many dots in "google search" links (#66, #81).
// github has ... in commit range links,
// Restrict to
@@ -6818,10 +6918,10 @@
// - parts of file path
// - params separator
// until more examples found.
"\\.(?!" + re . src_ZCc + "|[.]). |" + ( opts && opts [ "---" ] ? "\\-(?!--(?:[^-]|$))(?:-*)|" : "\\-+|" ) + ",(?!" + re . src_ZCc + "). |" + // allow `,,,` in paths
";(?!" + re . src_ZCc + "). |" + // allow `;` if not followed by space-like char
"\\!+(?!" + re . src_ZCc + "|[!]). |" + // allow `!!!` in paths, but not at the end
"\\?(?!" + re . src_ZCc + "|[?]). " + ")+" + "|\\/" + ")?" ;
"\\.(?!" + re . src_ZCc + "|[.]|$ )|" + ( opts [ "---" ] ? "\\-(?!--(?:[^-]|$))(?:-*)|" : "\\-+|" ) + ",(?!" + re . src_ZCc + "|$ )|" + // allow `,,,` in paths
";(?!" + re . src_ZCc + "|$ )|" + // allow `;` if not followed by space-like char
"\\!+(?!" + re . src_ZCc + "|[!]|$ )|" + // allow `!!!` in paths, but not at the end
"\\?(?!" + re . src_ZCc + "|[?]|$ )" + ")+" + "|\\/" + ")?" ;
// Allow anything in markdown spec, forbid quote (") at the first position
// because emails enclosed in quotes are far more common
re . src_email_name = '[\\-;:&=\\+\\$,\\.a-zA-Z0-9_][\\-;:&=\\+\\$,\\"\\.a-zA-Z0-9_]*' ;
@@ -7073,6 +7173,7 @@
// (?!_) cause 1.5x slowdown
self . re . schema_test = RegExp ( "(^|(?!_)(?:[><\uff5c]|" + re$1 . src_ZPCc + "))(" + slist + ")" , "i" ) ;
self . re . schema_search = RegExp ( "(^|(?!_)(?:[><\uff5c]|" + re$1 . src_ZPCc + "))(" + slist + ")" , "ig" ) ;
self . re . schema_at_start = RegExp ( "^" + self . re . schema_search . source , "i" ) ;
self . re . pretest = RegExp ( "(" + self . re . schema_test . source + ")|(" + self . re . host_fuzzy_test . source + ")|@" , "i" ) ;
// Cleanup
@@ -7322,6 +7423,26 @@
}
return null ;
} ;
/**
* LinkifyIt # matchAtStart ( text ) - > Match | null
*
* Returns fully - formed ( not fuzzy ) link if it starts at the beginning
* of the string , and null otherwise .
**/ LinkifyIt . prototype . matchAtStart = function matchAtStart ( text ) {
// Reset scan cache
this . __text_cache__ = text ;
this . __index__ = - 1 ;
if ( ! text . length ) return null ;
var m = this . re . schema_at_start . exec ( text ) ;
if ( ! m ) return null ;
var len = this . testSchemaAt ( text , m [ 2 ] , m [ 0 ] . length ) ;
if ( ! len ) return null ;
this . __schema__ = m [ 2 ] ;
this . __index__ = m . index + m [ 1 ] . length ;
this . __last_index__ = m . index + m [ 0 ] . length + len ;
return createMatch ( this , 0 ) ;
} ;
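// Usage sketch with a LinkifyIt instance (e.g. md.linkify), as consumed by
// the inline linkify rule above:
//   md.linkify.matchAtStart("https://example.org/ tail")
//   // -> match with url "https://example.org/" at index 0
//   md.linkify.matchAtStart("see https://example.org/")
//   // -> null (link is not at the start)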
/** chainable
* LinkifyIt # tlds ( list [ , keepOld ] ) - > this
* - list ( Array ) : list of tlds
@@ -7814,14 +7935,14 @@
} ,
components : {
core : {
rules : [ "normalize" , "block" , "inline" ]
rules : [ "normalize" , "block" , "inline" , "text_join" ]
} ,
block : {
rules : [ "paragraph" ]
} ,
inline : {
rules : [ "text" ] ,
rules2 : [ "balance_pairs" , "text_collapse " ]
rules2 : [ "balance_pairs" , "fragments_join " ]
}
}
} ;
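// Sketch (assuming this is the "zero" preset): rules outside the whitelist
// stay registered but disabled, and can be re-enabled per instance, e.g.
//   var md = markdownit("zero").enable([ "emphasis", "backticks" ]);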
@@ -7855,14 +7976,14 @@
} ,
components : {
core : {
rules : [ "normalize" , "block" , "inline" ]
rules : [ "normalize" , "block" , "inline" , "text_join" ]
} ,
block : {
rules : [ "blockquote" , "code" , "fence" , "heading" , "hr" , "html_block" , "lheading" , "list" , "reference" , "paragraph" ]
} ,
inline : {
rules : [ "autolink" , "backticks" , "emphasis" , "entity" , "escape" , "html_inline" , "image" , "link" , "newline" , "text" ] ,
rules2 : [ "balance_pairs" , "emphasis" , "text_collapse " ]
rules2 : [ "balance_pairs" , "emphasis" , "fragments_join " ]
}
}
} ;