// Replace link-like texts with link nodes.
//
// Currently restricted by `inline.validateLink()` to http/https/ftp
//
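// Usage sketch (illustrative, not part of this module): this core rule only
// runs when the `linkify` option is enabled. Assuming the package's standard
// top-level API, something like:
//
//   var md = require('markdown-it')({ linkify: true });
//
//   md.render('See www.example.com');
//   // -> the bare "www.example.com" text ends up wrapped in an <a> tag
//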
'use strict';


var Autolinker = require('autolinker');
var arrayReplaceAt = require('../common/utils').arrayReplaceAt;


var LINK_SCAN_RE = /www|@|\:\/\//;

function isLinkOpen(str) {
  return /^<a[>\s]/i.test(str);
}
function isLinkClose(str) {
  return /^<\/a\s*>/i.test(str);
}

// Factory instead of a singleton, for thread safety.
// Required for engines like Nashorn.
//
function createLinkifier() {
  var links = [];
  var autolinker = new Autolinker({
    stripPrefix: false,
    url: true,
    email: true,
    twitter: false,
    replaceFn: function (autolinker, match) {
      // Only collect matched strings, don't change anything.
      switch (match.getType()) {
        /*eslint default-case:0*/
        case 'url':
          links.push({
            text: match.matchedText,
            url: match.getUrl()
          });
          break;
        case 'email':
          links.push({
            text: match.matchedText,
            // normalize email protocol
            url: 'mailto:' + match.getEmail().replace(/^mailto:/i, '')
          });
          break;
      }
      return false;
    }
  });

  return {
    links: links,
    autolinker: autolinker
  };
}
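
// Illustration (not executed): running `autolinker.link()` on a string such as
// 'see www.example.com or write to user@example.com' returns the text
// unchanged (the replaceFn above returns false), but fills the shared `links`
// array with entries of the form { text: <matched text>, url: <resolved url> }:
// a url with the scheme filled in for the first match and a 'mailto:' url for
// the second. The linkify rule below consumes only this array.
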
module.exports = function linkify(state) {
  var i, j, l, tokens, token, text, nodes, ln, pos, level, htmlLinkLevel,
      blockTokens = state.tokens,
      linkifier = null, links, autolinker;

  if (!state.options.linkify) { return; }

  for (j = 0, l = blockTokens.length; j < l; j++) {
    if (blockTokens[j].type !== 'inline') { continue; }
    tokens = blockTokens[j].children;

    htmlLinkLevel = 0;

    // We scan from the end, to keep positions stable when new tags are added.
    // Because of the reverse scan, link open/close matching uses reversed logic.
    for (i = tokens.length - 1; i >= 0; i--) {
      token = tokens[i];

      // Skip content of markdown links
      if (token.type === 'link_close') {
        i--;
        while (tokens[i].level !== token.level && tokens[i].type !== 'link_open') {
          i--;
        }
        continue;
      }

      // Skip content of HTML tag links (<a>...</a>)
      if (token.type === 'htmltag') {
        if (isLinkOpen(token.content) && htmlLinkLevel > 0) {
          htmlLinkLevel--;
        }
        if (isLinkClose(token.content)) {
          htmlLinkLevel++;
        }
      }
      if (htmlLinkLevel > 0) { continue; }

      if (token.type === 'text' && LINK_SCAN_RE.test(token.content)) {

        // Init the linkifier lazily, only if required.
        if (!linkifier) {
          linkifier = createLinkifier();
          links = linkifier.links;
          autolinker = linkifier.autolinker;
        }

        text = token.content;
        links.length = 0;
        autolinker.link(text);

        if (!links.length) { continue; }

        // Now split the string into nodes
        nodes = [];
        level = token.level;

        for (ln = 0; ln < links.length; ln++) {

          if (!state.inline.validateLink(links[ln].url)) { continue; }

          pos = text.indexOf(links[ln].text);

          if (pos) {
            nodes.push({
              type: 'text',
              content: text.slice(0, pos),
              level: level
            });
          }
          nodes.push({
            type: 'link_open',
            href: links[ln].url,
            title: '',
            level: level++
          });
          nodes.push({
            type: 'text',
            content: links[ln].text,
            level: level
          });
          nodes.push({
            type: 'link_close',
            level: --level
          });
          text = text.slice(pos + links[ln].text.length);
        }
        if (text.length) {
          nodes.push({
            type: 'text',
            content: text,
            level: level
          });
        }

        // replace current node
        blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);
      }
    }
  }
};
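
// Illustration (not executed): for an inline 'text' token containing
// 'ask www.example.com now', the loop above replaces that single token with a
// sequence of the form (levels omitted, href taken from Autolinker's
// match.getUrl()):
//
//   { type: 'text',       content: 'ask ' }
//   { type: 'link_open',  href: '...', title: '' }
//   { type: 'text',       content: 'www.example.com' }
//   { type: 'link_close' }
//   { type: 'text',       content: ' now' }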