This commit is contained in:
7
node_modules/micromark/dist/character/ascii-alpha.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/ascii-alpha.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var asciiAlpha = regexCheck(/[A-Za-z]/)
|
||||
|
||||
module.exports = asciiAlpha
|
||||
7
node_modules/micromark/dist/character/ascii-alphanumeric.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/ascii-alphanumeric.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var asciiAlphanumeric = regexCheck(/[\dA-Za-z]/)
|
||||
|
||||
module.exports = asciiAlphanumeric
|
||||
7
node_modules/micromark/dist/character/ascii-atext.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/ascii-atext.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var asciiAtext = regexCheck(/[#-'*+\--9=?A-Z^-~]/)
|
||||
|
||||
module.exports = asciiAtext
|
||||
12
node_modules/micromark/dist/character/ascii-control.js
generated
vendored
Normal file
12
node_modules/micromark/dist/character/ascii-control.js
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
'use strict'
|
||||
|
||||
// Note: EOF is seen as ASCII control here, because `null < 32 == true`.
|
||||
function asciiControl(code) {
|
||||
return (
|
||||
// Special whitespace codes (which have negative values), C0 and Control
|
||||
// character DEL
|
||||
code < 32 || code === 127
|
||||
)
|
||||
}
|
||||
|
||||
module.exports = asciiControl
|
||||
7
node_modules/micromark/dist/character/ascii-digit.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/ascii-digit.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var asciiDigit = regexCheck(/\d/)
|
||||
|
||||
module.exports = asciiDigit
|
||||
7
node_modules/micromark/dist/character/ascii-hex-digit.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/ascii-hex-digit.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var asciiHexDigit = regexCheck(/[\dA-Fa-f]/)
|
||||
|
||||
module.exports = asciiHexDigit
|
||||
7
node_modules/micromark/dist/character/ascii-punctuation.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/ascii-punctuation.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var asciiPunctuation = regexCheck(/[!-/:-@[-`{-~]/)
|
||||
|
||||
module.exports = asciiPunctuation
|
||||
3
node_modules/micromark/dist/character/codes.d.ts
generated
vendored
Normal file
3
node_modules/micromark/dist/character/codes.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
// This module is generated by `script/`.
|
||||
|
||||
export type Code = null | number
|
||||
257
node_modules/micromark/dist/character/codes.js
generated
vendored
Normal file
257
node_modules/micromark/dist/character/codes.js
generated
vendored
Normal file
@@ -0,0 +1,257 @@
|
||||
'use strict'
|
||||
|
||||
// This module is compiled away!
|
||||
//
|
||||
// micromark works based on character codes.
|
||||
// This module contains constants for the ASCII block and the replacement
|
||||
// character.
|
||||
// A couple of them are handled in a special way, such as the line endings
|
||||
// (CR, LF, and CR+LF, commonly known as end-of-line: EOLs), the tab (horizontal
|
||||
// tab) and its expansion based on what column it’s at (virtual space),
|
||||
// and the end-of-file (eof) character.
|
||||
// As values are preprocessed before handling them, the actual characters LF,
|
||||
// CR, HT, and NUL (which is present as the replacement character), are
|
||||
// guaranteed to not exist.
|
||||
//
|
||||
// Unicode basic latin block.
|
||||
var codes = {
|
||||
carriageReturn: -5,
|
||||
lineFeed: -4,
|
||||
carriageReturnLineFeed: -3,
|
||||
horizontalTab: -2,
|
||||
virtualSpace: -1,
|
||||
eof: null,
|
||||
nul: 0,
|
||||
soh: 1,
|
||||
stx: 2,
|
||||
etx: 3,
|
||||
eot: 4,
|
||||
enq: 5,
|
||||
ack: 6,
|
||||
bel: 7,
|
||||
bs: 8,
|
||||
ht: 9,
|
||||
// `\t`
|
||||
lf: 10,
|
||||
// `\n`
|
||||
vt: 11,
|
||||
// `\v`
|
||||
ff: 12,
|
||||
// `\f`
|
||||
cr: 13,
|
||||
// `\r`
|
||||
so: 14,
|
||||
si: 15,
|
||||
dle: 16,
|
||||
dc1: 17,
|
||||
dc2: 18,
|
||||
dc3: 19,
|
||||
dc4: 20,
|
||||
nak: 21,
|
||||
syn: 22,
|
||||
etb: 23,
|
||||
can: 24,
|
||||
em: 25,
|
||||
sub: 26,
|
||||
esc: 27,
|
||||
fs: 28,
|
||||
gs: 29,
|
||||
rs: 30,
|
||||
us: 31,
|
||||
space: 32,
|
||||
exclamationMark: 33,
|
||||
// `!`
|
||||
quotationMark: 34,
|
||||
// `"`
|
||||
numberSign: 35,
|
||||
// `#`
|
||||
dollarSign: 36,
|
||||
// `$`
|
||||
percentSign: 37,
|
||||
// `%`
|
||||
ampersand: 38,
|
||||
// `&`
|
||||
apostrophe: 39,
|
||||
// `'`
|
||||
leftParenthesis: 40,
|
||||
// `(`
|
||||
rightParenthesis: 41,
|
||||
// `)`
|
||||
asterisk: 42,
|
||||
// `*`
|
||||
plusSign: 43,
|
||||
// `+`
|
||||
comma: 44,
|
||||
// `,`
|
||||
dash: 45,
|
||||
// `-`
|
||||
dot: 46,
|
||||
// `.`
|
||||
slash: 47,
|
||||
// `/`
|
||||
digit0: 48,
|
||||
// `0`
|
||||
digit1: 49,
|
||||
// `1`
|
||||
digit2: 50,
|
||||
// `2`
|
||||
digit3: 51,
|
||||
// `3`
|
||||
digit4: 52,
|
||||
// `4`
|
||||
digit5: 53,
|
||||
// `5`
|
||||
digit6: 54,
|
||||
// `6`
|
||||
digit7: 55,
|
||||
// `7`
|
||||
digit8: 56,
|
||||
// `8`
|
||||
digit9: 57,
|
||||
// `9`
|
||||
colon: 58,
|
||||
// `:`
|
||||
semicolon: 59,
|
||||
// `;`
|
||||
lessThan: 60,
|
||||
// `<`
|
||||
equalsTo: 61,
|
||||
// `=`
|
||||
greaterThan: 62,
|
||||
// `>`
|
||||
questionMark: 63,
|
||||
// `?`
|
||||
atSign: 64,
|
||||
// `@`
|
||||
uppercaseA: 65,
|
||||
// `A`
|
||||
uppercaseB: 66,
|
||||
// `B`
|
||||
uppercaseC: 67,
|
||||
// `C`
|
||||
uppercaseD: 68,
|
||||
// `D`
|
||||
uppercaseE: 69,
|
||||
// `E`
|
||||
uppercaseF: 70,
|
||||
// `F`
|
||||
uppercaseG: 71,
|
||||
// `G`
|
||||
uppercaseH: 72,
|
||||
// `H`
|
||||
uppercaseI: 73,
|
||||
// `I`
|
||||
uppercaseJ: 74,
|
||||
// `J`
|
||||
uppercaseK: 75,
|
||||
// `K`
|
||||
uppercaseL: 76,
|
||||
// `L`
|
||||
uppercaseM: 77,
|
||||
// `M`
|
||||
uppercaseN: 78,
|
||||
// `N`
|
||||
uppercaseO: 79,
|
||||
// `O`
|
||||
uppercaseP: 80,
|
||||
// `P`
|
||||
uppercaseQ: 81,
|
||||
// `Q`
|
||||
uppercaseR: 82,
|
||||
// `R`
|
||||
uppercaseS: 83,
|
||||
// `S`
|
||||
uppercaseT: 84,
|
||||
// `T`
|
||||
uppercaseU: 85,
|
||||
// `U`
|
||||
uppercaseV: 86,
|
||||
// `V`
|
||||
uppercaseW: 87,
|
||||
// `W`
|
||||
uppercaseX: 88,
|
||||
// `X`
|
||||
uppercaseY: 89,
|
||||
// `Y`
|
||||
uppercaseZ: 90,
|
||||
// `Z`
|
||||
leftSquareBracket: 91,
|
||||
// `[`
|
||||
backslash: 92,
|
||||
// `\`
|
||||
rightSquareBracket: 93,
|
||||
// `]`
|
||||
caret: 94,
|
||||
// `^`
|
||||
underscore: 95,
|
||||
// `_`
|
||||
graveAccent: 96,
|
||||
// `` ` ``
|
||||
lowercaseA: 97,
|
||||
// `a`
|
||||
lowercaseB: 98,
|
||||
// `b`
|
||||
lowercaseC: 99,
|
||||
// `c`
|
||||
lowercaseD: 100,
|
||||
// `d`
|
||||
lowercaseE: 101,
|
||||
// `e`
|
||||
lowercaseF: 102,
|
||||
// `f`
|
||||
lowercaseG: 103,
|
||||
// `g`
|
||||
lowercaseH: 104,
|
||||
// `h`
|
||||
lowercaseI: 105,
|
||||
// `i`
|
||||
lowercaseJ: 106,
|
||||
// `j`
|
||||
lowercaseK: 107,
|
||||
// `k`
|
||||
lowercaseL: 108,
|
||||
// `l`
|
||||
lowercaseM: 109,
|
||||
// `m`
|
||||
lowercaseN: 110,
|
||||
// `n`
|
||||
lowercaseO: 111,
|
||||
// `o`
|
||||
lowercaseP: 112,
|
||||
// `p`
|
||||
lowercaseQ: 113,
|
||||
// `q`
|
||||
lowercaseR: 114,
|
||||
// `r`
|
||||
lowercaseS: 115,
|
||||
// `s`
|
||||
lowercaseT: 116,
|
||||
// `t`
|
||||
lowercaseU: 117,
|
||||
// `u`
|
||||
lowercaseV: 118,
|
||||
// `v`
|
||||
lowercaseW: 119,
|
||||
// `w`
|
||||
lowercaseX: 120,
|
||||
// `x`
|
||||
lowercaseY: 121,
|
||||
// `y`
|
||||
lowercaseZ: 122,
|
||||
// `z`
|
||||
leftCurlyBrace: 123,
|
||||
// `{`
|
||||
verticalBar: 124,
|
||||
// `|`
|
||||
rightCurlyBrace: 125,
|
||||
// `}`
|
||||
tilde: 126,
|
||||
// `~`
|
||||
del: 127,
|
||||
// Unicode Specials block.
|
||||
byteOrderMarker: 65279,
|
||||
// Unicode Specials block.
|
||||
replacementCharacter: 65533 // `<60>`
|
||||
}
|
||||
|
||||
module.exports = codes
|
||||
7
node_modules/micromark/dist/character/markdown-line-ending-or-space.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/markdown-line-ending-or-space.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
function markdownLineEndingOrSpace(code) {
|
||||
return code < 0 || code === 32
|
||||
}
|
||||
|
||||
module.exports = markdownLineEndingOrSpace
|
||||
7
node_modules/micromark/dist/character/markdown-line-ending.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/markdown-line-ending.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
function markdownLineEnding(code) {
|
||||
return code < -2
|
||||
}
|
||||
|
||||
module.exports = markdownLineEnding
|
||||
7
node_modules/micromark/dist/character/markdown-space.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/markdown-space.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
function markdownSpace(code) {
|
||||
return code === -2 || code === -1 || code === 32
|
||||
}
|
||||
|
||||
module.exports = markdownSpace
|
||||
10
node_modules/micromark/dist/character/unicode-punctuation.js
generated
vendored
Normal file
10
node_modules/micromark/dist/character/unicode-punctuation.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
'use strict'
|
||||
|
||||
var unicodePunctuationRegex = require('../constant/unicode-punctuation-regex.js')
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
// In fact adds to the bundle size.
|
||||
|
||||
var unicodePunctuation = regexCheck(unicodePunctuationRegex)
|
||||
|
||||
module.exports = unicodePunctuation
|
||||
7
node_modules/micromark/dist/character/unicode-whitespace.js
generated
vendored
Normal file
7
node_modules/micromark/dist/character/unicode-whitespace.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
var regexCheck = require('../util/regex-check.js')
|
||||
|
||||
var unicodeWhitespace = regexCheck(/\s/)
|
||||
|
||||
module.exports = unicodeWhitespace
|
||||
102
node_modules/micromark/dist/character/values.d.ts
generated
vendored
Normal file
102
node_modules/micromark/dist/character/values.d.ts
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
// This module is generated by `script/`.
|
||||
|
||||
export type Value =
|
||||
| '\t'
|
||||
| '\n'
|
||||
| '\r'
|
||||
| ' '
|
||||
| '!'
|
||||
| '"'
|
||||
| '#'
|
||||
| '$'
|
||||
| '%'
|
||||
| '&'
|
||||
| "'"
|
||||
| '('
|
||||
| ')'
|
||||
| '*'
|
||||
| '+'
|
||||
| ','
|
||||
| '-'
|
||||
| '.'
|
||||
| '/'
|
||||
| '0'
|
||||
| '1'
|
||||
| '2'
|
||||
| '3'
|
||||
| '4'
|
||||
| '5'
|
||||
| '6'
|
||||
| '7'
|
||||
| '8'
|
||||
| '9'
|
||||
| ':'
|
||||
| ';'
|
||||
| '<'
|
||||
| '='
|
||||
| '>'
|
||||
| '?'
|
||||
| '@'
|
||||
| 'A'
|
||||
| 'B'
|
||||
| 'C'
|
||||
| 'D'
|
||||
| 'E'
|
||||
| 'F'
|
||||
| 'G'
|
||||
| 'H'
|
||||
| 'I'
|
||||
| 'J'
|
||||
| 'K'
|
||||
| 'L'
|
||||
| 'M'
|
||||
| 'N'
|
||||
| 'O'
|
||||
| 'P'
|
||||
| 'Q'
|
||||
| 'R'
|
||||
| 'S'
|
||||
| 'T'
|
||||
| 'U'
|
||||
| 'V'
|
||||
| 'W'
|
||||
| 'X'
|
||||
| 'Y'
|
||||
| 'Z'
|
||||
| '['
|
||||
| '\\'
|
||||
| ']'
|
||||
| '^'
|
||||
| '_'
|
||||
| '`'
|
||||
| 'a'
|
||||
| 'b'
|
||||
| 'c'
|
||||
| 'd'
|
||||
| 'e'
|
||||
| 'f'
|
||||
| 'g'
|
||||
| 'h'
|
||||
| 'i'
|
||||
| 'j'
|
||||
| 'k'
|
||||
| 'l'
|
||||
| 'm'
|
||||
| 'n'
|
||||
| 'o'
|
||||
| 'p'
|
||||
| 'q'
|
||||
| 'r'
|
||||
| 's'
|
||||
| 't'
|
||||
| 'u'
|
||||
| 'v'
|
||||
| 'w'
|
||||
| 'x'
|
||||
| 'y'
|
||||
| 'z'
|
||||
| '{'
|
||||
| '|'
|
||||
| '}'
|
||||
| '~'
|
||||
| '<27>'
|
||||
111
node_modules/micromark/dist/character/values.js
generated
vendored
Normal file
111
node_modules/micromark/dist/character/values.js
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
'use strict'
|
||||
|
||||
// This module is compiled away!
|
||||
//
|
||||
// While micromark works based on character codes, this module includes the
|
||||
// string versions of ’em.
|
||||
// The C0 block, except for LF, CR, HT, and w/ the replacement character added,
|
||||
// are available here.
|
||||
var values = {
|
||||
ht: '\t',
|
||||
lf: '\n',
|
||||
cr: '\r',
|
||||
space: ' ',
|
||||
exclamationMark: '!',
|
||||
quotationMark: '"',
|
||||
numberSign: '#',
|
||||
dollarSign: '$',
|
||||
percentSign: '%',
|
||||
ampersand: '&',
|
||||
apostrophe: "'",
|
||||
leftParenthesis: '(',
|
||||
rightParenthesis: ')',
|
||||
asterisk: '*',
|
||||
plusSign: '+',
|
||||
comma: ',',
|
||||
dash: '-',
|
||||
dot: '.',
|
||||
slash: '/',
|
||||
digit0: '0',
|
||||
digit1: '1',
|
||||
digit2: '2',
|
||||
digit3: '3',
|
||||
digit4: '4',
|
||||
digit5: '5',
|
||||
digit6: '6',
|
||||
digit7: '7',
|
||||
digit8: '8',
|
||||
digit9: '9',
|
||||
colon: ':',
|
||||
semicolon: ';',
|
||||
lessThan: '<',
|
||||
equalsTo: '=',
|
||||
greaterThan: '>',
|
||||
questionMark: '?',
|
||||
atSign: '@',
|
||||
uppercaseA: 'A',
|
||||
uppercaseB: 'B',
|
||||
uppercaseC: 'C',
|
||||
uppercaseD: 'D',
|
||||
uppercaseE: 'E',
|
||||
uppercaseF: 'F',
|
||||
uppercaseG: 'G',
|
||||
uppercaseH: 'H',
|
||||
uppercaseI: 'I',
|
||||
uppercaseJ: 'J',
|
||||
uppercaseK: 'K',
|
||||
uppercaseL: 'L',
|
||||
uppercaseM: 'M',
|
||||
uppercaseN: 'N',
|
||||
uppercaseO: 'O',
|
||||
uppercaseP: 'P',
|
||||
uppercaseQ: 'Q',
|
||||
uppercaseR: 'R',
|
||||
uppercaseS: 'S',
|
||||
uppercaseT: 'T',
|
||||
uppercaseU: 'U',
|
||||
uppercaseV: 'V',
|
||||
uppercaseW: 'W',
|
||||
uppercaseX: 'X',
|
||||
uppercaseY: 'Y',
|
||||
uppercaseZ: 'Z',
|
||||
leftSquareBracket: '[',
|
||||
backslash: '\\',
|
||||
rightSquareBracket: ']',
|
||||
caret: '^',
|
||||
underscore: '_',
|
||||
graveAccent: '`',
|
||||
lowercaseA: 'a',
|
||||
lowercaseB: 'b',
|
||||
lowercaseC: 'c',
|
||||
lowercaseD: 'd',
|
||||
lowercaseE: 'e',
|
||||
lowercaseF: 'f',
|
||||
lowercaseG: 'g',
|
||||
lowercaseH: 'h',
|
||||
lowercaseI: 'i',
|
||||
lowercaseJ: 'j',
|
||||
lowercaseK: 'k',
|
||||
lowercaseL: 'l',
|
||||
lowercaseM: 'm',
|
||||
lowercaseN: 'n',
|
||||
lowercaseO: 'o',
|
||||
lowercaseP: 'p',
|
||||
lowercaseQ: 'q',
|
||||
lowercaseR: 'r',
|
||||
lowercaseS: 's',
|
||||
lowercaseT: 't',
|
||||
lowercaseU: 'u',
|
||||
lowercaseV: 'v',
|
||||
lowercaseW: 'w',
|
||||
lowercaseX: 'x',
|
||||
lowercaseY: 'y',
|
||||
lowercaseZ: 'z',
|
||||
leftCurlyBrace: '{',
|
||||
verticalBar: '|',
|
||||
rightCurlyBrace: '}',
|
||||
tilde: '~',
|
||||
replacementCharacter: '<27>'
|
||||
}
|
||||
|
||||
module.exports = values
|
||||
787
node_modules/micromark/dist/compile/html.js
generated
vendored
Normal file
787
node_modules/micromark/dist/compile/html.js
generated
vendored
Normal file
@@ -0,0 +1,787 @@
|
||||
'use strict'
|
||||
|
||||
var decodeEntity = require('parse-entities/decode-entity.js')
|
||||
var assign = require('../constant/assign.js')
|
||||
var hasOwnProperty = require('../constant/has-own-property.js')
|
||||
var combineHtmlExtensions = require('../util/combine-html-extensions.js')
|
||||
var chunkedPush = require('../util/chunked-push.js')
|
||||
var miniflat = require('../util/miniflat.js')
|
||||
var normalizeIdentifier = require('../util/normalize-identifier.js')
|
||||
var normalizeUri = require('../util/normalize-uri.js')
|
||||
var safeFromInt = require('../util/safe-from-int.js')
|
||||
|
||||
function _interopDefaultLegacy(e) {
|
||||
return e && typeof e === 'object' && 'default' in e ? e : {default: e}
|
||||
}
|
||||
|
||||
var decodeEntity__default = /*#__PURE__*/ _interopDefaultLegacy(decodeEntity)
|
||||
|
||||
// While micromark is a lexer/tokenizer, the common case of going from markdown
|
||||
// dealt with.
|
||||
// Technically, we can skip `>` and `"` in many cases, but CM includes them.
|
||||
|
||||
var characterReferences = {
|
||||
'"': 'quot',
|
||||
'&': 'amp',
|
||||
'<': 'lt',
|
||||
'>': 'gt'
|
||||
} // These two are allowlists of essentially safe protocols for full URLs in
|
||||
// respectively the `href` (on `<a>`) and `src` (on `<img>`) attributes.
|
||||
// They are based on what is allowed on GitHub,
|
||||
// <https://github.com/syntax-tree/hast-util-sanitize/blob/9275b21/lib/github.json#L31>
|
||||
|
||||
var protocolHref = /^(https?|ircs?|mailto|xmpp)$/i
|
||||
var protocolSrc = /^https?$/i
|
||||
|
||||
function compileHtml(options) {
|
||||
// Configuration.
|
||||
// Includes `htmlExtensions` (an array of extensions), `defaultLineEnding` (a
|
||||
// preferred EOL), `allowDangerousProtocol` (whether to allow potential
|
||||
// dangerous protocols), and `allowDangerousHtml` (whether to allow potential
|
||||
// dangerous HTML).
|
||||
var settings = options || {} // Tags is needed because according to markdown, links and emphasis and
|
||||
// whatnot can exist in images, however, as HTML doesn’t allow content in
|
||||
// images, the tags are ignored in the `alt` attribute, but the content
|
||||
// remains.
|
||||
|
||||
var tags = true // An object to track identifiers to media (URLs and titles) defined with
|
||||
// definitions.
|
||||
|
||||
var definitions = {} // A lot of the handlers need to capture some of the output data, modify it
|
||||
// somehow, and then deal with it.
|
||||
// We do that by tracking a stack of buffers, that can be opened (with
|
||||
// `buffer`) and closed (with `resume`) to access them.
|
||||
|
||||
var buffers = [[]] // As we can have links in images and the other way around, where the deepest
|
||||
// ones are closed first, we need to track which one we’re in.
|
||||
|
||||
var mediaStack = [] // Same for tightness, which is specific to lists.
|
||||
// We need to track if we’re currently in a tight or loose container.
|
||||
|
||||
var tightStack = []
|
||||
var defaultHandlers = {
|
||||
enter: {
|
||||
blockQuote: onenterblockquote,
|
||||
codeFenced: onentercodefenced,
|
||||
codeFencedFenceInfo: buffer,
|
||||
codeFencedFenceMeta: buffer,
|
||||
codeIndented: onentercodeindented,
|
||||
codeText: onentercodetext,
|
||||
content: onentercontent,
|
||||
definition: onenterdefinition,
|
||||
definitionDestinationString: onenterdefinitiondestinationstring,
|
||||
definitionLabelString: buffer,
|
||||
definitionTitleString: buffer,
|
||||
emphasis: onenteremphasis,
|
||||
htmlFlow: onenterhtmlflow,
|
||||
htmlText: onenterhtml,
|
||||
image: onenterimage,
|
||||
label: buffer,
|
||||
link: onenterlink,
|
||||
listItemMarker: onenterlistitemmarker,
|
||||
listItemValue: onenterlistitemvalue,
|
||||
listOrdered: onenterlistordered,
|
||||
listUnordered: onenterlistunordered,
|
||||
paragraph: onenterparagraph,
|
||||
reference: buffer,
|
||||
resource: onenterresource,
|
||||
resourceDestinationString: onenterresourcedestinationstring,
|
||||
resourceTitleString: buffer,
|
||||
setextHeading: onentersetextheading,
|
||||
strong: onenterstrong
|
||||
},
|
||||
exit: {
|
||||
atxHeading: onexitatxheading,
|
||||
atxHeadingSequence: onexitatxheadingsequence,
|
||||
autolinkEmail: onexitautolinkemail,
|
||||
autolinkProtocol: onexitautolinkprotocol,
|
||||
blockQuote: onexitblockquote,
|
||||
characterEscapeValue: onexitdata,
|
||||
characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker,
|
||||
characterReferenceMarkerNumeric: onexitcharacterreferencemarker,
|
||||
characterReferenceValue: onexitcharacterreferencevalue,
|
||||
codeFenced: onexitflowcode,
|
||||
codeFencedFence: onexitcodefencedfence,
|
||||
codeFencedFenceInfo: onexitcodefencedfenceinfo,
|
||||
codeFencedFenceMeta: resume,
|
||||
codeFlowValue: onexitcodeflowvalue,
|
||||
codeIndented: onexitflowcode,
|
||||
codeText: onexitcodetext,
|
||||
codeTextData: onexitdata,
|
||||
data: onexitdata,
|
||||
definition: onexitdefinition,
|
||||
definitionDestinationString: onexitdefinitiondestinationstring,
|
||||
definitionLabelString: onexitdefinitionlabelstring,
|
||||
definitionTitleString: onexitdefinitiontitlestring,
|
||||
emphasis: onexitemphasis,
|
||||
hardBreakEscape: onexithardbreak,
|
||||
hardBreakTrailing: onexithardbreak,
|
||||
htmlFlow: onexithtml,
|
||||
htmlFlowData: onexitdata,
|
||||
htmlText: onexithtml,
|
||||
htmlTextData: onexitdata,
|
||||
image: onexitmedia,
|
||||
label: onexitlabel,
|
||||
labelText: onexitlabeltext,
|
||||
lineEnding: onexitlineending,
|
||||
link: onexitmedia,
|
||||
listOrdered: onexitlistordered,
|
||||
listUnordered: onexitlistunordered,
|
||||
paragraph: onexitparagraph,
|
||||
reference: resume,
|
||||
referenceString: onexitreferencestring,
|
||||
resource: resume,
|
||||
resourceDestinationString: onexitresourcedestinationstring,
|
||||
resourceTitleString: onexitresourcetitlestring,
|
||||
setextHeading: onexitsetextheading,
|
||||
setextHeadingLineSequence: onexitsetextheadinglinesequence,
|
||||
setextHeadingText: onexitsetextheadingtext,
|
||||
strong: onexitstrong,
|
||||
thematicBreak: onexitthematicbreak
|
||||
}
|
||||
} // Combine the HTML extensions with the default handlers.
|
||||
// An HTML extension is an object whose fields are either `enter` or `exit`
|
||||
// (reflecting whether a token is entered or exited).
|
||||
// The values at such objects are names of tokens mapping to handlers.
|
||||
// Handlers are called, respectively when a token is opener or closed, with
|
||||
// that token, and a context as `this`.
|
||||
|
||||
var handlers = combineHtmlExtensions(
|
||||
[defaultHandlers].concat(miniflat(settings.htmlExtensions))
|
||||
) // Handlers do often need to keep track of some state.
|
||||
// That state is provided here as a key-value store (an object).
|
||||
|
||||
var data = {
|
||||
tightStack: tightStack
|
||||
} // The context for handlers references a couple of useful functions.
|
||||
// In handlers from extensions, those can be accessed at `this`.
|
||||
// For the handlers here, they can be accessed directly.
|
||||
|
||||
var context = {
|
||||
lineEndingIfNeeded: lineEndingIfNeeded,
|
||||
options: settings,
|
||||
encode: encode,
|
||||
raw: raw,
|
||||
tag: tag,
|
||||
buffer: buffer,
|
||||
resume: resume,
|
||||
setData: setData,
|
||||
getData: getData
|
||||
} // Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
|
||||
// markdown document over to the compiled HTML.
|
||||
// In some cases, such as `> a`, CommonMark requires that extra line endings
|
||||
// are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
|
||||
// This variable hold the default line ending when given (or `undefined`),
|
||||
// and in the latter case will be updated to the first found line ending if
|
||||
// there is one.
|
||||
|
||||
var lineEndingStyle = settings.defaultLineEnding // Return the function that handles a slice of events.
|
||||
|
||||
return compile // Deal w/ a slice of events.
|
||||
// Return either the empty string if there’s nothing of note to return, or the
|
||||
// result when done.
|
||||
|
||||
function compile(events) {
|
||||
// As definitions can come after references, we need to figure out the media
|
||||
// (urls and titles) defined by them before handling the references.
|
||||
// So, we do sort of what HTML does: put metadata at the start (in head), and
|
||||
// then put content after (`body`).
|
||||
var head = []
|
||||
var body = []
|
||||
var index
|
||||
var start
|
||||
var listStack
|
||||
var handler
|
||||
var result
|
||||
index = -1
|
||||
start = 0
|
||||
listStack = []
|
||||
|
||||
while (++index < events.length) {
|
||||
// Figure out the line ending style used in the document.
|
||||
if (
|
||||
!lineEndingStyle &&
|
||||
(events[index][1].type === 'lineEnding' ||
|
||||
events[index][1].type === 'lineEndingBlank')
|
||||
) {
|
||||
lineEndingStyle = events[index][2].sliceSerialize(events[index][1])
|
||||
} // Preprocess lists to infer whether the list is loose or not.
|
||||
|
||||
if (
|
||||
events[index][1].type === 'listOrdered' ||
|
||||
events[index][1].type === 'listUnordered'
|
||||
) {
|
||||
if (events[index][0] === 'enter') {
|
||||
listStack.push(index)
|
||||
} else {
|
||||
prepareList(events.slice(listStack.pop(), index))
|
||||
}
|
||||
} // Move definitions to the front.
|
||||
|
||||
if (events[index][1].type === 'definition') {
|
||||
if (events[index][0] === 'enter') {
|
||||
body = chunkedPush(body, events.slice(start, index))
|
||||
start = index
|
||||
} else {
|
||||
head = chunkedPush(head, events.slice(start, index + 1))
|
||||
start = index + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
head = chunkedPush(head, body)
|
||||
head = chunkedPush(head, events.slice(start))
|
||||
result = head
|
||||
index = -1 // Handle the start of the document, if defined.
|
||||
|
||||
if (handlers.enter.null) {
|
||||
handlers.enter.null.call(context)
|
||||
} // Handle all events.
|
||||
|
||||
while (++index < events.length) {
|
||||
handler = handlers[result[index][0]]
|
||||
|
||||
if (hasOwnProperty.call(handler, result[index][1].type)) {
|
||||
handler[result[index][1].type].call(
|
||||
assign(
|
||||
{
|
||||
sliceSerialize: result[index][2].sliceSerialize
|
||||
},
|
||||
context
|
||||
),
|
||||
result[index][1]
|
||||
)
|
||||
}
|
||||
} // Handle the end of the document, if defined.
|
||||
|
||||
if (handlers.exit.null) {
|
||||
handlers.exit.null.call(context)
|
||||
}
|
||||
|
||||
return buffers[0].join('')
|
||||
} // Figure out whether lists are loose or not.
|
||||
|
||||
function prepareList(slice) {
|
||||
var length = slice.length - 1 // Skip close.
|
||||
|
||||
var index = 0 // Skip open.
|
||||
|
||||
var containerBalance = 0
|
||||
var loose
|
||||
var atMarker
|
||||
var event
|
||||
|
||||
while (++index < length) {
|
||||
event = slice[index]
|
||||
|
||||
if (event[1]._container) {
|
||||
atMarker = undefined
|
||||
|
||||
if (event[0] === 'enter') {
|
||||
containerBalance++
|
||||
} else {
|
||||
containerBalance--
|
||||
}
|
||||
} else if (event[1].type === 'listItemPrefix') {
|
||||
if (event[0] === 'exit') {
|
||||
atMarker = true
|
||||
}
|
||||
} else if (event[1].type === 'linePrefix');
|
||||
else if (event[1].type === 'lineEndingBlank') {
|
||||
if (event[0] === 'enter' && !containerBalance) {
|
||||
if (atMarker) {
|
||||
atMarker = undefined
|
||||
} else {
|
||||
loose = true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
atMarker = undefined
|
||||
}
|
||||
}
|
||||
|
||||
slice[0][1]._loose = loose
|
||||
} // Set data into the key-value store.
|
||||
|
||||
function setData(key, value) {
|
||||
data[key] = value
|
||||
} // Get data from the key-value store.
|
||||
|
||||
function getData(key) {
|
||||
return data[key]
|
||||
} // Capture some of the output data.
|
||||
|
||||
function buffer() {
|
||||
buffers.push([])
|
||||
} // Stop capturing and access the output data.
|
||||
|
||||
function resume() {
|
||||
return buffers.pop().join('')
|
||||
} // Output (parts of) HTML tags.
|
||||
|
||||
function tag(value) {
|
||||
if (!tags) return
|
||||
setData('lastWasTag', true)
|
||||
buffers[buffers.length - 1].push(value)
|
||||
} // Output raw data.
|
||||
|
||||
function raw(value) {
|
||||
setData('lastWasTag')
|
||||
buffers[buffers.length - 1].push(value)
|
||||
} // Output an extra line ending.
|
||||
|
||||
function lineEnding() {
|
||||
raw(lineEndingStyle || '\n')
|
||||
} // Output an extra line ending if the previous value wasn’t EOF/EOL.
|
||||
|
||||
function lineEndingIfNeeded() {
|
||||
var buffer = buffers[buffers.length - 1]
|
||||
var slice = buffer[buffer.length - 1]
|
||||
var previous = slice ? slice.charCodeAt(slice.length - 1) : null
|
||||
|
||||
if (previous === 10 || previous === 13 || previous === null) {
|
||||
return
|
||||
}
|
||||
|
||||
lineEnding()
|
||||
} // Make a value safe for injection in HTML (except w/ `ignoreEncode`).
|
||||
|
||||
function encode(value) {
|
||||
return getData('ignoreEncode') ? value : value.replace(/["&<>]/g, replace)
|
||||
|
||||
function replace(value) {
|
||||
return '&' + characterReferences[value] + ';'
|
||||
}
|
||||
} // Make a value safe for injection as a URL.
|
||||
// This does encode unsafe characters with percent-encoding, skipping already
|
||||
// encoded sequences (`normalizeUri`).
|
||||
// Further unsafe characters are encoded as character references (`encode`).
|
||||
// Finally, if the URL includes an unknown protocol (such as a dangerous
|
||||
// example, `javascript:`), the value is ignored.
|
||||
|
||||
function url(url, protocol) {
|
||||
var value = encode(normalizeUri(url || ''))
|
||||
var colon = value.indexOf(':')
|
||||
var questionMark = value.indexOf('?')
|
||||
var numberSign = value.indexOf('#')
|
||||
var slash = value.indexOf('/')
|
||||
|
||||
if (
|
||||
settings.allowDangerousProtocol || // If there is no protocol, it’s relative.
|
||||
colon < 0 || // If the first colon is after a `?`, `#`, or `/`, it’s not a protocol.
|
||||
(slash > -1 && colon > slash) ||
|
||||
(questionMark > -1 && colon > questionMark) ||
|
||||
(numberSign > -1 && colon > numberSign) || // It is a protocol, it should be allowed.
|
||||
protocol.test(value.slice(0, colon))
|
||||
) {
|
||||
return value
|
||||
}
|
||||
|
||||
return ''
|
||||
} //
|
||||
// Handlers.
|
||||
//
|
||||
|
||||
function onenterlistordered(token) {
|
||||
tightStack.push(!token._loose)
|
||||
lineEndingIfNeeded()
|
||||
tag('<ol')
|
||||
setData('expectFirstItem', true)
|
||||
}
|
||||
|
||||
function onenterlistunordered(token) {
|
||||
tightStack.push(!token._loose)
|
||||
lineEndingIfNeeded()
|
||||
tag('<ul')
|
||||
setData('expectFirstItem', true)
|
||||
}
|
||||
|
||||
function onenterlistitemvalue(token) {
|
||||
var value
|
||||
|
||||
if (getData('expectFirstItem')) {
|
||||
value = parseInt(this.sliceSerialize(token), 10)
|
||||
|
||||
if (value !== 1) {
|
||||
tag(' start="' + encode(String(value)) + '"')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function onenterlistitemmarker() {
|
||||
if (getData('expectFirstItem')) {
|
||||
tag('>')
|
||||
} else {
|
||||
onexitlistitem()
|
||||
}
|
||||
|
||||
lineEndingIfNeeded()
|
||||
tag('<li>')
|
||||
setData('expectFirstItem') // “Hack” to prevent a line ending from showing up if the item is empty.
|
||||
|
||||
setData('lastWasTag')
|
||||
}
|
||||
|
||||
function onexitlistordered() {
|
||||
onexitlistitem()
|
||||
tightStack.pop()
|
||||
lineEnding()
|
||||
tag('</ol>')
|
||||
}
|
||||
|
||||
function onexitlistunordered() {
|
||||
onexitlistitem()
|
||||
tightStack.pop()
|
||||
lineEnding()
|
||||
tag('</ul>')
|
||||
}
|
||||
|
||||
function onexitlistitem() {
|
||||
if (getData('lastWasTag') && !getData('slurpAllLineEndings')) {
|
||||
lineEndingIfNeeded()
|
||||
}
|
||||
|
||||
tag('</li>')
|
||||
setData('slurpAllLineEndings')
|
||||
}
|
||||
|
||||
function onenterblockquote() {
|
||||
tightStack.push(false)
|
||||
lineEndingIfNeeded()
|
||||
tag('<blockquote>')
|
||||
}
|
||||
|
||||
function onexitblockquote() {
|
||||
tightStack.pop()
|
||||
lineEndingIfNeeded()
|
||||
tag('</blockquote>')
|
||||
setData('slurpAllLineEndings')
|
||||
}
|
||||
|
||||
function onenterparagraph() {
|
||||
if (!tightStack[tightStack.length - 1]) {
|
||||
lineEndingIfNeeded()
|
||||
tag('<p>')
|
||||
}
|
||||
|
||||
setData('slurpAllLineEndings')
|
||||
}
|
||||
|
||||
function onexitparagraph() {
|
||||
if (tightStack[tightStack.length - 1]) {
|
||||
setData('slurpAllLineEndings', true)
|
||||
} else {
|
||||
tag('</p>')
|
||||
}
|
||||
}
|
||||
|
||||
function onentercodefenced() {
|
||||
lineEndingIfNeeded()
|
||||
tag('<pre><code')
|
||||
setData('fencesCount', 0)
|
||||
}
|
||||
|
||||
function onexitcodefencedfenceinfo() {
|
||||
var value = resume()
|
||||
tag(' class="language-' + value + '"')
|
||||
}
|
||||
|
||||
function onexitcodefencedfence() {
|
||||
if (!getData('fencesCount')) {
|
||||
tag('>')
|
||||
setData('fencedCodeInside', true)
|
||||
setData('slurpOneLineEnding', true)
|
||||
}
|
||||
|
||||
setData('fencesCount', getData('fencesCount') + 1)
|
||||
}
|
||||
|
||||
function onentercodeindented() {
|
||||
lineEndingIfNeeded()
|
||||
tag('<pre><code>')
|
||||
}
|
||||
|
||||
function onexitflowcode() {
|
||||
// Send an extra line feed if we saw data.
|
||||
if (getData('flowCodeSeenData')) lineEndingIfNeeded()
|
||||
tag('</code></pre>')
|
||||
if (getData('fencesCount') < 2) lineEndingIfNeeded()
|
||||
setData('flowCodeSeenData')
|
||||
setData('fencesCount')
|
||||
setData('slurpOneLineEnding')
|
||||
}
|
||||
|
||||
function onenterimage() {
|
||||
mediaStack.push({
|
||||
image: true
|
||||
})
|
||||
tags = undefined // Disallow tags.
|
||||
}
|
||||
|
||||
function onenterlink() {
|
||||
mediaStack.push({})
|
||||
}
|
||||
|
||||
function onexitlabeltext(token) {
|
||||
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
|
||||
}
|
||||
|
||||
function onexitlabel() {
|
||||
mediaStack[mediaStack.length - 1].label = resume()
|
||||
}
|
||||
|
||||
function onexitreferencestring(token) {
|
||||
mediaStack[mediaStack.length - 1].referenceId = this.sliceSerialize(token)
|
||||
}
|
||||
|
||||
function onenterresource() {
|
||||
buffer() // We can have line endings in the resource, ignore them.
|
||||
|
||||
mediaStack[mediaStack.length - 1].destination = ''
|
||||
}
|
||||
|
||||
function onenterresourcedestinationstring() {
|
||||
buffer() // Ignore encoding the result, as we’ll first percent encode the url and
|
||||
// encode manually after.
|
||||
|
||||
setData('ignoreEncode', true)
|
||||
}
|
||||
|
||||
function onexitresourcedestinationstring() {
|
||||
mediaStack[mediaStack.length - 1].destination = resume()
|
||||
setData('ignoreEncode')
|
||||
}
|
||||
|
||||
function onexitresourcetitlestring() {
|
||||
mediaStack[mediaStack.length - 1].title = resume()
|
||||
}
|
||||
|
||||
function onexitmedia() {
|
||||
var index = mediaStack.length - 1 // Skip current.
|
||||
|
||||
var media = mediaStack[index]
|
||||
var context =
|
||||
media.destination === undefined
|
||||
? definitions[normalizeIdentifier(media.referenceId || media.labelId)]
|
||||
: media
|
||||
tags = true
|
||||
|
||||
while (index--) {
|
||||
if (mediaStack[index].image) {
|
||||
tags = undefined
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (media.image) {
|
||||
tag('<img src="' + url(context.destination, protocolSrc) + '" alt="')
|
||||
raw(media.label)
|
||||
tag('"')
|
||||
} else {
|
||||
tag('<a href="' + url(context.destination, protocolHref) + '"')
|
||||
}
|
||||
|
||||
tag(context.title ? ' title="' + context.title + '"' : '')
|
||||
|
||||
if (media.image) {
|
||||
tag(' />')
|
||||
} else {
|
||||
tag('>')
|
||||
raw(media.label)
|
||||
tag('</a>')
|
||||
}
|
||||
|
||||
mediaStack.pop()
|
||||
}
|
||||
|
||||
function onenterdefinition() {
|
||||
buffer()
|
||||
mediaStack.push({})
|
||||
}
|
||||
|
||||
function onexitdefinitionlabelstring(token) {
|
||||
// Discard label, use the source content instead.
|
||||
resume()
|
||||
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
|
||||
}
|
||||
|
||||
function onenterdefinitiondestinationstring() {
|
||||
buffer()
|
||||
setData('ignoreEncode', true)
|
||||
}
|
||||
|
||||
function onexitdefinitiondestinationstring() {
|
||||
mediaStack[mediaStack.length - 1].destination = resume()
|
||||
setData('ignoreEncode')
|
||||
}
|
||||
|
||||
function onexitdefinitiontitlestring() {
|
||||
mediaStack[mediaStack.length - 1].title = resume()
|
||||
}
|
||||
|
||||
function onexitdefinition() {
|
||||
var id = normalizeIdentifier(mediaStack[mediaStack.length - 1].labelId)
|
||||
resume()
|
||||
|
||||
if (!hasOwnProperty.call(definitions, id)) {
|
||||
definitions[id] = mediaStack[mediaStack.length - 1]
|
||||
}
|
||||
|
||||
mediaStack.pop()
|
||||
}
|
||||
|
||||
function onentercontent() {
|
||||
setData('slurpAllLineEndings', true)
|
||||
}
|
||||
|
||||
function onexitatxheadingsequence(token) {
|
||||
// Exit for further sequences.
|
||||
if (getData('headingRank')) return
|
||||
setData('headingRank', this.sliceSerialize(token).length)
|
||||
lineEndingIfNeeded()
|
||||
tag('<h' + getData('headingRank') + '>')
|
||||
}
|
||||
|
||||
function onentersetextheading() {
|
||||
buffer()
|
||||
setData('slurpAllLineEndings')
|
||||
}
|
||||
|
||||
function onexitsetextheadingtext() {
|
||||
setData('slurpAllLineEndings', true)
|
||||
}
|
||||
|
||||
function onexitatxheading() {
|
||||
tag('</h' + getData('headingRank') + '>')
|
||||
setData('headingRank')
|
||||
}
|
||||
|
||||
function onexitsetextheadinglinesequence(token) {
|
||||
setData(
|
||||
'headingRank',
|
||||
this.sliceSerialize(token).charCodeAt(0) === 61 ? 1 : 2
|
||||
)
|
||||
}
|
||||
|
||||
function onexitsetextheading() {
|
||||
var value = resume()
|
||||
lineEndingIfNeeded()
|
||||
tag('<h' + getData('headingRank') + '>')
|
||||
raw(value)
|
||||
tag('</h' + getData('headingRank') + '>')
|
||||
setData('slurpAllLineEndings')
|
||||
setData('headingRank')
|
||||
}
|
||||
|
||||
function onexitdata(token) {
|
||||
raw(encode(this.sliceSerialize(token)))
|
||||
}
|
||||
|
||||
function onexitlineending(token) {
|
||||
if (getData('slurpAllLineEndings')) {
|
||||
return
|
||||
}
|
||||
|
||||
if (getData('slurpOneLineEnding')) {
|
||||
setData('slurpOneLineEnding')
|
||||
return
|
||||
}
|
||||
|
||||
if (getData('inCodeText')) {
|
||||
raw(' ')
|
||||
return
|
||||
}
|
||||
|
||||
raw(encode(this.sliceSerialize(token)))
|
||||
}
|
||||
|
||||
function onexitcodeflowvalue(token) {
|
||||
raw(encode(this.sliceSerialize(token)))
|
||||
setData('flowCodeSeenData', true)
|
||||
}
|
||||
|
||||
function onexithardbreak() {
|
||||
tag('<br />')
|
||||
}
|
||||
|
||||
function onenterhtmlflow() {
|
||||
lineEndingIfNeeded()
|
||||
onenterhtml()
|
||||
}
|
||||
|
||||
function onexithtml() {
|
||||
setData('ignoreEncode')
|
||||
}
|
||||
|
||||
function onenterhtml() {
|
||||
if (settings.allowDangerousHtml) {
|
||||
setData('ignoreEncode', true)
|
||||
}
|
||||
}
|
||||
|
||||
function onenteremphasis() {
|
||||
tag('<em>')
|
||||
}
|
||||
|
||||
function onenterstrong() {
|
||||
tag('<strong>')
|
||||
}
|
||||
|
||||
function onentercodetext() {
|
||||
setData('inCodeText', true)
|
||||
tag('<code>')
|
||||
}
|
||||
|
||||
function onexitcodetext() {
|
||||
setData('inCodeText')
|
||||
tag('</code>')
|
||||
}
|
||||
|
||||
function onexitemphasis() {
|
||||
tag('</em>')
|
||||
}
|
||||
|
||||
function onexitstrong() {
|
||||
tag('</strong>')
|
||||
}
|
||||
|
||||
function onexitthematicbreak() {
|
||||
lineEndingIfNeeded()
|
||||
tag('<hr />')
|
||||
}
|
||||
|
||||
function onexitcharacterreferencemarker(token) {
|
||||
setData('characterReferenceType', token.type)
|
||||
}
|
||||
|
||||
function onexitcharacterreferencevalue(token) {
|
||||
var value = this.sliceSerialize(token)
|
||||
value = getData('characterReferenceType')
|
||||
? safeFromInt(
|
||||
value,
|
||||
getData('characterReferenceType') ===
|
||||
'characterReferenceMarkerNumeric'
|
||||
? 10
|
||||
: 16
|
||||
)
|
||||
: decodeEntity__default['default'](value)
|
||||
raw(encode(value))
|
||||
setData('characterReferenceType')
|
||||
}
|
||||
|
||||
function onexitautolinkprotocol(token) {
|
||||
var uri = this.sliceSerialize(token)
|
||||
tag('<a href="' + url(uri, protocolHref) + '">')
|
||||
raw(encode(uri))
|
||||
tag('</a>')
|
||||
}
|
||||
|
||||
function onexitautolinkemail(token) {
|
||||
var uri = this.sliceSerialize(token)
|
||||
tag('<a href="' + url('mailto:' + uri, protocolHref) + '">')
|
||||
raw(encode(uri))
|
||||
tag('</a>')
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = compileHtml
|
||||
5
node_modules/micromark/dist/constant/assign.js
generated
vendored
Normal file
5
node_modules/micromark/dist/constant/assign.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
'use strict'
|
||||
|
||||
var assign = Object.assign
|
||||
|
||||
module.exports = assign
|
||||
23
node_modules/micromark/dist/constant/constants.d.ts
generated
vendored
Normal file
23
node_modules/micromark/dist/constant/constants.d.ts
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// This module is generated by `script/`.
|
||||
|
||||
export type Constant =
|
||||
| 1
|
||||
| 2
|
||||
| 6
|
||||
| 63
|
||||
| 32
|
||||
| 'CDATA['
|
||||
| 7
|
||||
| 31
|
||||
| 3
|
||||
| 'flow'
|
||||
| 'content'
|
||||
| 'string'
|
||||
| 'text'
|
||||
| 4
|
||||
| 5
|
||||
| 8
|
||||
| 999
|
||||
| 10
|
||||
| 16
|
||||
| 10000
|
||||
71
node_modules/micromark/dist/constant/constants.js
generated
vendored
Normal file
71
node_modules/micromark/dist/constant/constants.js
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
'use strict'
|
||||
|
||||
// This module is compiled away!
|
||||
//
|
||||
// Parsing markdown comes with a couple of constants, such as minimum or maximum
|
||||
// sizes of certain sequences.
|
||||
// Additionally, there are a couple symbols used inside micromark.
|
||||
// These are all defined here, but compiled away by scripts.
|
||||
var constants = {
|
||||
attentionSideBefore: 1,
|
||||
// Symbol to mark an attention sequence as before content: `*a`
|
||||
attentionSideAfter: 2,
|
||||
// Symbol to mark an attention sequence as after content: `a*`
|
||||
atxHeadingOpeningFenceSizeMax: 6,
|
||||
// 6 number signs is fine, 7 isn’t.
|
||||
autolinkDomainSizeMax: 63,
|
||||
// 63 characters is fine, 64 is too many.
|
||||
autolinkSchemeSizeMax: 32,
|
||||
// 32 characters is fine, 33 is too many.
|
||||
cdataOpeningString: 'CDATA[',
|
||||
// And preceded by `<![`.
|
||||
characterGroupWhitespace: 1,
|
||||
// Symbol used to indicate a character is whitespace
|
||||
characterGroupPunctuation: 2,
|
||||
// Symbol used to indicate a character is whitespace
|
||||
characterReferenceDecimalSizeMax: 7,
|
||||
// `�`.
|
||||
characterReferenceHexadecimalSizeMax: 6,
|
||||
// `�`.
|
||||
characterReferenceNamedSizeMax: 31,
|
||||
// `∳`.
|
||||
codeFencedSequenceSizeMin: 3,
|
||||
// At least 3 ticks or tildes are needed.
|
||||
contentTypeFlow: 'flow',
|
||||
contentTypeContent: 'content',
|
||||
contentTypeString: 'string',
|
||||
contentTypeText: 'text',
|
||||
hardBreakPrefixSizeMin: 2,
|
||||
// At least 2 trailing spaces are needed.
|
||||
htmlRaw: 1,
|
||||
// Symbol for `<script>`
|
||||
htmlComment: 2,
|
||||
// Symbol for `<!---->`
|
||||
htmlInstruction: 3,
|
||||
// Symbol for `<?php?>`
|
||||
htmlDeclaration: 4,
|
||||
// Symbol for `<!doctype>`
|
||||
htmlCdata: 5,
|
||||
// Symbol for `<![CDATA[]]>`
|
||||
htmlBasic: 6,
|
||||
// Symbol for `<div`
|
||||
htmlComplete: 7,
|
||||
// Symbol for `<x>`
|
||||
htmlRawSizeMax: 8,
|
||||
// Length of `textarea`.
|
||||
linkResourceDestinationBalanceMax: 3,
|
||||
// See: <https://spec.commonmark.org/0.29/#link-destination>
|
||||
linkReferenceSizeMax: 999,
|
||||
// See: <https://spec.commonmark.org/0.29/#link-label>
|
||||
listItemValueSizeMax: 10,
|
||||
// See: <https://spec.commonmark.org/0.29/#ordered-list-marker>
|
||||
numericBaseDecimal: 10,
|
||||
numericBaseHexadecimal: 0x10,
|
||||
tabSize: 4,
|
||||
// Tabs have a hard-coded size of 4, per CommonMark.
|
||||
thematicBreakMarkerCountMin: 3,
|
||||
// At least 3 asterisks, dashes, or underscores are needed.
|
||||
v8MaxSafeChunkSize: 10000 // V8 (and potentially others) have problems injecting giant arrays into other arrays, hence we operate in chunks.
|
||||
}
|
||||
|
||||
module.exports = constants
|
||||
5
node_modules/micromark/dist/constant/from-char-code.js
generated
vendored
Normal file
5
node_modules/micromark/dist/constant/from-char-code.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
'use strict'
|
||||
|
||||
var fromCharCode = String.fromCharCode
|
||||
|
||||
module.exports = fromCharCode
|
||||
5
node_modules/micromark/dist/constant/has-own-property.js
generated
vendored
Normal file
5
node_modules/micromark/dist/constant/has-own-property.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
'use strict'
|
||||
|
||||
var own = {}.hasOwnProperty
|
||||
|
||||
module.exports = own
|
||||
69
node_modules/micromark/dist/constant/html-block-names.js
generated
vendored
Normal file
69
node_modules/micromark/dist/constant/html-block-names.js
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
'use strict'
|
||||
|
||||
// This module is copied from <https://spec.commonmark.org/0.29/#html-blocks>.
|
||||
var basics = [
|
||||
'address',
|
||||
'article',
|
||||
'aside',
|
||||
'base',
|
||||
'basefont',
|
||||
'blockquote',
|
||||
'body',
|
||||
'caption',
|
||||
'center',
|
||||
'col',
|
||||
'colgroup',
|
||||
'dd',
|
||||
'details',
|
||||
'dialog',
|
||||
'dir',
|
||||
'div',
|
||||
'dl',
|
||||
'dt',
|
||||
'fieldset',
|
||||
'figcaption',
|
||||
'figure',
|
||||
'footer',
|
||||
'form',
|
||||
'frame',
|
||||
'frameset',
|
||||
'h1',
|
||||
'h2',
|
||||
'h3',
|
||||
'h4',
|
||||
'h5',
|
||||
'h6',
|
||||
'head',
|
||||
'header',
|
||||
'hr',
|
||||
'html',
|
||||
'iframe',
|
||||
'legend',
|
||||
'li',
|
||||
'link',
|
||||
'main',
|
||||
'menu',
|
||||
'menuitem',
|
||||
'nav',
|
||||
'noframes',
|
||||
'ol',
|
||||
'optgroup',
|
||||
'option',
|
||||
'p',
|
||||
'param',
|
||||
'section',
|
||||
'source',
|
||||
'summary',
|
||||
'table',
|
||||
'tbody',
|
||||
'td',
|
||||
'tfoot',
|
||||
'th',
|
||||
'thead',
|
||||
'title',
|
||||
'tr',
|
||||
'track',
|
||||
'ul'
|
||||
]
|
||||
|
||||
module.exports = basics
|
||||
6
node_modules/micromark/dist/constant/html-raw-names.js
generated
vendored
Normal file
6
node_modules/micromark/dist/constant/html-raw-names.js
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
'use strict'
|
||||
|
||||
// This module is copied from <https://spec.commonmark.org/0.29/#html-blocks>.
|
||||
var raws = ['pre', 'script', 'style', 'textarea']
|
||||
|
||||
module.exports = raws
|
||||
5
node_modules/micromark/dist/constant/splice.js
generated
vendored
Normal file
5
node_modules/micromark/dist/constant/splice.js
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
'use strict'
|
||||
|
||||
var splice = [].splice
|
||||
|
||||
module.exports = splice
|
||||
3
node_modules/micromark/dist/constant/types.d.ts
generated
vendored
Normal file
3
node_modules/micromark/dist/constant/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
// This module is generated by `script/`.
|
||||
|
||||
export type Type = string
|
||||
357
node_modules/micromark/dist/constant/types.js
generated
vendored
Normal file
357
node_modules/micromark/dist/constant/types.js
generated
vendored
Normal file
@@ -0,0 +1,357 @@
|
||||
'use strict'
|
||||
|
||||
// This module is compiled away!
|
||||
//
|
||||
// Here is the list of all types of tokens exposed by micromark, with a short
|
||||
// explanation of what they include and where they are found.
|
||||
// In picking names, generally, the rule is to be as explicit as possible
|
||||
// instead of reusing names.
|
||||
// For example, there is a `definitionDestination` and a `resourceDestination`,
|
||||
// instead of one shared name.
|
||||
var types = {
|
||||
// Generic type for data, such as in a title, a destination, etc.
|
||||
data: 'data',
|
||||
// Generic type for syntactic whitespace (tabs, virtual spaces, spaces).
|
||||
// Such as, between a fenced code fence and an info string.
|
||||
whitespace: 'whitespace',
|
||||
// Generic type for line endings (line feed, carriage return, carriage return +
|
||||
// line feed).
|
||||
lineEnding: 'lineEnding',
|
||||
// A line ending, but ending a blank line.
|
||||
lineEndingBlank: 'lineEndingBlank',
|
||||
// Generic type for whitespace (tabs, virtual spaces, spaces) at the start of a
|
||||
// line.
|
||||
linePrefix: 'linePrefix',
|
||||
// Generic type for whitespace (tabs, virtual spaces, spaces) at the end of a
|
||||
// line.
|
||||
lineSuffix: 'lineSuffix',
|
||||
// Whole ATX heading:
|
||||
//
|
||||
// ```markdown
|
||||
// #
|
||||
// ## Alpha
|
||||
// ### Bravo ###
|
||||
// ```
|
||||
//
|
||||
// Includes `atxHeadingSequence`, `whitespace`, `atxHeadingText`.
|
||||
atxHeading: 'atxHeading',
|
||||
// Sequence of number signs in an ATX heading (`###`).
|
||||
atxHeadingSequence: 'atxHeadingSequence',
|
||||
// Content in an ATX heading (`alpha`).
|
||||
// Includes text.
|
||||
atxHeadingText: 'atxHeadingText',
|
||||
// Whole autolink (`<https://example.com>` or `<admin@example.com>`)
|
||||
// Includes `autolinkMarker` and `autolinkProtocol` or `autolinkEmail`.
|
||||
autolink: 'autolink',
|
||||
// Email autolink w/o markers (`admin@example.com`)
|
||||
autolinkEmail: 'autolinkEmail',
|
||||
// Marker around an `autolinkProtocol` or `autolinkEmail` (`<` or `>`).
|
||||
autolinkMarker: 'autolinkMarker',
|
||||
// Protocol autolink w/o markers (`https://example.com`)
|
||||
autolinkProtocol: 'autolinkProtocol',
|
||||
// A whole character escape (`\-`).
|
||||
// Includes `escapeMarker` and `characterEscapeValue`.
|
||||
characterEscape: 'characterEscape',
|
||||
// The escaped character (`-`).
|
||||
characterEscapeValue: 'characterEscapeValue',
|
||||
// A whole character reference (`&`, `≠`, or `𝌆`).
|
||||
// Includes `characterReferenceMarker`, an optional
|
||||
// `characterReferenceMarkerNumeric`, in which case an optional
|
||||
// `characterReferenceMarkerHexadecimal`, and a `characterReferenceValue`.
|
||||
characterReference: 'characterReference',
|
||||
// The start or end marker (`&` or `;`).
|
||||
characterReferenceMarker: 'characterReferenceMarker',
|
||||
// Mark reference as numeric (`#`).
|
||||
characterReferenceMarkerNumeric: 'characterReferenceMarkerNumeric',
|
||||
// Mark reference as numeric (`x` or `X`).
|
||||
characterReferenceMarkerHexadecimal: 'characterReferenceMarkerHexadecimal',
|
||||
// Value of character reference w/o markers (`amp`, `8800`, or `1D306`).
|
||||
characterReferenceValue: 'characterReferenceValue',
|
||||
// Whole fenced code:
|
||||
//
|
||||
// ````markdown
|
||||
// ```js
|
||||
// alert(1)
|
||||
// ```
|
||||
// ````
|
||||
codeFenced: 'codeFenced',
|
||||
// A fenced code fence, including whitespace, sequence, info, and meta
|
||||
// (` ```js `).
|
||||
codeFencedFence: 'codeFencedFence',
|
||||
// Sequence of grave accent or tilde characters (` ``` `) in a fence.
|
||||
codeFencedFenceSequence: 'codeFencedFenceSequence',
|
||||
// Info word (`js`) in a fence.
|
||||
// Includes string.
|
||||
codeFencedFenceInfo: 'codeFencedFenceInfo',
|
||||
// Meta words (`highlight="1"`) in a fence.
|
||||
// Includes string.
|
||||
codeFencedFenceMeta: 'codeFencedFenceMeta',
|
||||
// A line of code.
|
||||
codeFlowValue: 'codeFlowValue',
|
||||
// Whole indented code:
|
||||
//
|
||||
// ```markdown
|
||||
// alert(1)
|
||||
// ```
|
||||
//
|
||||
// Includes `lineEnding`, `linePrefix`, and `codeFlowValue`.
|
||||
codeIndented: 'codeIndented',
|
||||
// A text code (``` `alpha` ```).
|
||||
// Includes `codeTextSequence`, `codeTextData`, `lineEnding`, and can include
|
||||
// `codeTextPadding`.
|
||||
codeText: 'codeText',
|
||||
codeTextData: 'codeTextData',
|
||||
// A space or line ending right after or before a tick.
|
||||
codeTextPadding: 'codeTextPadding',
|
||||
// A text code fence (` `` `).
|
||||
codeTextSequence: 'codeTextSequence',
|
||||
// Whole content:
|
||||
//
|
||||
// ```markdown
|
||||
// [a]: b
|
||||
// c
|
||||
// =
|
||||
// d
|
||||
// ```
|
||||
//
|
||||
// Includes `paragraph` and `definition`.
|
||||
content: 'content',
|
||||
// Whole definition:
|
||||
//
|
||||
// ```markdown
|
||||
// [micromark]: https://github.com/micromark/micromark
|
||||
// ```
|
||||
//
|
||||
// Includes `definitionLabel`, `definitionMarker`, `whitespace`,
|
||||
// `definitionDestination`, and optionally `lineEnding` and `definitionTitle`.
|
||||
definition: 'definition',
|
||||
// Destination of a definition (`https://github.com/micromark/micromark` or
|
||||
// `<https://github.com/micromark/micromark>`).
|
||||
// Includes `definitionDestinationLiteral` or `definitionDestinationRaw`.
|
||||
definitionDestination: 'definitionDestination',
|
||||
// Enclosed destination of a definition
|
||||
// (`<https://github.com/micromark/micromark>`).
|
||||
// Includes `definitionDestinationLiteralMarker` and optionally
|
||||
// `definitionDestinationString`.
|
||||
definitionDestinationLiteral: 'definitionDestinationLiteral',
|
||||
// Markers of an enclosed definition destination (`<` or `>`).
|
||||
definitionDestinationLiteralMarker: 'definitionDestinationLiteralMarker',
|
||||
// Unenclosed destination of a definition
|
||||
// (`https://github.com/micromark/micromark`).
|
||||
// Includes `definitionDestinationString`.
|
||||
definitionDestinationRaw: 'definitionDestinationRaw',
|
||||
// Text in an destination (`https://github.com/micromark/micromark`).
|
||||
// Includes string.
|
||||
definitionDestinationString: 'definitionDestinationString',
|
||||
// Label of a definition (`[micromark]`).
|
||||
// Includes `definitionLabelMarker` and `definitionLabelString`.
|
||||
definitionLabel: 'definitionLabel',
|
||||
// Markers of a definition label (`[` or `]`).
|
||||
definitionLabelMarker: 'definitionLabelMarker',
|
||||
// Value of a definition label (`micromark`).
|
||||
// Includes string.
|
||||
definitionLabelString: 'definitionLabelString',
|
||||
// Marker between a label and a destination (`:`).
|
||||
definitionMarker: 'definitionMarker',
|
||||
// Title of a definition (`"x"`, `'y'`, or `(z)`).
|
||||
// Includes `definitionTitleMarker` and optionally `definitionTitleString`.
|
||||
definitionTitle: 'definitionTitle',
|
||||
// Marker around a title of a definition (`"`, `'`, `(`, or `)`).
|
||||
definitionTitleMarker: 'definitionTitleMarker',
|
||||
// Data without markers in a title (`z`).
|
||||
// Includes string.
|
||||
definitionTitleString: 'definitionTitleString',
|
||||
// Emphasis (`*alpha*`).
|
||||
// Includes `emphasisSequence` and `emphasisText`.
|
||||
emphasis: 'emphasis',
|
||||
// Sequence of emphasis markers (`*` or `_`).
|
||||
emphasisSequence: 'emphasisSequence',
|
||||
// Emphasis text (`alpha`).
|
||||
// Includes text.
|
||||
emphasisText: 'emphasisText',
|
||||
// The character escape marker (`\`).
|
||||
escapeMarker: 'escapeMarker',
|
||||
// A hard break created with a backslash (`\\n`).
|
||||
// Includes `escapeMarker` (does not include the line ending)
|
||||
hardBreakEscape: 'hardBreakEscape',
|
||||
// A hard break created with trailing spaces (` \n`).
|
||||
// Does not include the line ending.
|
||||
hardBreakTrailing: 'hardBreakTrailing',
|
||||
// Flow HTML:
|
||||
//
|
||||
// ```markdown
|
||||
// <div
|
||||
// ```
|
||||
//
|
||||
// Inlcudes `lineEnding`, `htmlFlowData`.
|
||||
htmlFlow: 'htmlFlow',
|
||||
htmlFlowData: 'htmlFlowData',
|
||||
// HTML in text (the tag in `a <i> b`).
|
||||
// Includes `lineEnding`, `htmlTextData`.
|
||||
htmlText: 'htmlText',
|
||||
htmlTextData: 'htmlTextData',
|
||||
// Whole image (``, `![alpha][bravo]`, `![alpha][]`, or
|
||||
// `![alpha]`).
|
||||
// Includes `label` and an optional `resource` or `reference`.
|
||||
image: 'image',
|
||||
// Whole link label (`[*alpha*]`).
|
||||
// Includes `labelLink` or `labelImage`, `labelText`, and `labelEnd`.
|
||||
label: 'label',
|
||||
// Text in an label (`*alpha*`).
|
||||
// Includes text.
|
||||
labelText: 'labelText',
|
||||
// Start a link label (`[`).
|
||||
// Includes a `labelMarker`.
|
||||
labelLink: 'labelLink',
|
||||
// Start an image label (`![`).
|
||||
// Includes `labelImageMarker` and `labelMarker`.
|
||||
labelImage: 'labelImage',
|
||||
// Marker of a label (`[` or `]`).
|
||||
labelMarker: 'labelMarker',
|
||||
// Marker to start an image (`!`).
|
||||
labelImageMarker: 'labelImageMarker',
|
||||
// End a label (`]`).
|
||||
// Includes `labelMarker`.
|
||||
labelEnd: 'labelEnd',
|
||||
// Whole link (`[alpha](bravo)`, `[alpha][bravo]`, `[alpha][]`, or `[alpha]`).
|
||||
// Includes `label` and an optional `resource` or `reference`.
|
||||
link: 'link',
|
||||
// Whole paragraph:
|
||||
//
|
||||
// ```markdown
|
||||
// alpha
|
||||
// bravo.
|
||||
// ```
|
||||
//
|
||||
// Includes text.
|
||||
paragraph: 'paragraph',
|
||||
// A reference (`[alpha]` or `[]`).
|
||||
// Includes `referenceMarker` and an optional `referenceString`.
|
||||
reference: 'reference',
|
||||
// A reference marker (`[` or `]`).
|
||||
referenceMarker: 'referenceMarker',
|
||||
// Reference text (`alpha`).
|
||||
// Includes string.
|
||||
referenceString: 'referenceString',
|
||||
// A resource (`(https://example.com "alpha")`).
|
||||
// Includes `resourceMarker`, an optional `resourceDestination` with an optional
|
||||
// `whitespace` and `resourceTitle`.
|
||||
resource: 'resource',
|
||||
// A resource destination (`https://example.com`).
|
||||
// Includes `resourceDestinationLiteral` or `resourceDestinationRaw`.
|
||||
resourceDestination: 'resourceDestination',
|
||||
// A literal resource destination (`<https://example.com>`).
|
||||
// Includes `resourceDestinationLiteralMarker` and optionally
|
||||
// `resourceDestinationString`.
|
||||
resourceDestinationLiteral: 'resourceDestinationLiteral',
|
||||
// A resource destination marker (`<` or `>`).
|
||||
resourceDestinationLiteralMarker: 'resourceDestinationLiteralMarker',
|
||||
// A raw resource destination (`https://example.com`).
|
||||
// Includes `resourceDestinationString`.
|
||||
resourceDestinationRaw: 'resourceDestinationRaw',
|
||||
// Resource destination text (`https://example.com`).
|
||||
// Includes string.
|
||||
resourceDestinationString: 'resourceDestinationString',
|
||||
// A resource marker (`(` or `)`).
|
||||
resourceMarker: 'resourceMarker',
|
||||
// A resource title (`"alpha"`, `'alpha'`, or `(alpha)`).
|
||||
// Includes `resourceTitleMarker` and optionally `resourceTitleString`.
|
||||
resourceTitle: 'resourceTitle',
|
||||
// A resource title marker (`"`, `'`, `(`, or `)`).
|
||||
resourceTitleMarker: 'resourceTitleMarker',
|
||||
// Resource destination title (`alpha`).
|
||||
// Includes string.
|
||||
resourceTitleString: 'resourceTitleString',
|
||||
// Whole setext heading:
|
||||
//
|
||||
// ```markdown
|
||||
// alpha
|
||||
// bravo
|
||||
// =====
|
||||
// ```
|
||||
//
|
||||
// Includes `setextHeadingText`, `lineEnding`, `linePrefix`, and
|
||||
// `setextHeadingLine`.
|
||||
setextHeading: 'setextHeading',
|
||||
// Content in a setext heading (`alpha\nbravo`).
|
||||
// Includes text.
|
||||
setextHeadingText: 'setextHeadingText',
|
||||
// Underline in a setext heading, including whitespace suffix (`==`).
|
||||
// Includes `setextHeadingLineSequence`.
|
||||
setextHeadingLine: 'setextHeadingLine',
|
||||
// Sequence of equals or dash characters in underline in a setext heading (`-`).
|
||||
setextHeadingLineSequence: 'setextHeadingLineSequence',
|
||||
// Strong (`**alpha**`).
|
||||
// Includes `strongSequence` and `strongText`.
|
||||
strong: 'strong',
|
||||
// Sequence of strong markers (`**` or `__`).
|
||||
strongSequence: 'strongSequence',
|
||||
// Strong text (`alpha`).
|
||||
// Includes text.
|
||||
strongText: 'strongText',
|
||||
// Whole thematic break:
|
||||
//
|
||||
// ```markdown
|
||||
// * * *
|
||||
// ```
|
||||
//
|
||||
// Includes `thematicBreakSequence` and `whitespace`.
|
||||
thematicBreak: 'thematicBreak',
|
||||
// A sequence of one or more thematic break markers (`***`).
|
||||
thematicBreakSequence: 'thematicBreakSequence',
|
||||
// Whole block quote:
|
||||
//
|
||||
// ```markdown
|
||||
// > a
|
||||
// >
|
||||
// > b
|
||||
// ```
|
||||
//
|
||||
// Includes `blockQuotePrefix` and flow.
|
||||
blockQuote: 'blockQuote',
|
||||
// The `>` or `> ` of a block quote.
|
||||
blockQuotePrefix: 'blockQuotePrefix',
|
||||
// The `>` of a block quote prefix.
|
||||
blockQuoteMarker: 'blockQuoteMarker',
|
||||
// The optional ` ` of a block quote prefix.
|
||||
blockQuotePrefixWhitespace: 'blockQuotePrefixWhitespace',
|
||||
// Whole unordered list:
|
||||
//
|
||||
// ```markdown
|
||||
// - a
|
||||
// b
|
||||
// ```
|
||||
//
|
||||
// Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further
|
||||
// lines.
|
||||
listOrdered: 'listOrdered',
|
||||
// Whole ordered list:
|
||||
//
|
||||
// ```markdown
|
||||
// 1. a
|
||||
// b
|
||||
// ```
|
||||
//
|
||||
// Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further
|
||||
// lines.
|
||||
listUnordered: 'listUnordered',
|
||||
// The indent of further list item lines.
|
||||
listItemIndent: 'listItemIndent',
|
||||
// A marker, as in, `*`, `+`, `-`, `.`, or `)`.
|
||||
listItemMarker: 'listItemMarker',
|
||||
// The thing that starts a list item, such as `1. `.
|
||||
// Includes `listItemValue` if ordered, `listItemMarker`, and
|
||||
// `listItemPrefixWhitespace` (unless followed by a line ending).
|
||||
listItemPrefix: 'listItemPrefix',
|
||||
// The whitespace after a marker.
|
||||
listItemPrefixWhitespace: 'listItemPrefixWhitespace',
|
||||
// The numerical value of an ordered item.
|
||||
listItemValue: 'listItemValue',
|
||||
// Internal types used for subtokenizers, compiled away
|
||||
chunkContent: 'chunkContent',
|
||||
chunkFlow: 'chunkFlow',
|
||||
chunkText: 'chunkText',
|
||||
chunkString: 'chunkString'
|
||||
}
|
||||
|
||||
module.exports = types
|
||||
11
node_modules/micromark/dist/constant/unicode-punctuation-regex.js
generated
vendored
Normal file
11
node_modules/micromark/dist/constant/unicode-punctuation-regex.js
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
'use strict'
|
||||
|
||||
// This module is generated by `script/`.
|
||||
//
|
||||
// CommonMark handles attention (emphasis, strong) markers based on what comes
|
||||
// before or after them.
|
||||
// One such difference is if those characters are Unicode punctuation.
|
||||
// This script is generated from the Unicode data.
|
||||
var unicodePunctuation = /[!-\/:-@\[-`\{-~\xA1\xA7\xAB\xB6\xB7\xBB\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u2E52\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/
|
||||
|
||||
module.exports = unicodePunctuation
|
||||
127
node_modules/micromark/dist/constructs.js
generated
vendored
Normal file
127
node_modules/micromark/dist/constructs.js
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
'use strict'
|
||||
|
||||
Object.defineProperty(exports, '__esModule', {value: true})
|
||||
|
||||
var text$1 = require('./initialize/text.js')
|
||||
var attention = require('./tokenize/attention.js')
|
||||
var autolink = require('./tokenize/autolink.js')
|
||||
var blockQuote = require('./tokenize/block-quote.js')
|
||||
var characterEscape = require('./tokenize/character-escape.js')
|
||||
var characterReference = require('./tokenize/character-reference.js')
|
||||
var codeFenced = require('./tokenize/code-fenced.js')
|
||||
var codeIndented = require('./tokenize/code-indented.js')
|
||||
var codeText = require('./tokenize/code-text.js')
|
||||
var definition = require('./tokenize/definition.js')
|
||||
var hardBreakEscape = require('./tokenize/hard-break-escape.js')
|
||||
var headingAtx = require('./tokenize/heading-atx.js')
|
||||
var htmlFlow = require('./tokenize/html-flow.js')
|
||||
var htmlText = require('./tokenize/html-text.js')
|
||||
var labelEnd = require('./tokenize/label-end.js')
|
||||
var labelStartImage = require('./tokenize/label-start-image.js')
|
||||
var labelStartLink = require('./tokenize/label-start-link.js')
|
||||
var lineEnding = require('./tokenize/line-ending.js')
|
||||
var list = require('./tokenize/list.js')
|
||||
var setextUnderline = require('./tokenize/setext-underline.js')
|
||||
var thematicBreak = require('./tokenize/thematic-break.js')
|
||||
|
||||
var document = {
|
||||
42: list,
|
||||
// Asterisk
|
||||
43: list,
|
||||
// Plus sign
|
||||
45: list,
|
||||
// Dash
|
||||
48: list,
|
||||
// 0
|
||||
49: list,
|
||||
// 1
|
||||
50: list,
|
||||
// 2
|
||||
51: list,
|
||||
// 3
|
||||
52: list,
|
||||
// 4
|
||||
53: list,
|
||||
// 5
|
||||
54: list,
|
||||
// 6
|
||||
55: list,
|
||||
// 7
|
||||
56: list,
|
||||
// 8
|
||||
57: list,
|
||||
// 9
|
||||
62: blockQuote // Greater than
|
||||
}
|
||||
var contentInitial = {
|
||||
91: definition // Left square bracket
|
||||
}
|
||||
var flowInitial = {
|
||||
'-2': codeIndented,
|
||||
// Horizontal tab
|
||||
'-1': codeIndented,
|
||||
// Virtual space
|
||||
32: codeIndented // Space
|
||||
}
|
||||
var flow = {
|
||||
35: headingAtx,
|
||||
// Number sign
|
||||
42: thematicBreak,
|
||||
// Asterisk
|
||||
45: [setextUnderline, thematicBreak],
|
||||
// Dash
|
||||
60: htmlFlow,
|
||||
// Less than
|
||||
61: setextUnderline,
|
||||
// Equals to
|
||||
95: thematicBreak,
|
||||
// Underscore
|
||||
96: codeFenced,
|
||||
// Grave accent
|
||||
126: codeFenced // Tilde
|
||||
}
|
||||
var string = {
|
||||
38: characterReference,
|
||||
// Ampersand
|
||||
92: characterEscape // Backslash
|
||||
}
|
||||
var text = {
|
||||
'-5': lineEnding,
|
||||
// Carriage return
|
||||
'-4': lineEnding,
|
||||
// Line feed
|
||||
'-3': lineEnding,
|
||||
// Carriage return + line feed
|
||||
33: labelStartImage,
|
||||
// Exclamation mark
|
||||
38: characterReference,
|
||||
// Ampersand
|
||||
42: attention,
|
||||
// Asterisk
|
||||
60: [autolink, htmlText],
|
||||
// Less than
|
||||
91: labelStartLink,
|
||||
// Left square bracket
|
||||
92: [hardBreakEscape, characterEscape],
|
||||
// Backslash
|
||||
93: labelEnd,
|
||||
// Right square bracket
|
||||
95: attention,
|
||||
// Underscore
|
||||
96: codeText // Grave accent
|
||||
}
|
||||
var insideSpan = {
|
||||
null: [attention, text$1.resolver]
|
||||
}
|
||||
var disable = {
|
||||
null: []
|
||||
}
|
||||
|
||||
exports.contentInitial = contentInitial
|
||||
exports.disable = disable
|
||||
exports.document = document
|
||||
exports.flow = flow
|
||||
exports.flowInitial = flowInitial
|
||||
exports.insideSpan = insideSpan
|
||||
exports.string = string
|
||||
exports.text = text
|
||||
11
node_modules/micromark/dist/index.d.ts
generated
vendored
Normal file
11
node_modules/micromark/dist/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
import {Buffer, BufferEncoding, Options} from './shared-types'
|
||||
|
||||
declare function buffer(value: string | Buffer, options?: Options): string
|
||||
|
||||
declare function buffer(
|
||||
value: string | Buffer,
|
||||
encoding?: BufferEncoding,
|
||||
options?: Options
|
||||
): string
|
||||
|
||||
export default buffer
|
||||
21
node_modules/micromark/dist/index.js
generated
vendored
Normal file
21
node_modules/micromark/dist/index.js
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
'use strict'
|
||||
|
||||
var html = require('./compile/html.js')
|
||||
var parse = require('./parse.js')
|
||||
var postprocess = require('./postprocess.js')
|
||||
var preprocess = require('./preprocess.js')
|
||||
|
||||
function buffer(value, encoding, options) {
|
||||
if (typeof encoding !== 'string') {
|
||||
options = encoding
|
||||
encoding = undefined
|
||||
}
|
||||
|
||||
return html(options)(
|
||||
postprocess(
|
||||
parse(options).document().write(preprocess()(value, encoding, true))
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
module.exports = buffer
|
||||
69
node_modules/micromark/dist/initialize/content.js
generated
vendored
Normal file
69
node_modules/micromark/dist/initialize/content.js
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
'use strict'
|
||||
|
||||
Object.defineProperty(exports, '__esModule', {value: true})
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var factorySpace = require('../tokenize/factory-space.js')
|
||||
|
||||
var tokenize = initializeContent
|
||||
|
||||
function initializeContent(effects) {
|
||||
var contentStart = effects.attempt(
|
||||
this.parser.constructs.contentInitial,
|
||||
afterContentStartConstruct,
|
||||
paragraphInitial
|
||||
)
|
||||
var previous
|
||||
return contentStart
|
||||
|
||||
function afterContentStartConstruct(code) {
|
||||
if (code === null) {
|
||||
effects.consume(code)
|
||||
return
|
||||
}
|
||||
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return factorySpace(effects, contentStart, 'linePrefix')
|
||||
}
|
||||
|
||||
function paragraphInitial(code) {
|
||||
effects.enter('paragraph')
|
||||
return lineStart(code)
|
||||
}
|
||||
|
||||
function lineStart(code) {
|
||||
var token = effects.enter('chunkText', {
|
||||
contentType: 'text',
|
||||
previous: previous
|
||||
})
|
||||
|
||||
if (previous) {
|
||||
previous.next = token
|
||||
}
|
||||
|
||||
previous = token
|
||||
return data(code)
|
||||
}
|
||||
|
||||
function data(code) {
|
||||
if (code === null) {
|
||||
effects.exit('chunkText')
|
||||
effects.exit('paragraph')
|
||||
effects.consume(code)
|
||||
return
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.consume(code)
|
||||
effects.exit('chunkText')
|
||||
return lineStart
|
||||
} // Data.
|
||||
|
||||
effects.consume(code)
|
||||
return data
|
||||
}
|
||||
}
|
||||
|
||||
exports.tokenize = tokenize
|
||||
237
node_modules/micromark/dist/initialize/document.js
generated
vendored
Normal file
237
node_modules/micromark/dist/initialize/document.js
generated
vendored
Normal file
@@ -0,0 +1,237 @@
|
||||
'use strict'
|
||||
|
||||
Object.defineProperty(exports, '__esModule', {value: true})
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var factorySpace = require('../tokenize/factory-space.js')
|
||||
var partialBlankLine = require('../tokenize/partial-blank-line.js')
|
||||
|
||||
var tokenize = initializeDocument
|
||||
var containerConstruct = {
|
||||
tokenize: tokenizeContainer
|
||||
}
|
||||
var lazyFlowConstruct = {
|
||||
tokenize: tokenizeLazyFlow
|
||||
}
|
||||
|
||||
function initializeDocument(effects) {
|
||||
var self = this
|
||||
var stack = []
|
||||
var continued = 0
|
||||
var inspectConstruct = {
|
||||
tokenize: tokenizeInspect,
|
||||
partial: true
|
||||
}
|
||||
var inspectResult
|
||||
var childFlow
|
||||
var childToken
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
if (continued < stack.length) {
|
||||
self.containerState = stack[continued][1]
|
||||
return effects.attempt(
|
||||
stack[continued][0].continuation,
|
||||
documentContinue,
|
||||
documentContinued
|
||||
)(code)
|
||||
}
|
||||
|
||||
return documentContinued(code)
|
||||
}
|
||||
|
||||
function documentContinue(code) {
|
||||
continued++
|
||||
return start(code)
|
||||
}
|
||||
|
||||
function documentContinued(code) {
|
||||
// If we’re in a concrete construct (such as when expecting another line of
|
||||
// HTML, or we resulted in lazy content), we can immediately start flow.
|
||||
if (inspectResult && inspectResult.flowContinue) {
|
||||
return flowStart(code)
|
||||
}
|
||||
|
||||
self.interrupt =
|
||||
childFlow &&
|
||||
childFlow.currentConstruct &&
|
||||
childFlow.currentConstruct.interruptible
|
||||
self.containerState = {}
|
||||
return effects.attempt(
|
||||
containerConstruct,
|
||||
containerContinue,
|
||||
flowStart
|
||||
)(code)
|
||||
}
|
||||
|
||||
function containerContinue(code) {
|
||||
stack.push([self.currentConstruct, self.containerState])
|
||||
self.containerState = undefined
|
||||
return documentContinued(code)
|
||||
}
|
||||
|
||||
function flowStart(code) {
|
||||
if (code === null) {
|
||||
exitContainers(0, true)
|
||||
effects.consume(code)
|
||||
return
|
||||
}
|
||||
|
||||
childFlow = childFlow || self.parser.flow(self.now())
|
||||
effects.enter('chunkFlow', {
|
||||
contentType: 'flow',
|
||||
previous: childToken,
|
||||
_tokenizer: childFlow
|
||||
})
|
||||
return flowContinue(code)
|
||||
}
|
||||
|
||||
function flowContinue(code) {
|
||||
if (code === null) {
|
||||
continueFlow(effects.exit('chunkFlow'))
|
||||
return flowStart(code)
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.consume(code)
|
||||
continueFlow(effects.exit('chunkFlow'))
|
||||
return effects.check(inspectConstruct, documentAfterPeek)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return flowContinue
|
||||
}
|
||||
|
||||
function documentAfterPeek(code) {
|
||||
exitContainers(
|
||||
inspectResult.continued,
|
||||
inspectResult && inspectResult.flowEnd
|
||||
)
|
||||
continued = 0
|
||||
return start(code)
|
||||
}
|
||||
|
||||
function continueFlow(token) {
|
||||
if (childToken) childToken.next = token
|
||||
childToken = token
|
||||
childFlow.lazy = inspectResult && inspectResult.lazy
|
||||
childFlow.defineSkip(token.start)
|
||||
childFlow.write(self.sliceStream(token))
|
||||
}
|
||||
|
||||
function exitContainers(size, end) {
|
||||
var index = stack.length // Close the flow.
|
||||
|
||||
if (childFlow && end) {
|
||||
childFlow.write([null])
|
||||
childToken = childFlow = undefined
|
||||
} // Exit open containers.
|
||||
|
||||
while (index-- > size) {
|
||||
self.containerState = stack[index][1]
|
||||
stack[index][0].exit.call(self, effects)
|
||||
}
|
||||
|
||||
stack.length = size
|
||||
}
|
||||
|
||||
function tokenizeInspect(effects, ok) {
|
||||
var subcontinued = 0
|
||||
inspectResult = {}
|
||||
return inspectStart
|
||||
|
||||
function inspectStart(code) {
|
||||
if (subcontinued < stack.length) {
|
||||
self.containerState = stack[subcontinued][1]
|
||||
return effects.attempt(
|
||||
stack[subcontinued][0].continuation,
|
||||
inspectContinue,
|
||||
inspectLess
|
||||
)(code)
|
||||
} // If we’re continued but in a concrete flow, we can’t have more
|
||||
// containers.
|
||||
|
||||
if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
|
||||
inspectResult.flowContinue = true
|
||||
return inspectDone(code)
|
||||
}
|
||||
|
||||
self.interrupt =
|
||||
childFlow.currentConstruct && childFlow.currentConstruct.interruptible
|
||||
self.containerState = {}
|
||||
return effects.attempt(
|
||||
containerConstruct,
|
||||
inspectFlowEnd,
|
||||
inspectDone
|
||||
)(code)
|
||||
}
|
||||
|
||||
function inspectContinue(code) {
|
||||
subcontinued++
|
||||
return self.containerState._closeFlow
|
||||
? inspectFlowEnd(code)
|
||||
: inspectStart(code)
|
||||
}
|
||||
|
||||
function inspectLess(code) {
|
||||
if (childFlow.currentConstruct && childFlow.currentConstruct.lazy) {
|
||||
// Maybe another container?
|
||||
self.containerState = {}
|
||||
return effects.attempt(
|
||||
containerConstruct,
|
||||
inspectFlowEnd, // Maybe flow, or a blank line?
|
||||
effects.attempt(
|
||||
lazyFlowConstruct,
|
||||
inspectFlowEnd,
|
||||
effects.check(partialBlankLine, inspectFlowEnd, inspectLazy)
|
||||
)
|
||||
)(code)
|
||||
} // Otherwise we’re interrupting.
|
||||
|
||||
return inspectFlowEnd(code)
|
||||
}
|
||||
|
||||
function inspectLazy(code) {
|
||||
// Act as if all containers are continued.
|
||||
subcontinued = stack.length
|
||||
inspectResult.lazy = true
|
||||
inspectResult.flowContinue = true
|
||||
return inspectDone(code)
|
||||
} // We’re done with flow if we have more containers, or an interruption.
|
||||
|
||||
function inspectFlowEnd(code) {
|
||||
inspectResult.flowEnd = true
|
||||
return inspectDone(code)
|
||||
}
|
||||
|
||||
function inspectDone(code) {
|
||||
inspectResult.continued = subcontinued
|
||||
self.interrupt = self.containerState = undefined
|
||||
return ok(code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeContainer(effects, ok, nok) {
|
||||
return factorySpace(
|
||||
effects,
|
||||
effects.attempt(this.parser.constructs.document, ok, nok),
|
||||
'linePrefix',
|
||||
this.parser.constructs.disable.null.indexOf('codeIndented') > -1
|
||||
? undefined
|
||||
: 4
|
||||
)
|
||||
}
|
||||
|
||||
function tokenizeLazyFlow(effects, ok, nok) {
|
||||
return factorySpace(
|
||||
effects,
|
||||
effects.lazy(this.parser.constructs.flow, ok, nok),
|
||||
'linePrefix',
|
||||
this.parser.constructs.disable.null.indexOf('codeIndented') > -1
|
||||
? undefined
|
||||
: 4
|
||||
)
|
||||
}
|
||||
|
||||
exports.tokenize = tokenize
|
||||
60
node_modules/micromark/dist/initialize/flow.js
generated
vendored
Normal file
60
node_modules/micromark/dist/initialize/flow.js
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
'use strict'
|
||||
|
||||
Object.defineProperty(exports, '__esModule', {value: true})
|
||||
|
||||
var content = require('../tokenize/content.js')
|
||||
var factorySpace = require('../tokenize/factory-space.js')
|
||||
var partialBlankLine = require('../tokenize/partial-blank-line.js')
|
||||
|
||||
var tokenize = initializeFlow
|
||||
|
||||
function initializeFlow(effects) {
|
||||
var self = this
|
||||
var initial = effects.attempt(
|
||||
// Try to parse a blank line.
|
||||
partialBlankLine,
|
||||
atBlankEnding, // Try to parse initial flow (essentially, only code).
|
||||
effects.attempt(
|
||||
this.parser.constructs.flowInitial,
|
||||
afterConstruct,
|
||||
factorySpace(
|
||||
effects,
|
||||
effects.attempt(
|
||||
this.parser.constructs.flow,
|
||||
afterConstruct,
|
||||
effects.attempt(content, afterConstruct)
|
||||
),
|
||||
'linePrefix'
|
||||
)
|
||||
)
|
||||
)
|
||||
return initial
|
||||
|
||||
function atBlankEnding(code) {
|
||||
if (code === null) {
|
||||
effects.consume(code)
|
||||
return
|
||||
}
|
||||
|
||||
effects.enter('lineEndingBlank')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEndingBlank')
|
||||
self.currentConstruct = undefined
|
||||
return initial
|
||||
}
|
||||
|
||||
function afterConstruct(code) {
|
||||
if (code === null) {
|
||||
effects.consume(code)
|
||||
return
|
||||
}
|
||||
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
self.currentConstruct = undefined
|
||||
return initial
|
||||
}
|
||||
}
|
||||
|
||||
exports.tokenize = tokenize
|
||||
201
node_modules/micromark/dist/initialize/text.js
generated
vendored
Normal file
201
node_modules/micromark/dist/initialize/text.js
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
'use strict'
|
||||
|
||||
Object.defineProperty(exports, '__esModule', {value: true})
|
||||
|
||||
var assign = require('../constant/assign.js')
|
||||
var shallow = require('../util/shallow.js')
|
||||
|
||||
var text = initializeFactory('text')
|
||||
var string = initializeFactory('string')
|
||||
var resolver = {
|
||||
resolveAll: createResolver()
|
||||
}
|
||||
|
||||
function initializeFactory(field) {
|
||||
return {
|
||||
tokenize: initializeText,
|
||||
resolveAll: createResolver(
|
||||
field === 'text' ? resolveAllLineSuffixes : undefined
|
||||
)
|
||||
}
|
||||
|
||||
function initializeText(effects) {
|
||||
var self = this
|
||||
var constructs = this.parser.constructs[field]
|
||||
var text = effects.attempt(constructs, start, notText)
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
return atBreak(code) ? text(code) : notText(code)
|
||||
}
|
||||
|
||||
function notText(code) {
|
||||
if (code === null) {
|
||||
effects.consume(code)
|
||||
return
|
||||
}
|
||||
|
||||
effects.enter('data')
|
||||
effects.consume(code)
|
||||
return data
|
||||
}
|
||||
|
||||
function data(code) {
|
||||
if (atBreak(code)) {
|
||||
effects.exit('data')
|
||||
return text(code)
|
||||
} // Data.
|
||||
|
||||
effects.consume(code)
|
||||
return data
|
||||
}
|
||||
|
||||
function atBreak(code) {
|
||||
var list = constructs[code]
|
||||
var index = -1
|
||||
|
||||
if (code === null) {
|
||||
return true
|
||||
}
|
||||
|
||||
if (list) {
|
||||
while (++index < list.length) {
|
||||
if (
|
||||
!list[index].previous ||
|
||||
list[index].previous.call(self, self.previous)
|
||||
) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createResolver(extraResolver) {
|
||||
return resolveAllText
|
||||
|
||||
function resolveAllText(events, context) {
|
||||
var index = -1
|
||||
var enter // A rather boring computation (to merge adjacent `data` events) which
|
||||
// improves mm performance by 29%.
|
||||
|
||||
while (++index <= events.length) {
|
||||
if (enter === undefined) {
|
||||
if (events[index] && events[index][1].type === 'data') {
|
||||
enter = index
|
||||
index++
|
||||
}
|
||||
} else if (!events[index] || events[index][1].type !== 'data') {
|
||||
// Don’t do anything if there is one data token.
|
||||
if (index !== enter + 2) {
|
||||
events[enter][1].end = events[index - 1][1].end
|
||||
events.splice(enter + 2, index - enter - 2)
|
||||
index = enter + 2
|
||||
}
|
||||
|
||||
enter = undefined
|
||||
}
|
||||
}
|
||||
|
||||
return extraResolver ? extraResolver(events, context) : events
|
||||
}
|
||||
} // A rather ugly set of instructions which again looks at chunks in the input
|
||||
// stream.
|
||||
// The reason to do this here is that it is *much* faster to parse in reverse.
|
||||
// And that we can’t hook into `null` to split the line suffix before an EOF.
|
||||
// To do: figure out if we can make this into a clean utility, or even in core.
|
||||
// As it will be useful for GFMs literal autolink extension (and maybe even
|
||||
// tables?)
|
||||
|
||||
function resolveAllLineSuffixes(events, context) {
|
||||
var eventIndex = -1
|
||||
var chunks
|
||||
var data
|
||||
var chunk
|
||||
var index
|
||||
var bufferIndex
|
||||
var size
|
||||
var tabs
|
||||
var token
|
||||
|
||||
while (++eventIndex <= events.length) {
|
||||
if (
|
||||
(eventIndex === events.length ||
|
||||
events[eventIndex][1].type === 'lineEnding') &&
|
||||
events[eventIndex - 1][1].type === 'data'
|
||||
) {
|
||||
data = events[eventIndex - 1][1]
|
||||
chunks = context.sliceStream(data)
|
||||
index = chunks.length
|
||||
bufferIndex = -1
|
||||
size = 0
|
||||
tabs = undefined
|
||||
|
||||
while (index--) {
|
||||
chunk = chunks[index]
|
||||
|
||||
if (typeof chunk === 'string') {
|
||||
bufferIndex = chunk.length
|
||||
|
||||
while (chunk.charCodeAt(bufferIndex - 1) === 32) {
|
||||
size++
|
||||
bufferIndex--
|
||||
}
|
||||
|
||||
if (bufferIndex) break
|
||||
bufferIndex = -1
|
||||
} // Number
|
||||
else if (chunk === -2) {
|
||||
tabs = true
|
||||
size++
|
||||
} else if (chunk === -1);
|
||||
else {
|
||||
// Replacement character, exit.
|
||||
index++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (size) {
|
||||
token = {
|
||||
type:
|
||||
eventIndex === events.length || tabs || size < 2
|
||||
? 'lineSuffix'
|
||||
: 'hardBreakTrailing',
|
||||
start: {
|
||||
line: data.end.line,
|
||||
column: data.end.column - size,
|
||||
offset: data.end.offset - size,
|
||||
_index: data.start._index + index,
|
||||
_bufferIndex: index
|
||||
? bufferIndex
|
||||
: data.start._bufferIndex + bufferIndex
|
||||
},
|
||||
end: shallow(data.end)
|
||||
}
|
||||
data.end = shallow(token.start)
|
||||
|
||||
if (data.start.offset === data.end.offset) {
|
||||
assign(data, token)
|
||||
} else {
|
||||
events.splice(
|
||||
eventIndex,
|
||||
0,
|
||||
['enter', token, context],
|
||||
['exit', token, context]
|
||||
)
|
||||
eventIndex += 2
|
||||
}
|
||||
}
|
||||
|
||||
eventIndex++
|
||||
}
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
exports.resolver = resolver
|
||||
exports.string = string
|
||||
exports.text = text
|
||||
5
node_modules/micromark/dist/parse.d.ts
generated
vendored
Normal file
5
node_modules/micromark/dist/parse.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
import {ParseOptions, Parser} from './shared-types'
|
||||
|
||||
declare function createParser(options?: ParseOptions): Parser
|
||||
|
||||
export default createParser
|
||||
36
node_modules/micromark/dist/parse.js
generated
vendored
Normal file
36
node_modules/micromark/dist/parse.js
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
'use strict'
|
||||
|
||||
var content = require('./initialize/content.js')
|
||||
var document = require('./initialize/document.js')
|
||||
var flow = require('./initialize/flow.js')
|
||||
var text = require('./initialize/text.js')
|
||||
var combineExtensions = require('./util/combine-extensions.js')
|
||||
var createTokenizer = require('./util/create-tokenizer.js')
|
||||
var miniflat = require('./util/miniflat.js')
|
||||
var constructs = require('./constructs.js')
|
||||
|
||||
function parse(options) {
|
||||
var settings = options || {}
|
||||
var parser = {
|
||||
defined: [],
|
||||
constructs: combineExtensions(
|
||||
[constructs].concat(miniflat(settings.extensions))
|
||||
),
|
||||
content: create(content),
|
||||
document: create(document),
|
||||
flow: create(flow),
|
||||
string: create(text.string),
|
||||
text: create(text.text)
|
||||
}
|
||||
return parser
|
||||
|
||||
function create(initializer) {
|
||||
return creator
|
||||
|
||||
function creator(from) {
|
||||
return createTokenizer(parser, initializer, from)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = parse
|
||||
5
node_modules/micromark/dist/postprocess.d.ts
generated
vendored
Normal file
5
node_modules/micromark/dist/postprocess.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
import {Event} from './shared-types'
|
||||
|
||||
declare function postprocess(events: Event[]): Event[]
|
||||
|
||||
export default postprocess
|
||||
13
node_modules/micromark/dist/postprocess.js
generated
vendored
Normal file
13
node_modules/micromark/dist/postprocess.js
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
'use strict'
|
||||
|
||||
var subtokenize = require('./util/subtokenize.js')
|
||||
|
||||
function postprocess(events) {
|
||||
while (!subtokenize(events)) {
|
||||
// Empty
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
module.exports = postprocess
|
||||
11
node_modules/micromark/dist/preprocess.d.ts
generated
vendored
Normal file
11
node_modules/micromark/dist/preprocess.d.ts
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
import {BufferEncoding} from './shared-types'
|
||||
|
||||
type PreprocessReturn = (
|
||||
value: string,
|
||||
encoding: BufferEncoding,
|
||||
end?: boolean
|
||||
) => string[]
|
||||
|
||||
declare function preprocess(): PreprocessReturn
|
||||
|
||||
export default preprocess
|
||||
87
node_modules/micromark/dist/preprocess.js
generated
vendored
Normal file
87
node_modules/micromark/dist/preprocess.js
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
'use strict'
|
||||
|
||||
var search = /[\0\t\n\r]/g
|
||||
|
||||
function preprocess() {
|
||||
var start = true
|
||||
var column = 1
|
||||
var buffer = ''
|
||||
var atCarriageReturn
|
||||
return preprocessor
|
||||
|
||||
function preprocessor(value, encoding, end) {
|
||||
var chunks = []
|
||||
var match
|
||||
var next
|
||||
var startPosition
|
||||
var endPosition
|
||||
var code
|
||||
value = buffer + value.toString(encoding)
|
||||
startPosition = 0
|
||||
buffer = ''
|
||||
|
||||
if (start) {
|
||||
if (value.charCodeAt(0) === 65279) {
|
||||
startPosition++
|
||||
}
|
||||
|
||||
start = undefined
|
||||
}
|
||||
|
||||
while (startPosition < value.length) {
|
||||
search.lastIndex = startPosition
|
||||
match = search.exec(value)
|
||||
endPosition = match ? match.index : value.length
|
||||
code = value.charCodeAt(endPosition)
|
||||
|
||||
if (!match) {
|
||||
buffer = value.slice(startPosition)
|
||||
break
|
||||
}
|
||||
|
||||
if (code === 10 && startPosition === endPosition && atCarriageReturn) {
|
||||
chunks.push(-3)
|
||||
atCarriageReturn = undefined
|
||||
} else {
|
||||
if (atCarriageReturn) {
|
||||
chunks.push(-5)
|
||||
atCarriageReturn = undefined
|
||||
}
|
||||
|
||||
if (startPosition < endPosition) {
|
||||
chunks.push(value.slice(startPosition, endPosition))
|
||||
column += endPosition - startPosition
|
||||
}
|
||||
|
||||
if (code === 0) {
|
||||
chunks.push(65533)
|
||||
column++
|
||||
} else if (code === 9) {
|
||||
next = Math.ceil(column / 4) * 4
|
||||
chunks.push(-2)
|
||||
|
||||
while (column++ < next) chunks.push(-1)
|
||||
} else if (code === 10) {
|
||||
chunks.push(-4)
|
||||
column = 1
|
||||
} // Must be carriage return.
|
||||
else {
|
||||
atCarriageReturn = true
|
||||
column = 1
|
||||
}
|
||||
}
|
||||
|
||||
startPosition = endPosition + 1
|
||||
}
|
||||
|
||||
if (end) {
|
||||
if (atCarriageReturn) chunks.push(-5)
|
||||
if (buffer) chunks.push(buffer)
|
||||
chunks.push(null)
|
||||
}
|
||||
|
||||
return chunks
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = preprocess
|
||||
291
node_modules/micromark/dist/shared-types.d.ts
generated
vendored
Normal file
291
node_modules/micromark/dist/shared-types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,291 @@
|
||||
// Minimum TypeScript Version: 3.0
|
||||
|
||||
import {Code} from './character/codes'
|
||||
import {Type} from './constant/types'
|
||||
|
||||
/**
|
||||
* A location in a string or buffer
|
||||
*/
|
||||
export interface Point {
|
||||
line: number
|
||||
column: number
|
||||
offset: number
|
||||
_index?: number
|
||||
_bufferIndex?: number
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export interface Token {
|
||||
type: Type
|
||||
start: Point
|
||||
end: Point
|
||||
|
||||
previous?: Token
|
||||
next?: Token
|
||||
|
||||
/**
|
||||
* Declares a token as having content of a certain type.
|
||||
* Because markdown requires to first parse containers, flow, content completely,
|
||||
* and then later go on to phrasing and such, it needs to be declared somewhere on the tokens.
|
||||
*/
|
||||
contentType?: 'flow' | 'content' | 'string' | 'text'
|
||||
|
||||
/**
|
||||
* Used when dealing with linked tokens. A child tokenizer is needed to tokenize them, which is stored on those tokens
|
||||
*/
|
||||
_tokenizer?: Tokenizer
|
||||
|
||||
/**
|
||||
* Close and open are also used in attention:
|
||||
* depending on the characters before and after sequences (**),
|
||||
* the sequence can open, close, both, or none
|
||||
*/
|
||||
_open?: boolean
|
||||
|
||||
/**
|
||||
* Close and open are also used in attention:
|
||||
* depending on the characters before and after sequences (**),
|
||||
* the sequence can open, close, both, or none
|
||||
*/
|
||||
_close?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export type Event = [string, Token, Tokenizer]
|
||||
|
||||
/**
|
||||
* These these are transitions to update the CommonMark State Machine (CSMS)
|
||||
*/
|
||||
export interface Effects {
|
||||
/**
|
||||
* Enter and exit define where tokens start and end
|
||||
*/
|
||||
enter: (type: Type) => Token
|
||||
|
||||
/**
|
||||
* Enter and exit define where tokens start and end
|
||||
*/
|
||||
exit: (type: Type) => Token
|
||||
|
||||
/**
|
||||
* Consume deals with a character, and moves to the next
|
||||
*/
|
||||
consume: (code: number) => void
|
||||
|
||||
/**
|
||||
* Attempt deals with several values, and tries to parse according to those values.
|
||||
* If a value resulted in `ok`, it worked, the tokens that were made are used,
|
||||
* and `returnState` is switched to.
|
||||
* If the result is `nok`, the attempt failed,
|
||||
* so we revert to the original state, and `bogusState` is used.
|
||||
*/
|
||||
attempt: (
|
||||
constructInfo:
|
||||
| Construct
|
||||
| Construct[]
|
||||
| Record<CodeAsKey, Construct | Construct[]>,
|
||||
returnState: State,
|
||||
bogusState?: State
|
||||
) => (code: Code) => void
|
||||
|
||||
/**
|
||||
* Interrupt is used for stuff right after a line of content.
|
||||
*/
|
||||
interrupt: (
|
||||
constructInfo:
|
||||
| Construct
|
||||
| Construct[]
|
||||
| Record<CodeAsKey, Construct | Construct[]>,
|
||||
ok: Okay,
|
||||
nok?: NotOkay
|
||||
) => (code: Code) => void
|
||||
|
||||
check: (
|
||||
constructInfo:
|
||||
| Construct
|
||||
| Construct[]
|
||||
| Record<CodeAsKey, Construct | Construct[]>,
|
||||
ok: Okay,
|
||||
nok?: NotOkay
|
||||
) => (code: Code) => void
|
||||
|
||||
/**
|
||||
* Lazy is used for lines that were not properly preceded by the container.
|
||||
*/
|
||||
lazy: (
|
||||
constructInfo:
|
||||
| Construct
|
||||
| Construct[]
|
||||
| Record<CodeAsKey, Construct | Construct[]>,
|
||||
ok: Okay,
|
||||
nok?: NotOkay
|
||||
) => void
|
||||
}
|
||||
|
||||
/**
|
||||
* A state function should return another function: the next state-as-a-function to go to.
|
||||
*
|
||||
* But there is one case where they return void: for the eof character code (at the end of a value)
|
||||
* The reason being: well, there isn’t any state that makes sense, so void works well. Practically
|
||||
* that has also helped: if for some reason it was a mistake, then an exception is throw because
|
||||
* there is no next function, meaning it surfaces early.
|
||||
*/
|
||||
export type State = (code: number) => State | void
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export type Okay = State
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export type NotOkay = State
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export interface Tokenizer {
|
||||
previous: Code
|
||||
events: Event[]
|
||||
parser: Parser
|
||||
sliceStream: (token: Token) => Chunk[]
|
||||
sliceSerialize: (token: Token) => string
|
||||
now: () => Point
|
||||
defineSkip: (value: Point) => void
|
||||
write: (slice: Chunk[]) => Event[]
|
||||
}
|
||||
|
||||
export type Resolve = (events: Event[], context: Tokenizer) => Event[]
|
||||
|
||||
export type Tokenize = (context: Tokenizer, effects: Effects) => State
|
||||
|
||||
export interface Construct {
|
||||
name?: string
|
||||
tokenize: Tokenize
|
||||
partial?: boolean
|
||||
resolve?: Resolve
|
||||
resolveTo?: Resolve
|
||||
resolveAll?: Resolve
|
||||
concrete?: boolean
|
||||
interruptible?: boolean
|
||||
lazy?: boolean
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export interface Parser {
|
||||
constructs: Record<CodeAsKey, Construct | Construct[]>
|
||||
content: (from: Point) => Tokenizer
|
||||
document: (from: Point) => Tokenizer
|
||||
flow: (from: Point) => Tokenizer
|
||||
string: (from: Point) => Tokenizer
|
||||
text: (from: Point) => Tokenizer
|
||||
defined: string[]
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
export interface TokenizerThis {
|
||||
events: Event[]
|
||||
interrupt?: boolean
|
||||
lazy?: boolean
|
||||
containerState?: Record<string, unknown>
|
||||
}
|
||||
|
||||
/**
 * `Compile` is the return value of `lib/compile/html.js`
 */
export type Compile = (slice: Event[]) => string

/**
 * https://github.com/micromark/micromark#syntaxextension
 */
export interface SyntaxExtension {
  // Each field maps a character code (see `CodeAsKey`) to the construct(s)
  // it may start, for one part of the parser.
  document?: Record<CodeAsKey, Construct | Construct[]>
  contentInitial?: Record<CodeAsKey, Construct | Construct[]>
  flowInitial?: Record<CodeAsKey, Construct | Construct[]>
  flow?: Record<CodeAsKey, Construct | Construct[]>
  string?: Record<CodeAsKey, Construct | Construct[]>
  text?: Record<CodeAsKey, Construct | Construct[]>
}

/**
 * https://github.com/micromark/micromark#htmlextension
 */
export type HtmlExtension =
  | {enter: Record<Type, () => void>}
  | {exit: Record<Type, () => void>}

// All options: parsing and compiling combined.
export type Options = ParseOptions & CompileOptions

export interface ParseOptions {
  // Array of syntax extensions
  //
  extensions?: SyntaxExtension[]
}
|
||||
|
||||
/**
 * Options for the HTML compiler.
 */
export interface CompileOptions {
  // Value to use for line endings not in `doc` (`string`, default: first line
  // ending or `'\n'`).
  //
  // Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
  // markdown document over to the compiled HTML.
  // In some cases, such as `> a`, CommonMark requires that extra line endings are
  // added: `<blockquote>\n<p>a</p>\n</blockquote>`.
  //
  defaultLineEnding?: '\r' | '\n' | '\r\n'
  // Whether to allow embedded HTML (`boolean`, default: `false`).
  //
  allowDangerousHtml?: boolean
  // Whether to allow potentially dangerous protocols in links and images (`boolean`,
  // default: `false`).
  // URLs relative to the current protocol are always allowed (such as, `image.jpg`).
  // For links, the allowed protocols are `http`, `https`, `irc`, `ircs`, `mailto`,
  // and `xmpp`.
  // For images, the allowed protocols are `http` and `https`.
  //
  allowDangerousProtocol?: boolean
  // Array of HTML extensions
  //
  htmlExtensions?: HtmlExtension[]
}
|
||||
|
||||
// A chunk of input: a non-null character code, or a string.
export type Chunk = NonNullable<Code> | string

// TypeScript will complain that `null` can't be the key of an object. So when a `Code` value is a key of an object, use CodeAsKey instead.
export type CodeAsKey = NonNullable<Code> | 'null'

/**
 * Encodings supported by the buffer class
 *
 * @remarks
 * This is a copy of the typing from Node, copied to prevent Node globals from being needed.
 * Copied from https://github.com/DefinitelyTyped/DefinitelyTyped/blob/a2bc1d868d81733a8969236655fa600bd3651a7b/types/node/globals.d.ts#L174
 */
export type BufferEncoding =
  | 'ascii'
  | 'utf8'
  | 'utf-8'
  | 'utf16le'
  | 'ucs2'
  | 'ucs-2'
  | 'base64'
  | 'latin1'
  | 'binary'
  | 'hex'

/**
 * This is an interface for Node's Buffer: only the part used here
 * (decoding to a string) is declared.
 */
export interface Buffer {
  toString: (encoding?: BufferEncoding) => string
}

// A predicate over a character code (e.g. the ascii-* checks).
export type CodeCheck = (code: Code) => boolean
|
||||
6
node_modules/micromark/dist/stream.d.ts
generated
vendored
Normal file
6
node_modules/micromark/dist/stream.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
import {EventEmitter} from 'events'
import {Options} from './shared-types'

/**
 * Create a streaming interface for micromark: an `EventEmitter` used as a
 * classic writable/readable Node stream (see `dist/stream.js`).
 */
declare function stream(options?: Options): EventEmitter

export default stream
|
||||
103
node_modules/micromark/dist/stream.js
generated
vendored
Normal file
103
node_modules/micromark/dist/stream.js
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
'use strict'
|
||||
|
||||
var events = require('events')
|
||||
var html = require('./compile/html.js')
|
||||
var parse = require('./parse.js')
|
||||
var postprocess = require('./postprocess.js')
|
||||
var preprocess = require('./preprocess.js')
|
||||
|
||||
// Create a streaming interface: an `EventEmitter` acting like a classic
// writable/readable Node stream.  Markdown written to it is tokenized
// incrementally; the compiled HTML is emitted as a single `data` event when
// `end` is called.
function stream(options) {
  var preprocess$1 = preprocess()
  var tokenize = parse(options).document().write
  var compile = html(options)
  var emitter = new events.EventEmitter()
  // Set once `end` is called; further `write`s then throw.
  var ended
  emitter.writable = emitter.readable = true
  emitter.write = write
  emitter.end = end
  emitter.pipe = pipe
  return emitter // Write a chunk into memory.

  // Write a chunk; `encoding` and `callback` are optional, as with Node
  // streams.  The chunk is preprocessed and fed to the document tokenizer.
  function write(chunk, encoding, callback) {
    if (typeof encoding === 'function') {
      callback = encoding
      encoding = undefined
    }

    if (ended) {
      throw new Error('Did not expect `write` after `end`')
    }

    tokenize(preprocess$1(chunk || '', encoding))

    if (callback) {
      callback()
    } // Signal successful write.

    return true
  } // End the writing.
  // Passes all arguments to a final `write`.

  function end(chunk, encoding, callback) {
    write(chunk, encoding, callback)
    // Flush the preprocessor (third argument `true`), compile everything,
    // and emit the result as one `data` event followed by `end`.
    emitter.emit(
      'data',
      compile(postprocess(tokenize(preprocess$1('', encoding, true))))
    )
    emitter.emit('end')
    ended = true
    return true
  } // Pipe the processor into a writable stream.
  // Basically `Stream#pipe`, but inlined and simplified to keep the bundled
  // size down.
  // See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.

  function pipe(dest, options) {
    emitter.on('data', ondata)
    emitter.on('error', onerror)
    emitter.on('end', cleanup)
    emitter.on('close', cleanup) // If the `end` option is not supplied, `dest.end()` will be called when the
    // `end` or `close` events are received.

    if (!dest._isStdio && (!options || options.end !== false)) {
      emitter.on('end', onend)
    }

    dest.on('error', onerror)
    dest.on('close', cleanup)
    dest.emit('pipe', emitter)
    return dest // End destination.

    function onend() {
      if (dest.end) {
        dest.end()
      }
    } // Handle data.

    function ondata(chunk) {
      if (dest.writable) {
        dest.write(chunk)
      }
    } // Clean listeners.

    function cleanup() {
      emitter.removeListener('data', ondata)
      emitter.removeListener('end', onend)
      emitter.removeListener('error', onerror)
      emitter.removeListener('end', cleanup)
      emitter.removeListener('close', cleanup)
      dest.removeListener('error', onerror)
      dest.removeListener('close', cleanup)
    } // Close dangling pipes and handle unheard errors.

    function onerror(error) {
      cleanup()

      if (!emitter.listenerCount('error')) {
        throw error // Unhandled stream error in pipe.
      }
    }
  }
}

module.exports = stream
|
||||
186
node_modules/micromark/dist/tokenize/attention.js
generated
vendored
Normal file
186
node_modules/micromark/dist/tokenize/attention.js
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
'use strict'
|
||||
|
||||
var chunkedPush = require('../util/chunked-push.js')
|
||||
var chunkedSplice = require('../util/chunked-splice.js')
|
||||
var classifyCharacter = require('../util/classify-character.js')
|
||||
var movePoint = require('../util/move-point.js')
|
||||
var resolveAll = require('../util/resolve-all.js')
|
||||
var shallow = require('../util/shallow.js')
|
||||
|
||||
// Attention (emphasis/strong): `tokenize` scans one run of markers;
// `resolveAll` pairs openers with closers over all events afterwards.
var attention = {
  name: 'attention',
  tokenize: tokenizeAttention,
  resolveAll: resolveAllAttention
}

// Pair attention sequences into emphasis/strong groups, rewriting the event
// list in place; leftover sequences are downgraded to plain data.
function resolveAllAttention(events, context) {
  var index = -1
  var open
  var group
  var text
  var openingSequence
  var closingSequence
  // How many markers (1 = emphasis, 2 = strong) to take from each sequence.
  var use
  var nextEvents
  var offset // Walk through all events.
  //
  // Note: performance of this is fine on an mb of normal markdown, but it’s
  // a bottleneck for malicious stuff.

  while (++index < events.length) {
    // Find a token that can close.
    if (
      events[index][0] === 'enter' &&
      events[index][1].type === 'attentionSequence' &&
      events[index][1]._close
    ) {
      open = index // Now walk back to find an opener.

      while (open--) {
        // Find a token that can open the closer.
        if (
          events[open][0] === 'exit' &&
          events[open][1].type === 'attentionSequence' &&
          events[open][1]._open && // If the markers are the same:
          context.sliceSerialize(events[open][1]).charCodeAt(0) ===
            context.sliceSerialize(events[index][1]).charCodeAt(0)
        ) {
          // If the opening can close or the closing can open,
          // and the close size *is not* a multiple of three,
          // but the sum of the opening and closing size *is* multiple of three,
          // then don’t match.
          if (
            (events[open][1]._close || events[index][1]._open) &&
            (events[index][1].end.offset - events[index][1].start.offset) % 3 &&
            !(
              (events[open][1].end.offset -
                events[open][1].start.offset +
                events[index][1].end.offset -
                events[index][1].start.offset) %
              3
            )
          ) {
            continue
          } // Number of markers to use from the sequence.

          use =
            events[open][1].end.offset - events[open][1].start.offset > 1 &&
            events[index][1].end.offset - events[index][1].start.offset > 1
              ? 2
              : 1
          // Synthesize tokens for the marker runs, the text between them, and
          // the whole emphasis/strong group.
          openingSequence = {
            type: use > 1 ? 'strongSequence' : 'emphasisSequence',
            start: movePoint(shallow(events[open][1].end), -use),
            end: shallow(events[open][1].end)
          }
          closingSequence = {
            type: use > 1 ? 'strongSequence' : 'emphasisSequence',
            start: shallow(events[index][1].start),
            end: movePoint(shallow(events[index][1].start), use)
          }
          text = {
            type: use > 1 ? 'strongText' : 'emphasisText',
            start: shallow(events[open][1].end),
            end: shallow(events[index][1].start)
          }
          group = {
            type: use > 1 ? 'strong' : 'emphasis',
            start: shallow(openingSequence.start),
            end: shallow(closingSequence.end)
          }
          // Shrink the original sequences by the markers now consumed.
          events[open][1].end = shallow(openingSequence.start)
          events[index][1].start = shallow(closingSequence.end)
          nextEvents = [] // If there are more markers in the opening, add them before.

          if (events[open][1].end.offset - events[open][1].start.offset) {
            nextEvents = chunkedPush(nextEvents, [
              ['enter', events[open][1], context],
              ['exit', events[open][1], context]
            ])
          } // Opening.

          nextEvents = chunkedPush(nextEvents, [
            ['enter', group, context],
            ['enter', openingSequence, context],
            ['exit', openingSequence, context],
            ['enter', text, context]
          ]) // Between.

          nextEvents = chunkedPush(
            nextEvents,
            resolveAll(
              context.parser.constructs.insideSpan.null,
              events.slice(open + 1, index),
              context
            )
          ) // Closing.

          nextEvents = chunkedPush(nextEvents, [
            ['exit', text, context],
            ['enter', closingSequence, context],
            ['exit', closingSequence, context],
            ['exit', group, context]
          ]) // If there are more markers in the closing, add them after.

          if (events[index][1].end.offset - events[index][1].start.offset) {
            offset = 2
            nextEvents = chunkedPush(nextEvents, [
              ['enter', events[index][1], context],
              ['exit', events[index][1], context]
            ])
          } else {
            offset = 0
          }

          // Replace the old span with the new events and continue scanning
          // right after what was just inserted.
          chunkedSplice(events, open - 1, index - open + 3, nextEvents)
          index = open + nextEvents.length - offset - 2
          break
        }
      }
    }
  } // Remove remaining sequences.

  index = -1

  while (++index < events.length) {
    if (events[index][1].type === 'attentionSequence') {
      events[index][1].type = 'data'
    }
  }

  return events
}
|
||||
|
||||
// Tokenize one run of identical attention markers and record on the token
// whether the run can open and/or close attention, based on the characters
// on either side.
function tokenizeAttention(effects, ok) {
  var before = classifyCharacter(this.previous)
  var marker
  return start

  function start(code) {
    effects.enter('attentionSequence')
    marker = code
    return sequence(code)
  }

  function sequence(code) {
    var token
    var after
    var open
    var close

    // Keep consuming while the same marker repeats.
    if (code === marker) {
      effects.consume(code)
      return sequence
    }

    token = effects.exit('attentionSequence')
    after = classifyCharacter(code)
    // NOTE(review): this assumes `classifyCharacter` returns falsy for
    // "other" characters and `2` for one particular class (presumably
    // punctuation) — confirm in `util/classify-character.js`.
    open = !after || (after === 2 && before)
    close = !before || (before === 2 && after)
    // `42` is `*`; underscores (`_`) get the stricter intraword rules.
    token._open = marker === 42 ? open : open && (before || !close)
    token._close = marker === 42 ? close : close && (after || !open)
    return ok(code)
  }
}

module.exports = attention
|
||||
125
node_modules/micromark/dist/tokenize/autolink.js
generated
vendored
Normal file
125
node_modules/micromark/dist/tokenize/autolink.js
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
'use strict'
|
||||
|
||||
var asciiAlpha = require('../character/ascii-alpha.js')
|
||||
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
|
||||
var asciiAtext = require('../character/ascii-atext.js')
|
||||
var asciiControl = require('../character/ascii-control.js')
|
||||
|
||||
// Autolink construct: `<scheme:url>` or `<user@example.com>`.
var autolink = {
  name: 'autolink',
  tokenize: tokenizeAutolink
}

function tokenizeAutolink(effects, ok, nok) {
  // Counter reused for the scheme length (max 32) and email domain-label
  // length (max 63).
  var size = 1
  return start

  // At the opening marker (presumably `<`): consume it, open the protocol.
  function start(code) {
    effects.enter('autolink')
    effects.enter('autolinkMarker')
    effects.consume(code)
    effects.exit('autolinkMarker')
    effects.enter('autolinkProtocol')
    return open
  }

  // First character inside: a letter may start a scheme; otherwise it can
  // only be email atext.
  function open(code) {
    if (asciiAlpha(code)) {
      effects.consume(code)
      return schemeOrEmailAtext
    }

    return asciiAtext(code) ? emailAtext(code) : nok(code)
  }

  // `43` `+`, `45` `-`, `46` `.` and alphanumerics continue a scheme (and are
  // also valid email atext).
  function schemeOrEmailAtext(code) {
    return code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)
      ? schemeInsideOrEmailAtext(code)
      : emailAtext(code)
  }

  // `58` is `:`: the scheme ends and the URL begins.
  function schemeInsideOrEmailAtext(code) {
    if (code === 58) {
      effects.consume(code)
      return urlInside
    }

    if (
      (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) &&
      size++ < 32
    ) {
      effects.consume(code)
      return schemeInsideOrEmailAtext
    }

    return emailAtext(code)
  }

  // Inside the URL until the closing `>` (62); space (32), `<` (60), and
  // ASCII control characters are forbidden.
  function urlInside(code) {
    if (code === 62) {
      effects.exit('autolinkProtocol')
      return end(code)
    }

    if (code === 32 || code === 60 || asciiControl(code)) {
      return nok(code)
    }

    effects.consume(code)
    return urlInside
  }

  // In the local part of an email address; `64` is `@`.
  function emailAtext(code) {
    if (code === 64) {
      effects.consume(code)
      size = 0
      return emailAtSignOrDot
    }

    if (asciiAtext(code)) {
      effects.consume(code)
      return emailAtext
    }

    return nok(code)
  }

  // Right after `@` or `.`: a domain label must start alphanumeric.
  function emailAtSignOrDot(code) {
    return asciiAlphanumeric(code) ? emailLabel(code) : nok(code)
  }

  // In a domain label; `46` `.` starts the next label, `62` `>` finishes.
  function emailLabel(code) {
    if (code === 46) {
      effects.consume(code)
      size = 0
      return emailAtSignOrDot
    }

    if (code === 62) {
      // Exit, then change the type.
      effects.exit('autolinkProtocol').type = 'autolinkEmail'
      return end(code)
    }

    return emailValue(code)
  }

  // Rest of a label: alphanumerics and `-` (45), at most 63 characters; a
  // label must not end in a dash (a dash routes back here, not to
  // `emailLabel`).
  function emailValue(code) {
    if ((code === 45 || asciiAlphanumeric(code)) && size++ < 63) {
      effects.consume(code)
      return code === 45 ? emailValue : emailLabel
    }

    return nok(code)
  }

  // At the closing `>`: consume it and finish the autolink.
  function end(code) {
    effects.enter('autolinkMarker')
    effects.consume(code)
    effects.exit('autolinkMarker')
    effects.exit('autolink')
    return ok
  }
}

module.exports = autolink
|
||||
67
node_modules/micromark/dist/tokenize/block-quote.js
generated
vendored
Normal file
67
node_modules/micromark/dist/tokenize/block-quote.js
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
'use strict'
|
||||
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// Block quote container construct.
var blockQuote = {
  name: 'blockQuote',
  tokenize: tokenizeBlockQuoteStart,
  continuation: {
    tokenize: tokenizeBlockQuoteContinuation
  },
  exit: exit
}

// Tokenize a block quote prefix: `>` (62), optionally followed by one space.
function tokenizeBlockQuoteStart(effects, ok, nok) {
  var self = this
  return start

  function start(code) {
    if (code === 62) {
      // Only open the container token once per container.
      if (!self.containerState.open) {
        effects.enter('blockQuote', {
          _container: true
        })
        self.containerState.open = true
      }

      effects.enter('blockQuotePrefix')
      effects.enter('blockQuoteMarker')
      effects.consume(code)
      effects.exit('blockQuoteMarker')
      return after
    }

    return nok(code)
  }

  // After `>`: consume a single space of prefix whitespace if present.
  function after(code) {
    if (markdownSpace(code)) {
      effects.enter('blockQuotePrefixWhitespace')
      effects.consume(code)
      effects.exit('blockQuotePrefixWhitespace')
      effects.exit('blockQuotePrefix')
      return ok
    }

    effects.exit('blockQuotePrefix')
    return ok(code)
  }
}
|
||||
|
||||
// Continuation: a block quote continues on the next line when, after an
// optional line prefix (bounded at 4 unless indented code is disabled),
// another `>` prefix matches.
function tokenizeBlockQuoteContinuation(effects, ok, nok) {
  var tryPrefix = effects.attempt(blockQuote, ok, nok)
  var codeIndentedDisabled =
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
  return factorySpace(
    effects,
    tryPrefix,
    'linePrefix',
    codeIndentedDisabled ? undefined : 4
  )
}
|
||||
|
||||
// Container exit hook: close the `blockQuote` token opened at start.
function exit(effects) {
  effects.exit('blockQuote')
}

module.exports = blockQuote
|
||||
34
node_modules/micromark/dist/tokenize/character-escape.js
generated
vendored
Normal file
34
node_modules/micromark/dist/tokenize/character-escape.js
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
'use strict'
|
||||
|
||||
var asciiPunctuation = require('../character/ascii-punctuation.js')
|
||||
|
||||
// Character escape: an escape marker (presumably `\`, keyed by the construct
// map) followed by exactly one ASCII punctuation character.
var characterEscape = {
  name: 'characterEscape',
  tokenize: tokenizeCharacterEscape
}

function tokenizeCharacterEscape(effects, ok, nok) {
  return start

  // At the marker: consume it and inspect the next character.
  function start(code) {
    effects.enter('characterEscape')
    effects.enter('escapeMarker')
    effects.consume(code)
    effects.exit('escapeMarker')
    return open
  }

  // After the marker: only ASCII punctuation may be escaped.
  function open(code) {
    if (!asciiPunctuation(code)) {
      return nok(code)
    }

    effects.enter('characterEscapeValue')
    effects.consume(code)
    effects.exit('characterEscapeValue')
    effects.exit('characterEscape')
    return ok
  }
}

module.exports = characterEscape
|
||||
94
node_modules/micromark/dist/tokenize/character-reference.js
generated
vendored
Normal file
94
node_modules/micromark/dist/tokenize/character-reference.js
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
'use strict'
|
||||
|
||||
var decodeEntity = require('parse-entities/decode-entity.js')
|
||||
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
|
||||
var asciiDigit = require('../character/ascii-digit.js')
|
||||
var asciiHexDigit = require('../character/ascii-hex-digit.js')
|
||||
|
||||
// CJS/ESM interop shim: a value that already looks like a module namespace
// (a truthy object carrying a `default` property) is returned unchanged;
// anything else is wrapped so it is reachable at `.default`.
function _interopDefaultLegacy(e) {
  var looksLikeNamespace = Boolean(e) && typeof e === 'object' && 'default' in e
  return looksLikeNamespace ? e : {default: e}
}
|
||||
|
||||
// Normalize the `decode-entity` export so the function is always at
// `.default`, whether it was published as CJS or ESM.
var decodeEntity__default = /*#__PURE__*/ _interopDefaultLegacy(decodeEntity)

// Character reference construct: `&name;`, `&#123;`, or `&#xAB;`.
var characterReference = {
  name: 'characterReference',
  tokenize: tokenizeCharacterReference
}
|
||||
|
||||
// Tokenize a character reference: named (`&amp;`), decimal (`&#123;`), or
// hexadecimal (`&#xAB;`).
function tokenizeCharacterReference(effects, ok, nok) {
  var self = this
  // Characters consumed in the value so far.
  var size = 0
  // Maximum value length: 31 (named), 6 (hex), or 7 (decimal).
  var max
  // Character test for the value: alphanumeric, hex digit, or digit.
  var test
  return start

  // At the marker (presumably `&`): consume it.
  function start(code) {
    effects.enter('characterReference')
    effects.enter('characterReferenceMarker')
    effects.consume(code)
    effects.exit('characterReferenceMarker')
    return open
  }

  // After the marker: `35` is `#` (numeric); otherwise a named reference.
  function open(code) {
    if (code === 35) {
      effects.enter('characterReferenceMarkerNumeric')
      effects.consume(code)
      effects.exit('characterReferenceMarkerNumeric')
      return numeric
    }

    effects.enter('characterReferenceValue')
    max = 31
    test = asciiAlphanumeric
    return value(code)
  }

  // After `&#`: `88`/`120` are `X`/`x` (hexadecimal); otherwise decimal.
  function numeric(code) {
    if (code === 88 || code === 120) {
      effects.enter('characterReferenceMarkerHexadecimal')
      effects.consume(code)
      effects.exit('characterReferenceMarkerHexadecimal')
      effects.enter('characterReferenceValue')
      max = 6
      test = asciiHexDigit
      return value
    }

    effects.enter('characterReferenceValue')
    max = 7
    test = asciiDigit
    return value(code)
  }

  // In the value, terminated by a non-empty `;` (59).
  function value(code) {
    var token

    if (code === 59 && size) {
      token = effects.exit('characterReferenceValue')

      // A named reference must decode to a known entity; unknown names fail.
      if (
        test === asciiAlphanumeric &&
        !decodeEntity__default['default'](self.sliceSerialize(token))
      ) {
        return nok(code)
      }

      effects.enter('characterReferenceMarker')
      effects.consume(code)
      effects.exit('characterReferenceMarker')
      effects.exit('characterReference')
      return ok
    }

    if (test(code) && size++ < max) {
      effects.consume(code)
      return value
    }

    return nok(code)
  }
}

module.exports = characterReference
|
||||
176
node_modules/micromark/dist/tokenize/code-fenced.js
generated
vendored
Normal file
176
node_modules/micromark/dist/tokenize/code-fenced.js
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var prefixSize = require('../util/prefix-size.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// Fenced code construct; `concrete` so its contents are not parsed further.
var codeFenced = {
  name: 'codeFenced',
  tokenize: tokenizeCodeFenced,
  concrete: true
}

function tokenizeCodeFenced(effects, ok, nok) {
  var self = this
  // Partial construct used to try a closing fence after each line ending.
  var closingFenceConstruct = {
    tokenize: tokenizeClosingFence,
    partial: true
  }
  // Indentation before the opening fence; up to this much (plus one) may be
  // stripped from each content line.
  var initialPrefix = prefixSize(this.events, 'linePrefix')
  var sizeOpen = 0
  // The fence marker character, fixed by the first code seen (96 is a
  // backtick; presumably `~` is the other option — keyed by the construct
  // map, not visible here).
  var marker
  return start

  function start(code) {
    effects.enter('codeFenced')
    effects.enter('codeFencedFence')
    effects.enter('codeFencedFenceSequence')
    marker = code
    return sequenceOpen(code)
  }

  // Count the opening fence; fewer than 3 markers is not a fence.
  function sequenceOpen(code) {
    if (code === marker) {
      effects.consume(code)
      sizeOpen++
      return sequenceOpen
    }

    effects.exit('codeFencedFenceSequence')
    return sizeOpen < 3
      ? nok(code)
      : factorySpace(effects, infoOpen, 'whitespace')(code)
  }

  function infoOpen(code) {
    if (code === null || markdownLineEnding(code)) {
      return openAfter(code)
    }

    effects.enter('codeFencedFenceInfo')
    effects.enter('chunkString', {
      contentType: 'string'
    })
    return info(code)
  }

  // The info string (e.g. the language); the fence character itself (only
  // checked for backticks, 96) is not allowed inside it.
  function info(code) {
    if (code === null || markdownLineEndingOrSpace(code)) {
      effects.exit('chunkString')
      effects.exit('codeFencedFenceInfo')
      return factorySpace(effects, infoAfter, 'whitespace')(code)
    }

    if (code === 96 && code === marker) return nok(code)
    effects.consume(code)
    return info
  }

  function infoAfter(code) {
    if (code === null || markdownLineEnding(code)) {
      return openAfter(code)
    }

    effects.enter('codeFencedFenceMeta')
    effects.enter('chunkString', {
      contentType: 'string'
    })
    return meta(code)
  }

  // Everything after the info string, up to the end of the line.
  function meta(code) {
    if (code === null || markdownLineEnding(code)) {
      effects.exit('chunkString')
      effects.exit('codeFencedFenceMeta')
      return openAfter(code)
    }

    if (code === 96 && code === marker) return nok(code)
    effects.consume(code)
    return meta
  }

  // After the opening fence: when interrupting we’re done; otherwise content.
  function openAfter(code) {
    effects.exit('codeFencedFence')
    return self.interrupt ? ok(code) : content(code)
  }

  // Between lines: EOF ends the block; after a line ending, try the closing
  // fence, else (optionally stripping the initial prefix) more content.
  function content(code) {
    if (code === null) {
      return after(code)
    }

    if (markdownLineEnding(code)) {
      effects.enter('lineEnding')
      effects.consume(code)
      effects.exit('lineEnding')
      return effects.attempt(
        closingFenceConstruct,
        after,
        initialPrefix
          ? factorySpace(effects, content, 'linePrefix', initialPrefix + 1)
          : content
      )
    }

    effects.enter('codeFlowValue')
    return contentContinue(code)
  }

  function contentContinue(code) {
    if (code === null || markdownLineEnding(code)) {
      effects.exit('codeFlowValue')
      return content(code)
    }

    effects.consume(code)
    return contentContinue
  }

  function after(code) {
    effects.exit('codeFenced')
    return ok(code)
  }

  // Partial: a closing fence — optional indent, at least `sizeOpen` markers,
  // optional trailing whitespace, then eol/eof.
  function tokenizeClosingFence(effects, ok, nok) {
    var size = 0
    return factorySpace(
      effects,
      closingSequenceStart,
      'linePrefix',
      this.parser.constructs.disable.null.indexOf('codeIndented') > -1
        ? undefined
        : 4
    )

    function closingSequenceStart(code) {
      effects.enter('codeFencedFence')
      effects.enter('codeFencedFenceSequence')
      return closingSequence(code)
    }

    function closingSequence(code) {
      if (code === marker) {
        effects.consume(code)
        size++
        return closingSequence
      }

      // A closing fence must be at least as long as the opening fence.
      if (size < sizeOpen) return nok(code)
      effects.exit('codeFencedFenceSequence')
      return factorySpace(effects, closingSequenceEnd, 'whitespace')(code)
    }

    function closingSequenceEnd(code) {
      if (code === null || markdownLineEnding(code)) {
        effects.exit('codeFencedFence')
        return ok(code)
      }

      return nok(code)
    }
  }
}

module.exports = codeFenced
|
||||
72
node_modules/micromark/dist/tokenize/code-indented.js
generated
vendored
Normal file
72
node_modules/micromark/dist/tokenize/code-indented.js
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var chunkedSplice = require('../util/chunked-splice.js')
|
||||
var prefixSize = require('../util/prefix-size.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// Indented code: lines prefixed with at least four characters of indentation.
var codeIndented = {
  name: 'codeIndented',
  tokenize: tokenizeCodeIndented,
  resolve: resolveCodeIndented
}
// Partial construct matching the indented prefix of one line.
var indentedContentConstruct = {
  tokenize: tokenizeIndentedContent,
  partial: true
}
|
||||
|
||||
// Wrap all events produced for the chunk in one `codeIndented` token
// spanning from the first event’s start to the last event’s end.
function resolveCodeIndented(events, context) {
  var code = {
    type: 'codeIndented',
    start: events[0][1].start,
    end: events[events.length - 1][1].end
  }
  chunkedSplice(events, 0, 0, [['enter', code, context]])
  chunkedSplice(events, events.length, 0, [['exit', code, context]])
  return events
}
|
||||
|
||||
// Tokenize indented code: one or more lines, each preceded by the indented
// prefix matched by `indentedContentConstruct`.
function tokenizeCodeIndented(effects, ok, nok) {
  return effects.attempt(indentedContentConstruct, afterPrefix, nok)

  // After a prefix: EOF ends the block; a line ending tries another indented
  // line (falling back to `ok` when the prefix fails); otherwise code value.
  function afterPrefix(code) {
    if (code === null) {
      return ok(code)
    }

    if (markdownLineEnding(code)) {
      return effects.attempt(indentedContentConstruct, afterPrefix, ok)(code)
    }

    effects.enter('codeFlowValue')
    return content(code)
  }

  function content(code) {
    if (code === null || markdownLineEnding(code)) {
      effects.exit('codeFlowValue')
      return afterPrefix(code)
    }

    effects.consume(code)
    return content
  }
}
|
||||
|
||||
// Partial: consume a line prefix (bounded by `4 + 1`); blank lines are
// skipped and retried, but a non-blank line needs a prefix of at least 4
// to count as indented content.
function tokenizeIndentedContent(effects, ok, nok) {
  var self = this
  return factorySpace(effects, afterPrefix, 'linePrefix', 4 + 1)

  function afterPrefix(code) {
    if (markdownLineEnding(code)) {
      // Blank (or whitespace-only) line: consume the ending and retry.
      effects.enter('lineEnding')
      effects.consume(code)
      effects.exit('lineEnding')
      return factorySpace(effects, afterPrefix, 'linePrefix', 4 + 1)
    }

    return prefixSize(self.events, 'linePrefix') < 4 ? nok(code) : ok(code)
  }
}

module.exports = codeIndented
|
||||
162
node_modules/micromark/dist/tokenize/code-text.js
generated
vendored
Normal file
162
node_modules/micromark/dist/tokenize/code-text.js
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
|
||||
// Code (text): backtick-delimited inline code spans.
var codeText = {
  name: 'codeText',
  tokenize: tokenizeCodeText,
  resolve: resolveCodeText,
  previous: previous
}

// Post-process a code span’s events: when both ends are a space/eol and there
// is data between them, mark one unit at each end as padding; then merge
// adjacent spaces and data into single `codeTextData` tokens.
function resolveCodeText(events) {
  var tailExitIndex = events.length - 4
  var headEnterIndex = 3
  var index
  // Start index of the current run being merged (undefined = not in a run).
  var enter // If we start and end with an EOL or a space.

  if (
    (events[headEnterIndex][1].type === 'lineEnding' ||
      events[headEnterIndex][1].type === 'space') &&
    (events[tailExitIndex][1].type === 'lineEnding' ||
      events[tailExitIndex][1].type === 'space')
  ) {
    index = headEnterIndex // And we have data.

    while (++index < tailExitIndex) {
      if (events[index][1].type === 'codeTextData') {
        // Then we have padding.
        events[tailExitIndex][1].type = events[headEnterIndex][1].type =
          'codeTextPadding'
        headEnterIndex += 2
        tailExitIndex -= 2
        break
      }
    }
  } // Merge adjacent spaces and data.

  index = headEnterIndex - 1
  tailExitIndex++

  while (++index <= tailExitIndex) {
    if (enter === undefined) {
      if (index !== tailExitIndex && events[index][1].type !== 'lineEnding') {
        enter = index
      }
    } else if (
      index === tailExitIndex ||
      events[index][1].type === 'lineEnding'
    ) {
      events[enter][1].type = 'codeTextData'

      // Collapse the run: extend the first token to the run’s end and drop
      // the events it swallowed.
      if (index !== enter + 2) {
        events[enter][1].end = events[index - 1][1].end
        events.splice(enter + 2, index - enter - 2)
        tailExitIndex -= index - enter - 2
        index = enter + 2
      }

      enter = undefined
    }
  }

  return events
}
|
||||
|
||||
// `previous` hook for code spans: any character may precede one, except an
// unescaped backtick (96).
function previous(code) {
  if (code !== 96) {
    return true
  }

  // If there is a previous code, there will always be a tail.
  var tail = this.events[this.events.length - 1]
  return tail[1].type === 'characterEscape'
}
|
||||
|
||||
// Tokenize a code span: an opening backtick (96) sequence, content (data,
// spaces, line endings), and a closing sequence of exactly the same size.
function tokenizeCodeText(effects, ok, nok) {
  // Length of the opening sequence; the closer must match it exactly.
  var sizeOpen = 0
  var size
  var token
  return start

  function start(code) {
    effects.enter('codeText')
    effects.enter('codeTextSequence')
    return openingSequence(code)
  }

  function openingSequence(code) {
    if (code === 96) {
      effects.consume(code)
      sizeOpen++
      return openingSequence
    }

    effects.exit('codeTextSequence')
    return gap(code)
  }

  // Between pieces of content.
  function gap(code) {
    // EOF.
    if (code === null) {
      return nok(code)
    } // Closing fence?
    // Could also be data.

    if (code === 96) {
      token = effects.enter('codeTextSequence')
      size = 0
      return closingSequence(code)
    } // Tabs don’t work, and virtual spaces don’t make sense.

    if (code === 32) {
      effects.enter('space')
      effects.consume(code)
      effects.exit('space')
      return gap
    }

    if (markdownLineEnding(code)) {
      effects.enter('lineEnding')
      effects.consume(code)
      effects.exit('lineEnding')
      return gap
    } // Data.

    effects.enter('codeTextData')
    return data(code)
  } // In code.

  function data(code) {
    if (
      code === null ||
      code === 32 ||
      code === 96 ||
      markdownLineEnding(code)
    ) {
      effects.exit('codeTextData')
      return gap(code)
    }

    effects.consume(code)
    return data
  } // Closing fence.

  function closingSequence(code) {
    // More.
    if (code === 96) {
      effects.consume(code)
      size++
      return closingSequence
    } // Done!

    if (size === sizeOpen) {
      effects.exit('codeTextSequence')
      effects.exit('codeText')
      return ok(code)
    } // More or less accents: mark as data.

    token.type = 'codeTextData'
    return data(code)
  }
}

module.exports = codeText
|
||||
99
node_modules/micromark/dist/tokenize/content.js
generated
vendored
Normal file
99
node_modules/micromark/dist/tokenize/content.js
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var prefixSize = require('../util/prefix-size.js')
|
||||
var subtokenize = require('../util/subtokenize.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// No name because it must not be turned off.
|
||||
var content = {
|
||||
tokenize: tokenizeContent,
|
||||
resolve: resolveContent,
|
||||
interruptible: true,
|
||||
lazy: true
|
||||
}
|
||||
var continuationConstruct = {
|
||||
tokenize: tokenizeContinuation,
|
||||
partial: true
|
||||
} // Content is transparent: it’s parsed right now. That way, definitions are also
|
||||
// parsed right now: before text in paragraphs (specifically, media) are parsed.
|
||||
|
||||
function resolveContent(events) {
|
||||
subtokenize(events)
|
||||
return events
|
||||
}
|
||||
|
||||
function tokenizeContent(effects, ok) {
|
||||
var previous
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('content')
|
||||
previous = effects.enter('chunkContent', {
|
||||
contentType: 'content'
|
||||
})
|
||||
return data(code)
|
||||
}
|
||||
|
||||
function data(code) {
|
||||
if (code === null) {
|
||||
return contentEnd(code)
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
return effects.check(
|
||||
continuationConstruct,
|
||||
contentContinue,
|
||||
contentEnd
|
||||
)(code)
|
||||
} // Data.
|
||||
|
||||
effects.consume(code)
|
||||
return data
|
||||
}
|
||||
|
||||
function contentEnd(code) {
|
||||
effects.exit('chunkContent')
|
||||
effects.exit('content')
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
function contentContinue(code) {
|
||||
effects.consume(code)
|
||||
effects.exit('chunkContent')
|
||||
previous = previous.next = effects.enter('chunkContent', {
|
||||
contentType: 'content',
|
||||
previous: previous
|
||||
})
|
||||
return data
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeContinuation(effects, ok, nok) {
|
||||
var self = this
|
||||
return startLookahead
|
||||
|
||||
function startLookahead(code) {
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return factorySpace(effects, prefixed, 'linePrefix')
|
||||
}
|
||||
|
||||
function prefixed(code) {
|
||||
if (code === null || markdownLineEnding(code)) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (
|
||||
self.parser.constructs.disable.null.indexOf('codeIndented') > -1 ||
|
||||
prefixSize(self.events, 'linePrefix') < 4
|
||||
) {
|
||||
return effects.interrupt(self.parser.constructs.flow, nok, ok)(code)
|
||||
}
|
||||
|
||||
return ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = content
|
||||
115
node_modules/micromark/dist/tokenize/definition.js
generated
vendored
Normal file
115
node_modules/micromark/dist/tokenize/definition.js
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var normalizeIdentifier = require('../util/normalize-identifier.js')
|
||||
var factoryDestination = require('./factory-destination.js')
|
||||
var factoryLabel = require('./factory-label.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
var factoryWhitespace = require('./factory-whitespace.js')
|
||||
var factoryTitle = require('./factory-title.js')
|
||||
|
||||
var definition = {
|
||||
name: 'definition',
|
||||
tokenize: tokenizeDefinition
|
||||
}
|
||||
var titleConstruct = {
|
||||
tokenize: tokenizeTitle,
|
||||
partial: true
|
||||
}
|
||||
|
||||
function tokenizeDefinition(effects, ok, nok) {
|
||||
var self = this
|
||||
var identifier
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('definition')
|
||||
return factoryLabel.call(
|
||||
self,
|
||||
effects,
|
||||
labelAfter,
|
||||
nok,
|
||||
'definitionLabel',
|
||||
'definitionLabelMarker',
|
||||
'definitionLabelString'
|
||||
)(code)
|
||||
}
|
||||
|
||||
function labelAfter(code) {
|
||||
identifier = normalizeIdentifier(
|
||||
self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1)
|
||||
)
|
||||
|
||||
if (code === 58) {
|
||||
effects.enter('definitionMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('definitionMarker') // Note: blank lines can’t exist in content.
|
||||
|
||||
return factoryWhitespace(
|
||||
effects,
|
||||
factoryDestination(
|
||||
effects,
|
||||
effects.attempt(
|
||||
titleConstruct,
|
||||
factorySpace(effects, after, 'whitespace'),
|
||||
factorySpace(effects, after, 'whitespace')
|
||||
),
|
||||
nok,
|
||||
'definitionDestination',
|
||||
'definitionDestinationLiteral',
|
||||
'definitionDestinationLiteralMarker',
|
||||
'definitionDestinationRaw',
|
||||
'definitionDestinationString'
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function after(code) {
|
||||
if (code === null || markdownLineEnding(code)) {
|
||||
effects.exit('definition')
|
||||
|
||||
if (self.parser.defined.indexOf(identifier) < 0) {
|
||||
self.parser.defined.push(identifier)
|
||||
}
|
||||
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeTitle(effects, ok, nok) {
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
return markdownLineEndingOrSpace(code)
|
||||
? factoryWhitespace(effects, before)(code)
|
||||
: nok(code)
|
||||
}
|
||||
|
||||
function before(code) {
|
||||
if (code === 34 || code === 39 || code === 40) {
|
||||
return factoryTitle(
|
||||
effects,
|
||||
factorySpace(effects, after, 'whitespace'),
|
||||
nok,
|
||||
'definitionTitle',
|
||||
'definitionTitleMarker',
|
||||
'definitionTitleString'
|
||||
)(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function after(code) {
|
||||
return code === null || markdownLineEnding(code) ? ok(code) : nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = definition
|
||||
131
node_modules/micromark/dist/tokenize/factory-destination.js
generated
vendored
Normal file
131
node_modules/micromark/dist/tokenize/factory-destination.js
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
'use strict'
|
||||
|
||||
var asciiControl = require('../character/ascii-control.js')
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
|
||||
// eslint-disable-next-line max-params
|
||||
function destinationFactory(
|
||||
effects,
|
||||
ok,
|
||||
nok,
|
||||
type,
|
||||
literalType,
|
||||
literalMarkerType,
|
||||
rawType,
|
||||
stringType,
|
||||
max
|
||||
) {
|
||||
var limit = max || Infinity
|
||||
var balance = 0
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
if (code === 60) {
|
||||
effects.enter(type)
|
||||
effects.enter(literalType)
|
||||
effects.enter(literalMarkerType)
|
||||
effects.consume(code)
|
||||
effects.exit(literalMarkerType)
|
||||
return destinationEnclosedBefore
|
||||
}
|
||||
|
||||
if (asciiControl(code) || code === 41) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
effects.enter(type)
|
||||
effects.enter(rawType)
|
||||
effects.enter(stringType)
|
||||
effects.enter('chunkString', {
|
||||
contentType: 'string'
|
||||
})
|
||||
return destinationRaw(code)
|
||||
}
|
||||
|
||||
function destinationEnclosedBefore(code) {
|
||||
if (code === 62) {
|
||||
effects.enter(literalMarkerType)
|
||||
effects.consume(code)
|
||||
effects.exit(literalMarkerType)
|
||||
effects.exit(literalType)
|
||||
effects.exit(type)
|
||||
return ok
|
||||
}
|
||||
|
||||
effects.enter(stringType)
|
||||
effects.enter('chunkString', {
|
||||
contentType: 'string'
|
||||
})
|
||||
return destinationEnclosed(code)
|
||||
}
|
||||
|
||||
function destinationEnclosed(code) {
|
||||
if (code === 62) {
|
||||
effects.exit('chunkString')
|
||||
effects.exit(stringType)
|
||||
return destinationEnclosedBefore(code)
|
||||
}
|
||||
|
||||
if (code === null || code === 60 || markdownLineEnding(code)) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return code === 92 ? destinationEnclosedEscape : destinationEnclosed
|
||||
}
|
||||
|
||||
function destinationEnclosedEscape(code) {
|
||||
if (code === 60 || code === 62 || code === 92) {
|
||||
effects.consume(code)
|
||||
return destinationEnclosed
|
||||
}
|
||||
|
||||
return destinationEnclosed(code)
|
||||
}
|
||||
|
||||
function destinationRaw(code) {
|
||||
if (code === 40) {
|
||||
if (++balance > limit) return nok(code)
|
||||
effects.consume(code)
|
||||
return destinationRaw
|
||||
}
|
||||
|
||||
if (code === 41) {
|
||||
if (!balance--) {
|
||||
effects.exit('chunkString')
|
||||
effects.exit(stringType)
|
||||
effects.exit(rawType)
|
||||
effects.exit(type)
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return destinationRaw
|
||||
}
|
||||
|
||||
if (code === null || markdownLineEndingOrSpace(code)) {
|
||||
if (balance) return nok(code)
|
||||
effects.exit('chunkString')
|
||||
effects.exit(stringType)
|
||||
effects.exit(rawType)
|
||||
effects.exit(type)
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
if (asciiControl(code)) return nok(code)
|
||||
effects.consume(code)
|
||||
return code === 92 ? destinationRawEscape : destinationRaw
|
||||
}
|
||||
|
||||
function destinationRawEscape(code) {
|
||||
if (code === 40 || code === 41 || code === 92) {
|
||||
effects.consume(code)
|
||||
return destinationRaw
|
||||
}
|
||||
|
||||
return destinationRaw(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = destinationFactory
|
||||
88
node_modules/micromark/dist/tokenize/factory-label.js
generated
vendored
Normal file
88
node_modules/micromark/dist/tokenize/factory-label.js
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
|
||||
// eslint-disable-next-line max-params
|
||||
function labelFactory(effects, ok, nok, type, markerType, stringType) {
|
||||
var self = this
|
||||
var size = 0
|
||||
var data
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter(type)
|
||||
effects.enter(markerType)
|
||||
effects.consume(code)
|
||||
effects.exit(markerType)
|
||||
effects.enter(stringType)
|
||||
return atBreak
|
||||
}
|
||||
|
||||
function atBreak(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 91 ||
|
||||
(code === 93 && !data) ||
|
||||
/* c8 ignore next */
|
||||
(code === 94 &&
|
||||
/* c8 ignore next */
|
||||
!size &&
|
||||
/* c8 ignore next */
|
||||
'_hiddenFootnoteSupport' in self.parser.constructs) ||
|
||||
size > 999
|
||||
) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 93) {
|
||||
effects.exit(stringType)
|
||||
effects.enter(markerType)
|
||||
effects.consume(code)
|
||||
effects.exit(markerType)
|
||||
effects.exit(type)
|
||||
return ok
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return atBreak
|
||||
}
|
||||
|
||||
effects.enter('chunkString', {
|
||||
contentType: 'string'
|
||||
})
|
||||
return label(code)
|
||||
}
|
||||
|
||||
function label(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 91 ||
|
||||
code === 93 ||
|
||||
markdownLineEnding(code) ||
|
||||
size++ > 999
|
||||
) {
|
||||
effects.exit('chunkString')
|
||||
return atBreak(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
data = data || !markdownSpace(code)
|
||||
return code === 92 ? labelEscape : label
|
||||
}
|
||||
|
||||
function labelEscape(code) {
|
||||
if (code === 91 || code === 92 || code === 93) {
|
||||
effects.consume(code)
|
||||
size++
|
||||
return label
|
||||
}
|
||||
|
||||
return label(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = labelFactory
|
||||
30
node_modules/micromark/dist/tokenize/factory-space.js
generated
vendored
Normal file
30
node_modules/micromark/dist/tokenize/factory-space.js
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
'use strict'
|
||||
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
|
||||
function spaceFactory(effects, ok, type, max) {
|
||||
var limit = max ? max - 1 : Infinity
|
||||
var size = 0
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
if (markdownSpace(code)) {
|
||||
effects.enter(type)
|
||||
return prefix(code)
|
||||
}
|
||||
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
function prefix(code) {
|
||||
if (markdownSpace(code) && size++ < limit) {
|
||||
effects.consume(code)
|
||||
return prefix
|
||||
}
|
||||
|
||||
effects.exit(type)
|
||||
return ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = spaceFactory
|
||||
75
node_modules/micromark/dist/tokenize/factory-title.js
generated
vendored
Normal file
75
node_modules/micromark/dist/tokenize/factory-title.js
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
function titleFactory(effects, ok, nok, type, markerType, stringType) {
|
||||
var marker
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter(type)
|
||||
effects.enter(markerType)
|
||||
effects.consume(code)
|
||||
effects.exit(markerType)
|
||||
marker = code === 40 ? 41 : code
|
||||
return atFirstTitleBreak
|
||||
}
|
||||
|
||||
function atFirstTitleBreak(code) {
|
||||
if (code === marker) {
|
||||
effects.enter(markerType)
|
||||
effects.consume(code)
|
||||
effects.exit(markerType)
|
||||
effects.exit(type)
|
||||
return ok
|
||||
}
|
||||
|
||||
effects.enter(stringType)
|
||||
return atTitleBreak(code)
|
||||
}
|
||||
|
||||
function atTitleBreak(code) {
|
||||
if (code === marker) {
|
||||
effects.exit(stringType)
|
||||
return atFirstTitleBreak(marker)
|
||||
}
|
||||
|
||||
if (code === null) {
|
||||
return nok(code)
|
||||
} // Note: blank lines can’t exist in content.
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return factorySpace(effects, atTitleBreak, 'linePrefix')
|
||||
}
|
||||
|
||||
effects.enter('chunkString', {
|
||||
contentType: 'string'
|
||||
})
|
||||
return title(code)
|
||||
}
|
||||
|
||||
function title(code) {
|
||||
if (code === marker || code === null || markdownLineEnding(code)) {
|
||||
effects.exit('chunkString')
|
||||
return atTitleBreak(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return code === 92 ? titleEscape : title
|
||||
}
|
||||
|
||||
function titleEscape(code) {
|
||||
if (code === marker || code === 92) {
|
||||
effects.consume(code)
|
||||
return title
|
||||
}
|
||||
|
||||
return title(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = titleFactory
|
||||
32
node_modules/micromark/dist/tokenize/factory-whitespace.js
generated
vendored
Normal file
32
node_modules/micromark/dist/tokenize/factory-whitespace.js
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
function whitespaceFactory(effects, ok) {
|
||||
var seen
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
seen = true
|
||||
return start
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
return factorySpace(
|
||||
effects,
|
||||
start,
|
||||
seen ? 'linePrefix' : 'lineSuffix'
|
||||
)(code)
|
||||
}
|
||||
|
||||
return ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = whitespaceFactory
|
||||
31
node_modules/micromark/dist/tokenize/hard-break-escape.js
generated
vendored
Normal file
31
node_modules/micromark/dist/tokenize/hard-break-escape.js
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
|
||||
var hardBreakEscape = {
|
||||
name: 'hardBreakEscape',
|
||||
tokenize: tokenizeHardBreakEscape
|
||||
}
|
||||
|
||||
function tokenizeHardBreakEscape(effects, ok, nok) {
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('hardBreakEscape')
|
||||
effects.enter('escapeMarker')
|
||||
effects.consume(code)
|
||||
return open
|
||||
}
|
||||
|
||||
function open(code) {
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.exit('escapeMarker')
|
||||
effects.exit('hardBreakEscape')
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = hardBreakEscape
|
||||
129
node_modules/micromark/dist/tokenize/heading-atx.js
generated
vendored
Normal file
129
node_modules/micromark/dist/tokenize/heading-atx.js
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var chunkedSplice = require('../util/chunked-splice.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
var headingAtx = {
|
||||
name: 'headingAtx',
|
||||
tokenize: tokenizeHeadingAtx,
|
||||
resolve: resolveHeadingAtx
|
||||
}
|
||||
|
||||
function resolveHeadingAtx(events, context) {
|
||||
var contentEnd = events.length - 2
|
||||
var contentStart = 3
|
||||
var content
|
||||
var text // Prefix whitespace, part of the opening.
|
||||
|
||||
if (events[contentStart][1].type === 'whitespace') {
|
||||
contentStart += 2
|
||||
} // Suffix whitespace, part of the closing.
|
||||
|
||||
if (
|
||||
contentEnd - 2 > contentStart &&
|
||||
events[contentEnd][1].type === 'whitespace'
|
||||
) {
|
||||
contentEnd -= 2
|
||||
}
|
||||
|
||||
if (
|
||||
events[contentEnd][1].type === 'atxHeadingSequence' &&
|
||||
(contentStart === contentEnd - 1 ||
|
||||
(contentEnd - 4 > contentStart &&
|
||||
events[contentEnd - 2][1].type === 'whitespace'))
|
||||
) {
|
||||
contentEnd -= contentStart + 1 === contentEnd ? 2 : 4
|
||||
}
|
||||
|
||||
if (contentEnd > contentStart) {
|
||||
content = {
|
||||
type: 'atxHeadingText',
|
||||
start: events[contentStart][1].start,
|
||||
end: events[contentEnd][1].end
|
||||
}
|
||||
text = {
|
||||
type: 'chunkText',
|
||||
start: events[contentStart][1].start,
|
||||
end: events[contentEnd][1].end,
|
||||
contentType: 'text'
|
||||
}
|
||||
chunkedSplice(events, contentStart, contentEnd - contentStart + 1, [
|
||||
['enter', content, context],
|
||||
['enter', text, context],
|
||||
['exit', text, context],
|
||||
['exit', content, context]
|
||||
])
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
function tokenizeHeadingAtx(effects, ok, nok) {
|
||||
var self = this
|
||||
var size = 0
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('atxHeading')
|
||||
effects.enter('atxHeadingSequence')
|
||||
return fenceOpenInside(code)
|
||||
}
|
||||
|
||||
function fenceOpenInside(code) {
|
||||
if (code === 35 && size++ < 6) {
|
||||
effects.consume(code)
|
||||
return fenceOpenInside
|
||||
}
|
||||
|
||||
if (code === null || markdownLineEndingOrSpace(code)) {
|
||||
effects.exit('atxHeadingSequence')
|
||||
return self.interrupt ? ok(code) : headingBreak(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function headingBreak(code) {
|
||||
if (code === 35) {
|
||||
effects.enter('atxHeadingSequence')
|
||||
return sequence(code)
|
||||
}
|
||||
|
||||
if (code === null || markdownLineEnding(code)) {
|
||||
effects.exit('atxHeading')
|
||||
return ok(code)
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
return factorySpace(effects, headingBreak, 'whitespace')(code)
|
||||
}
|
||||
|
||||
effects.enter('atxHeadingText')
|
||||
return data(code)
|
||||
}
|
||||
|
||||
function sequence(code) {
|
||||
if (code === 35) {
|
||||
effects.consume(code)
|
||||
return sequence
|
||||
}
|
||||
|
||||
effects.exit('atxHeadingSequence')
|
||||
return headingBreak(code)
|
||||
}
|
||||
|
||||
function data(code) {
|
||||
if (code === null || code === 35 || markdownLineEndingOrSpace(code)) {
|
||||
effects.exit('atxHeadingText')
|
||||
return headingBreak(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return data
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = headingAtx
|
||||
486
node_modules/micromark/dist/tokenize/html-flow.js
generated
vendored
Normal file
486
node_modules/micromark/dist/tokenize/html-flow.js
generated
vendored
Normal file
@@ -0,0 +1,486 @@
|
||||
'use strict'
|
||||
|
||||
var asciiAlpha = require('../character/ascii-alpha.js')
|
||||
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var fromCharCode = require('../constant/from-char-code.js')
|
||||
var htmlBlockNames = require('../constant/html-block-names.js')
|
||||
var htmlRawNames = require('../constant/html-raw-names.js')
|
||||
var partialBlankLine = require('./partial-blank-line.js')
|
||||
|
||||
var htmlFlow = {
|
||||
name: 'htmlFlow',
|
||||
tokenize: tokenizeHtmlFlow,
|
||||
resolveTo: resolveToHtmlFlow,
|
||||
concrete: true
|
||||
}
|
||||
var nextBlankConstruct = {
|
||||
tokenize: tokenizeNextBlank,
|
||||
partial: true
|
||||
}
|
||||
|
||||
function resolveToHtmlFlow(events) {
|
||||
var index = events.length
|
||||
|
||||
while (index--) {
|
||||
if (events[index][0] === 'enter' && events[index][1].type === 'htmlFlow') {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (index > 1 && events[index - 2][1].type === 'linePrefix') {
|
||||
// Add the prefix start to the HTML token.
|
||||
events[index][1].start = events[index - 2][1].start // Add the prefix start to the HTML line token.
|
||||
|
||||
events[index + 1][1].start = events[index - 2][1].start // Remove the line prefix.
|
||||
|
||||
events.splice(index - 2, 2)
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
function tokenizeHtmlFlow(effects, ok, nok) {
|
||||
var self = this
|
||||
var kind
|
||||
var startTag
|
||||
var buffer
|
||||
var index
|
||||
var marker
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('htmlFlow')
|
||||
effects.enter('htmlFlowData')
|
||||
effects.consume(code)
|
||||
return open
|
||||
}
|
||||
|
||||
function open(code) {
|
||||
if (code === 33) {
|
||||
effects.consume(code)
|
||||
return declarationStart
|
||||
}
|
||||
|
||||
if (code === 47) {
|
||||
effects.consume(code)
|
||||
return tagCloseStart
|
||||
}
|
||||
|
||||
if (code === 63) {
|
||||
effects.consume(code)
|
||||
kind = 3 // While we’re in an instruction instead of a declaration, we’re on a `?`
|
||||
// right now, so we do need to search for `>`, similar to declarations.
|
||||
|
||||
return self.interrupt ? ok : continuationDeclarationInside
|
||||
}
|
||||
|
||||
if (asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
buffer = fromCharCode(code)
|
||||
startTag = true
|
||||
return tagName
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function declarationStart(code) {
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
kind = 2
|
||||
return commentOpenInside
|
||||
}
|
||||
|
||||
if (code === 91) {
|
||||
effects.consume(code)
|
||||
kind = 5
|
||||
buffer = 'CDATA['
|
||||
index = 0
|
||||
return cdataOpenInside
|
||||
}
|
||||
|
||||
if (asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
kind = 4
|
||||
return self.interrupt ? ok : continuationDeclarationInside
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function commentOpenInside(code) {
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return self.interrupt ? ok : continuationDeclarationInside
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function cdataOpenInside(code) {
|
||||
if (code === buffer.charCodeAt(index++)) {
|
||||
effects.consume(code)
|
||||
return index === buffer.length
|
||||
? self.interrupt
|
||||
? ok
|
||||
: continuation
|
||||
: cdataOpenInside
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function tagCloseStart(code) {
|
||||
if (asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
buffer = fromCharCode(code)
|
||||
return tagName
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function tagName(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 47 ||
|
||||
code === 62 ||
|
||||
markdownLineEndingOrSpace(code)
|
||||
) {
|
||||
if (
|
||||
code !== 47 &&
|
||||
startTag &&
|
||||
htmlRawNames.indexOf(buffer.toLowerCase()) > -1
|
||||
) {
|
||||
kind = 1
|
||||
return self.interrupt ? ok(code) : continuation(code)
|
||||
}
|
||||
|
||||
if (htmlBlockNames.indexOf(buffer.toLowerCase()) > -1) {
|
||||
kind = 6
|
||||
|
||||
if (code === 47) {
|
||||
effects.consume(code)
|
||||
return basicSelfClosing
|
||||
}
|
||||
|
||||
return self.interrupt ? ok(code) : continuation(code)
|
||||
}
|
||||
|
||||
kind = 7 // Do not support complete HTML when interrupting.
|
||||
|
||||
return self.interrupt
|
||||
? nok(code)
|
||||
: startTag
|
||||
? completeAttributeNameBefore(code)
|
||||
: completeClosingTagAfter(code)
|
||||
}
|
||||
|
||||
if (code === 45 || asciiAlphanumeric(code)) {
|
||||
effects.consume(code)
|
||||
buffer += fromCharCode(code)
|
||||
return tagName
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function basicSelfClosing(code) {
|
||||
if (code === 62) {
|
||||
effects.consume(code)
|
||||
return self.interrupt ? ok : continuation
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function completeClosingTagAfter(code) {
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return completeClosingTagAfter
|
||||
}
|
||||
|
||||
return completeEnd(code)
|
||||
}
|
||||
|
||||
function completeAttributeNameBefore(code) {
|
||||
if (code === 47) {
|
||||
effects.consume(code)
|
||||
return completeEnd
|
||||
}
|
||||
|
||||
if (code === 58 || code === 95 || asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
return completeAttributeName
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return completeAttributeNameBefore
|
||||
}
|
||||
|
||||
return completeEnd(code)
|
||||
}
|
||||
|
||||
function completeAttributeName(code) {
|
||||
if (
|
||||
code === 45 ||
|
||||
code === 46 ||
|
||||
code === 58 ||
|
||||
code === 95 ||
|
||||
asciiAlphanumeric(code)
|
||||
) {
|
||||
effects.consume(code)
|
||||
return completeAttributeName
|
||||
}
|
||||
|
||||
return completeAttributeNameAfter(code)
|
||||
}
|
||||
|
||||
function completeAttributeNameAfter(code) {
|
||||
if (code === 61) {
|
||||
effects.consume(code)
|
||||
return completeAttributeValueBefore
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return completeAttributeNameAfter
|
||||
}
|
||||
|
||||
return completeAttributeNameBefore(code)
|
||||
}
|
||||
|
||||
function completeAttributeValueBefore(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 60 ||
|
||||
code === 61 ||
|
||||
code === 62 ||
|
||||
code === 96
|
||||
) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 34 || code === 39) {
|
||||
effects.consume(code)
|
||||
marker = code
|
||||
return completeAttributeValueQuoted
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return completeAttributeValueBefore
|
||||
}
|
||||
|
||||
marker = undefined
|
||||
return completeAttributeValueUnquoted(code)
|
||||
}
|
||||
|
||||
function completeAttributeValueQuoted(code) {
|
||||
if (code === marker) {
|
||||
effects.consume(code)
|
||||
return completeAttributeValueQuotedAfter
|
||||
}
|
||||
|
||||
if (code === null || markdownLineEnding(code)) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return completeAttributeValueQuoted
|
||||
}
|
||||
|
||||
function completeAttributeValueUnquoted(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 34 ||
|
||||
code === 39 ||
|
||||
code === 60 ||
|
||||
code === 61 ||
|
||||
code === 62 ||
|
||||
code === 96 ||
|
||||
markdownLineEndingOrSpace(code)
|
||||
) {
|
||||
return completeAttributeNameAfter(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return completeAttributeValueUnquoted
|
||||
}
|
||||
|
||||
function completeAttributeValueQuotedAfter(code) {
|
||||
if (code === 47 || code === 62 || markdownSpace(code)) {
|
||||
return completeAttributeNameBefore(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function completeEnd(code) {
|
||||
if (code === 62) {
|
||||
effects.consume(code)
|
||||
return completeAfter
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function completeAfter(code) {
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return completeAfter
|
||||
}
|
||||
|
||||
return code === null || markdownLineEnding(code)
|
||||
? continuation(code)
|
||||
: nok(code)
|
||||
}
|
||||
|
||||
function continuation(code) {
|
||||
if (code === 45 && kind === 2) {
|
||||
effects.consume(code)
|
||||
return continuationCommentInside
|
||||
}
|
||||
|
||||
if (code === 60 && kind === 1) {
|
||||
effects.consume(code)
|
||||
return continuationRawTagOpen
|
||||
}
|
||||
|
||||
if (code === 62 && kind === 4) {
|
||||
effects.consume(code)
|
||||
return continuationClose
|
||||
}
|
||||
|
||||
if (code === 63 && kind === 3) {
|
||||
effects.consume(code)
|
||||
return continuationDeclarationInside
|
||||
}
|
||||
|
||||
if (code === 93 && kind === 5) {
|
||||
effects.consume(code)
|
||||
return continuationCharacterDataInside
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code) && (kind === 6 || kind === 7)) {
|
||||
return effects.check(
|
||||
nextBlankConstruct,
|
||||
continuationClose,
|
||||
continuationAtLineEnding
|
||||
)(code)
|
||||
}
|
||||
|
||||
if (code === null || markdownLineEnding(code)) {
|
||||
return continuationAtLineEnding(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return continuation
|
||||
}
|
||||
|
||||
function continuationAtLineEnding(code) {
|
||||
effects.exit('htmlFlowData')
|
||||
return htmlContinueStart(code)
|
||||
}
|
||||
|
||||
function htmlContinueStart(code) {
|
||||
if (code === null) {
|
||||
return done(code)
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return htmlContinueStart
|
||||
}
|
||||
|
||||
effects.enter('htmlFlowData')
|
||||
return continuation(code)
|
||||
}
|
||||
|
||||
function continuationCommentInside(code) {
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return continuationDeclarationInside
|
||||
}
|
||||
|
||||
return continuation(code)
|
||||
}
|
||||
|
||||
function continuationRawTagOpen(code) {
|
||||
if (code === 47) {
|
||||
effects.consume(code)
|
||||
buffer = ''
|
||||
return continuationRawEndTag
|
||||
}
|
||||
|
||||
return continuation(code)
|
||||
}
|
||||
|
||||
function continuationRawEndTag(code) {
|
||||
if (code === 62 && htmlRawNames.indexOf(buffer.toLowerCase()) > -1) {
|
||||
effects.consume(code)
|
||||
return continuationClose
|
||||
}
|
||||
|
||||
if (asciiAlpha(code) && buffer.length < 8) {
|
||||
effects.consume(code)
|
||||
buffer += fromCharCode(code)
|
||||
return continuationRawEndTag
|
||||
}
|
||||
|
||||
return continuation(code)
|
||||
}
|
||||
|
||||
function continuationCharacterDataInside(code) {
|
||||
if (code === 93) {
|
||||
effects.consume(code)
|
||||
return continuationDeclarationInside
|
||||
}
|
||||
|
||||
return continuation(code)
|
||||
}
|
||||
|
||||
function continuationDeclarationInside(code) {
|
||||
if (code === 62) {
|
||||
effects.consume(code)
|
||||
return continuationClose
|
||||
}
|
||||
|
||||
return continuation(code)
|
||||
}
|
||||
|
||||
function continuationClose(code) {
|
||||
if (code === null || markdownLineEnding(code)) {
|
||||
effects.exit('htmlFlowData')
|
||||
return done(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return continuationClose
|
||||
}
|
||||
|
||||
function done(code) {
|
||||
effects.exit('htmlFlow')
|
||||
return ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeNextBlank(effects, ok, nok) {
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.exit('htmlFlowData')
|
||||
effects.enter('lineEndingBlank')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEndingBlank')
|
||||
return effects.attempt(partialBlankLine, ok, nok)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = htmlFlow
|
||||
435
node_modules/micromark/dist/tokenize/html-text.js
generated
vendored
Normal file
435
node_modules/micromark/dist/tokenize/html-text.js
generated
vendored
Normal file
@@ -0,0 +1,435 @@
|
||||
'use strict'
|
||||
|
||||
var asciiAlpha = require('../character/ascii-alpha.js')
|
||||
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
var htmlText = {
|
||||
name: 'htmlText',
|
||||
tokenize: tokenizeHtmlText
|
||||
}
|
||||
|
||||
function tokenizeHtmlText(effects, ok, nok) {
|
||||
var self = this
|
||||
var marker
|
||||
var buffer
|
||||
var index
|
||||
var returnState
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('htmlText')
|
||||
effects.enter('htmlTextData')
|
||||
effects.consume(code)
|
||||
return open
|
||||
}
|
||||
|
||||
function open(code) {
|
||||
if (code === 33) {
|
||||
effects.consume(code)
|
||||
return declarationOpen
|
||||
}
|
||||
|
||||
if (code === 47) {
|
||||
effects.consume(code)
|
||||
return tagCloseStart
|
||||
}
|
||||
|
||||
if (code === 63) {
|
||||
effects.consume(code)
|
||||
return instruction
|
||||
}
|
||||
|
||||
if (asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
return tagOpen
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function declarationOpen(code) {
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return commentOpen
|
||||
}
|
||||
|
||||
if (code === 91) {
|
||||
effects.consume(code)
|
||||
buffer = 'CDATA['
|
||||
index = 0
|
||||
return cdataOpen
|
||||
}
|
||||
|
||||
if (asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
return declaration
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function commentOpen(code) {
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return commentStart
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function commentStart(code) {
|
||||
if (code === null || code === 62) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return commentStartDash
|
||||
}
|
||||
|
||||
return comment(code)
|
||||
}
|
||||
|
||||
function commentStartDash(code) {
|
||||
if (code === null || code === 62) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
return comment(code)
|
||||
}
|
||||
|
||||
function comment(code) {
|
||||
if (code === null) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return commentClose
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = comment
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return comment
|
||||
}
|
||||
|
||||
function commentClose(code) {
|
||||
if (code === 45) {
|
||||
effects.consume(code)
|
||||
return end
|
||||
}
|
||||
|
||||
return comment(code)
|
||||
}
|
||||
|
||||
function cdataOpen(code) {
|
||||
if (code === buffer.charCodeAt(index++)) {
|
||||
effects.consume(code)
|
||||
return index === buffer.length ? cdata : cdataOpen
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function cdata(code) {
|
||||
if (code === null) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 93) {
|
||||
effects.consume(code)
|
||||
return cdataClose
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = cdata
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return cdata
|
||||
}
|
||||
|
||||
function cdataClose(code) {
|
||||
if (code === 93) {
|
||||
effects.consume(code)
|
||||
return cdataEnd
|
||||
}
|
||||
|
||||
return cdata(code)
|
||||
}
|
||||
|
||||
function cdataEnd(code) {
|
||||
if (code === 62) {
|
||||
return end(code)
|
||||
}
|
||||
|
||||
if (code === 93) {
|
||||
effects.consume(code)
|
||||
return cdataEnd
|
||||
}
|
||||
|
||||
return cdata(code)
|
||||
}
|
||||
|
||||
function declaration(code) {
|
||||
if (code === null || code === 62) {
|
||||
return end(code)
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = declaration
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return declaration
|
||||
}
|
||||
|
||||
function instruction(code) {
|
||||
if (code === null) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 63) {
|
||||
effects.consume(code)
|
||||
return instructionClose
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = instruction
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return instruction
|
||||
}
|
||||
|
||||
function instructionClose(code) {
|
||||
return code === 62 ? end(code) : instruction(code)
|
||||
}
|
||||
|
||||
function tagCloseStart(code) {
|
||||
if (asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
return tagClose
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function tagClose(code) {
|
||||
if (code === 45 || asciiAlphanumeric(code)) {
|
||||
effects.consume(code)
|
||||
return tagClose
|
||||
}
|
||||
|
||||
return tagCloseBetween(code)
|
||||
}
|
||||
|
||||
function tagCloseBetween(code) {
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = tagCloseBetween
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return tagCloseBetween
|
||||
}
|
||||
|
||||
return end(code)
|
||||
}
|
||||
|
||||
function tagOpen(code) {
|
||||
if (code === 45 || asciiAlphanumeric(code)) {
|
||||
effects.consume(code)
|
||||
return tagOpen
|
||||
}
|
||||
|
||||
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
|
||||
return tagOpenBetween(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function tagOpenBetween(code) {
|
||||
if (code === 47) {
|
||||
effects.consume(code)
|
||||
return end
|
||||
}
|
||||
|
||||
if (code === 58 || code === 95 || asciiAlpha(code)) {
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeName
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = tagOpenBetween
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return tagOpenBetween
|
||||
}
|
||||
|
||||
return end(code)
|
||||
}
|
||||
|
||||
function tagOpenAttributeName(code) {
|
||||
if (
|
||||
code === 45 ||
|
||||
code === 46 ||
|
||||
code === 58 ||
|
||||
code === 95 ||
|
||||
asciiAlphanumeric(code)
|
||||
) {
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeName
|
||||
}
|
||||
|
||||
return tagOpenAttributeNameAfter(code)
|
||||
}
|
||||
|
||||
function tagOpenAttributeNameAfter(code) {
|
||||
if (code === 61) {
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeValueBefore
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = tagOpenAttributeNameAfter
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeNameAfter
|
||||
}
|
||||
|
||||
return tagOpenBetween(code)
|
||||
}
|
||||
|
||||
function tagOpenAttributeValueBefore(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 60 ||
|
||||
code === 61 ||
|
||||
code === 62 ||
|
||||
code === 96
|
||||
) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 34 || code === 39) {
|
||||
effects.consume(code)
|
||||
marker = code
|
||||
return tagOpenAttributeValueQuoted
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = tagOpenAttributeValueBefore
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
if (markdownSpace(code)) {
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeValueBefore
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
marker = undefined
|
||||
return tagOpenAttributeValueUnquoted
|
||||
}
|
||||
|
||||
function tagOpenAttributeValueQuoted(code) {
|
||||
if (code === marker) {
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeValueQuotedAfter
|
||||
}
|
||||
|
||||
if (code === null) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (markdownLineEnding(code)) {
|
||||
returnState = tagOpenAttributeValueQuoted
|
||||
return atLineEnding(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeValueQuoted
|
||||
}
|
||||
|
||||
function tagOpenAttributeValueQuotedAfter(code) {
|
||||
if (code === 62 || code === 47 || markdownLineEndingOrSpace(code)) {
|
||||
return tagOpenBetween(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function tagOpenAttributeValueUnquoted(code) {
|
||||
if (
|
||||
code === null ||
|
||||
code === 34 ||
|
||||
code === 39 ||
|
||||
code === 60 ||
|
||||
code === 61 ||
|
||||
code === 96
|
||||
) {
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
if (code === 62 || markdownLineEndingOrSpace(code)) {
|
||||
return tagOpenBetween(code)
|
||||
}
|
||||
|
||||
effects.consume(code)
|
||||
return tagOpenAttributeValueUnquoted
|
||||
} // We can’t have blank lines in content, so no need to worry about empty
|
||||
// tokens.
|
||||
|
||||
function atLineEnding(code) {
|
||||
effects.exit('htmlTextData')
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return factorySpace(
|
||||
effects,
|
||||
afterPrefix,
|
||||
'linePrefix',
|
||||
self.parser.constructs.disable.null.indexOf('codeIndented') > -1
|
||||
? undefined
|
||||
: 4
|
||||
)
|
||||
}
|
||||
|
||||
function afterPrefix(code) {
|
||||
effects.enter('htmlTextData')
|
||||
return returnState(code)
|
||||
}
|
||||
|
||||
function end(code) {
|
||||
if (code === 62) {
|
||||
effects.consume(code)
|
||||
effects.exit('htmlTextData')
|
||||
effects.exit('htmlText')
|
||||
return ok
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = htmlText
|
||||
330
node_modules/micromark/dist/tokenize/label-end.js
generated
vendored
Normal file
330
node_modules/micromark/dist/tokenize/label-end.js
generated
vendored
Normal file
@@ -0,0 +1,330 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var chunkedPush = require('../util/chunked-push.js')
|
||||
var chunkedSplice = require('../util/chunked-splice.js')
|
||||
var normalizeIdentifier = require('../util/normalize-identifier.js')
|
||||
var resolveAll = require('../util/resolve-all.js')
|
||||
var shallow = require('../util/shallow.js')
|
||||
var factoryDestination = require('./factory-destination.js')
|
||||
var factoryLabel = require('./factory-label.js')
|
||||
var factoryTitle = require('./factory-title.js')
|
||||
var factoryWhitespace = require('./factory-whitespace.js')
|
||||
|
||||
var labelEnd = {
|
||||
name: 'labelEnd',
|
||||
tokenize: tokenizeLabelEnd,
|
||||
resolveTo: resolveToLabelEnd,
|
||||
resolveAll: resolveAllLabelEnd
|
||||
}
|
||||
var resourceConstruct = {
|
||||
tokenize: tokenizeResource
|
||||
}
|
||||
var fullReferenceConstruct = {
|
||||
tokenize: tokenizeFullReference
|
||||
}
|
||||
var collapsedReferenceConstruct = {
|
||||
tokenize: tokenizeCollapsedReference
|
||||
}
|
||||
|
||||
function resolveAllLabelEnd(events) {
|
||||
var index = -1
|
||||
var token
|
||||
|
||||
while (++index < events.length) {
|
||||
token = events[index][1]
|
||||
|
||||
if (
|
||||
!token._used &&
|
||||
(token.type === 'labelImage' ||
|
||||
token.type === 'labelLink' ||
|
||||
token.type === 'labelEnd')
|
||||
) {
|
||||
// Remove the marker.
|
||||
events.splice(index + 1, token.type === 'labelImage' ? 4 : 2)
|
||||
token.type = 'data'
|
||||
index++
|
||||
}
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
function resolveToLabelEnd(events, context) {
|
||||
var index = events.length
|
||||
var offset = 0
|
||||
var group
|
||||
var label
|
||||
var text
|
||||
var token
|
||||
var open
|
||||
var close
|
||||
var media // Find an opening.
|
||||
|
||||
while (index--) {
|
||||
token = events[index][1]
|
||||
|
||||
if (open) {
|
||||
// If we see another link, or inactive link label, we’ve been here before.
|
||||
if (
|
||||
token.type === 'link' ||
|
||||
(token.type === 'labelLink' && token._inactive)
|
||||
) {
|
||||
break
|
||||
} // Mark other link openings as inactive, as we can’t have links in
|
||||
// links.
|
||||
|
||||
if (events[index][0] === 'enter' && token.type === 'labelLink') {
|
||||
token._inactive = true
|
||||
}
|
||||
} else if (close) {
|
||||
if (
|
||||
events[index][0] === 'enter' &&
|
||||
(token.type === 'labelImage' || token.type === 'labelLink') &&
|
||||
!token._balanced
|
||||
) {
|
||||
open = index
|
||||
|
||||
if (token.type !== 'labelLink') {
|
||||
offset = 2
|
||||
break
|
||||
}
|
||||
}
|
||||
} else if (token.type === 'labelEnd') {
|
||||
close = index
|
||||
}
|
||||
}
|
||||
|
||||
group = {
|
||||
type: events[open][1].type === 'labelLink' ? 'link' : 'image',
|
||||
start: shallow(events[open][1].start),
|
||||
end: shallow(events[events.length - 1][1].end)
|
||||
}
|
||||
label = {
|
||||
type: 'label',
|
||||
start: shallow(events[open][1].start),
|
||||
end: shallow(events[close][1].end)
|
||||
}
|
||||
text = {
|
||||
type: 'labelText',
|
||||
start: shallow(events[open + offset + 2][1].end),
|
||||
end: shallow(events[close - 2][1].start)
|
||||
}
|
||||
media = [
|
||||
['enter', group, context],
|
||||
['enter', label, context]
|
||||
] // Opening marker.
|
||||
|
||||
media = chunkedPush(media, events.slice(open + 1, open + offset + 3)) // Text open.
|
||||
|
||||
media = chunkedPush(media, [['enter', text, context]]) // Between.
|
||||
|
||||
media = chunkedPush(
|
||||
media,
|
||||
resolveAll(
|
||||
context.parser.constructs.insideSpan.null,
|
||||
events.slice(open + offset + 4, close - 3),
|
||||
context
|
||||
)
|
||||
) // Text close, marker close, label close.
|
||||
|
||||
media = chunkedPush(media, [
|
||||
['exit', text, context],
|
||||
events[close - 2],
|
||||
events[close - 1],
|
||||
['exit', label, context]
|
||||
]) // Reference, resource, or so.
|
||||
|
||||
media = chunkedPush(media, events.slice(close + 1)) // Media close.
|
||||
|
||||
media = chunkedPush(media, [['exit', group, context]])
|
||||
chunkedSplice(events, open, events.length, media)
|
||||
return events
|
||||
}
|
||||
|
||||
function tokenizeLabelEnd(effects, ok, nok) {
|
||||
var self = this
|
||||
var index = self.events.length
|
||||
var labelStart
|
||||
var defined // Find an opening.
|
||||
|
||||
while (index--) {
|
||||
if (
|
||||
(self.events[index][1].type === 'labelImage' ||
|
||||
self.events[index][1].type === 'labelLink') &&
|
||||
!self.events[index][1]._balanced
|
||||
) {
|
||||
labelStart = self.events[index][1]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
if (!labelStart) {
|
||||
return nok(code)
|
||||
} // It’s a balanced bracket, but contains a link.
|
||||
|
||||
if (labelStart._inactive) return balanced(code)
|
||||
defined =
|
||||
self.parser.defined.indexOf(
|
||||
normalizeIdentifier(
|
||||
self.sliceSerialize({
|
||||
start: labelStart.end,
|
||||
end: self.now()
|
||||
})
|
||||
)
|
||||
) > -1
|
||||
effects.enter('labelEnd')
|
||||
effects.enter('labelMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('labelMarker')
|
||||
effects.exit('labelEnd')
|
||||
return afterLabelEnd
|
||||
}
|
||||
|
||||
function afterLabelEnd(code) {
|
||||
// Resource: `[asd](fgh)`.
|
||||
if (code === 40) {
|
||||
return effects.attempt(
|
||||
resourceConstruct,
|
||||
ok,
|
||||
defined ? ok : balanced
|
||||
)(code)
|
||||
} // Collapsed (`[asd][]`) or full (`[asd][fgh]`) reference?
|
||||
|
||||
if (code === 91) {
|
||||
return effects.attempt(
|
||||
fullReferenceConstruct,
|
||||
ok,
|
||||
defined
|
||||
? effects.attempt(collapsedReferenceConstruct, ok, balanced)
|
||||
: balanced
|
||||
)(code)
|
||||
} // Shortcut reference: `[asd]`?
|
||||
|
||||
return defined ? ok(code) : balanced(code)
|
||||
}
|
||||
|
||||
function balanced(code) {
|
||||
labelStart._balanced = true
|
||||
return nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeResource(effects, ok, nok) {
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('resource')
|
||||
effects.enter('resourceMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('resourceMarker')
|
||||
return factoryWhitespace(effects, open)
|
||||
}
|
||||
|
||||
function open(code) {
|
||||
if (code === 41) {
|
||||
return end(code)
|
||||
}
|
||||
|
||||
return factoryDestination(
|
||||
effects,
|
||||
destinationAfter,
|
||||
nok,
|
||||
'resourceDestination',
|
||||
'resourceDestinationLiteral',
|
||||
'resourceDestinationLiteralMarker',
|
||||
'resourceDestinationRaw',
|
||||
'resourceDestinationString',
|
||||
3
|
||||
)(code)
|
||||
}
|
||||
|
||||
function destinationAfter(code) {
|
||||
return markdownLineEndingOrSpace(code)
|
||||
? factoryWhitespace(effects, between)(code)
|
||||
: end(code)
|
||||
}
|
||||
|
||||
function between(code) {
|
||||
if (code === 34 || code === 39 || code === 40) {
|
||||
return factoryTitle(
|
||||
effects,
|
||||
factoryWhitespace(effects, end),
|
||||
nok,
|
||||
'resourceTitle',
|
||||
'resourceTitleMarker',
|
||||
'resourceTitleString'
|
||||
)(code)
|
||||
}
|
||||
|
||||
return end(code)
|
||||
}
|
||||
|
||||
function end(code) {
|
||||
if (code === 41) {
|
||||
effects.enter('resourceMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('resourceMarker')
|
||||
effects.exit('resource')
|
||||
return ok
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeFullReference(effects, ok, nok) {
|
||||
var self = this
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
return factoryLabel.call(
|
||||
self,
|
||||
effects,
|
||||
afterLabel,
|
||||
nok,
|
||||
'reference',
|
||||
'referenceMarker',
|
||||
'referenceString'
|
||||
)(code)
|
||||
}
|
||||
|
||||
function afterLabel(code) {
|
||||
return self.parser.defined.indexOf(
|
||||
normalizeIdentifier(
|
||||
self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1)
|
||||
)
|
||||
) < 0
|
||||
? nok(code)
|
||||
: ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeCollapsedReference(effects, ok, nok) {
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('reference')
|
||||
effects.enter('referenceMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('referenceMarker')
|
||||
return open
|
||||
}
|
||||
|
||||
function open(code) {
|
||||
if (code === 93) {
|
||||
effects.enter('referenceMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('referenceMarker')
|
||||
effects.exit('reference')
|
||||
return ok
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = labelEnd
|
||||
46
node_modules/micromark/dist/tokenize/label-start-image.js
generated
vendored
Normal file
46
node_modules/micromark/dist/tokenize/label-start-image.js
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
'use strict'
|
||||
|
||||
var labelEnd = require('./label-end.js')
|
||||
|
||||
var labelStartImage = {
|
||||
name: 'labelStartImage',
|
||||
tokenize: tokenizeLabelStartImage,
|
||||
resolveAll: labelEnd.resolveAll
|
||||
}
|
||||
|
||||
function tokenizeLabelStartImage(effects, ok, nok) {
|
||||
var self = this
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('labelImage')
|
||||
effects.enter('labelImageMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('labelImageMarker')
|
||||
return open
|
||||
}
|
||||
|
||||
function open(code) {
|
||||
if (code === 91) {
|
||||
effects.enter('labelMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('labelMarker')
|
||||
effects.exit('labelImage')
|
||||
return after
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function after(code) {
|
||||
/* c8 ignore next */
|
||||
return code === 94 &&
|
||||
/* c8 ignore next */
|
||||
'_hiddenFootnoteSupport' in self.parser.constructs
|
||||
? /* c8 ignore next */
|
||||
nok(code)
|
||||
: ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = labelStartImage
|
||||
35
node_modules/micromark/dist/tokenize/label-start-link.js
generated
vendored
Normal file
35
node_modules/micromark/dist/tokenize/label-start-link.js
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
'use strict'
|
||||
|
||||
var labelEnd = require('./label-end.js')
|
||||
|
||||
var labelStartLink = {
|
||||
name: 'labelStartLink',
|
||||
tokenize: tokenizeLabelStartLink,
|
||||
resolveAll: labelEnd.resolveAll
|
||||
}
|
||||
|
||||
function tokenizeLabelStartLink(effects, ok, nok) {
|
||||
var self = this
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('labelLink')
|
||||
effects.enter('labelMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('labelMarker')
|
||||
effects.exit('labelLink')
|
||||
return after
|
||||
}
|
||||
|
||||
function after(code) {
|
||||
/* c8 ignore next */
|
||||
return code === 94 &&
|
||||
/* c8 ignore next */
|
||||
'_hiddenFootnoteSupport' in self.parser.constructs
|
||||
? /* c8 ignore next */
|
||||
nok(code)
|
||||
: ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = labelStartLink
|
||||
21
node_modules/micromark/dist/tokenize/line-ending.js
generated
vendored
Normal file
21
node_modules/micromark/dist/tokenize/line-ending.js
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
'use strict'
|
||||
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
var lineEnding = {
|
||||
name: 'lineEnding',
|
||||
tokenize: tokenizeLineEnding
|
||||
}
|
||||
|
||||
function tokenizeLineEnding(effects, ok) {
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
effects.enter('lineEnding')
|
||||
effects.consume(code)
|
||||
effects.exit('lineEnding')
|
||||
return factorySpace(effects, ok, 'linePrefix')
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = lineEnding
|
||||
214
node_modules/micromark/dist/tokenize/list.js
generated
vendored
Normal file
214
node_modules/micromark/dist/tokenize/list.js
generated
vendored
Normal file
@@ -0,0 +1,214 @@
|
||||
'use strict'
|
||||
|
||||
var asciiDigit = require('../character/ascii-digit.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var prefixSize = require('../util/prefix-size.js')
|
||||
var sizeChunks = require('../util/size-chunks.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
var partialBlankLine = require('./partial-blank-line.js')
|
||||
var thematicBreak = require('./thematic-break.js')
|
||||
|
||||
var list = {
|
||||
name: 'list',
|
||||
tokenize: tokenizeListStart,
|
||||
continuation: {
|
||||
tokenize: tokenizeListContinuation
|
||||
},
|
||||
exit: tokenizeListEnd
|
||||
}
|
||||
var listItemPrefixWhitespaceConstruct = {
|
||||
tokenize: tokenizeListItemPrefixWhitespace,
|
||||
partial: true
|
||||
}
|
||||
var indentConstruct = {
|
||||
tokenize: tokenizeIndent,
|
||||
partial: true
|
||||
}
|
||||
|
||||
function tokenizeListStart(effects, ok, nok) {
|
||||
var self = this
|
||||
var initialSize = prefixSize(self.events, 'linePrefix')
|
||||
var size = 0
|
||||
return start
|
||||
|
||||
function start(code) {
|
||||
var kind =
|
||||
self.containerState.type ||
|
||||
(code === 42 || code === 43 || code === 45
|
||||
? 'listUnordered'
|
||||
: 'listOrdered')
|
||||
|
||||
if (
|
||||
kind === 'listUnordered'
|
||||
? !self.containerState.marker || code === self.containerState.marker
|
||||
: asciiDigit(code)
|
||||
) {
|
||||
if (!self.containerState.type) {
|
||||
self.containerState.type = kind
|
||||
effects.enter(kind, {
|
||||
_container: true
|
||||
})
|
||||
}
|
||||
|
||||
if (kind === 'listUnordered') {
|
||||
effects.enter('listItemPrefix')
|
||||
return code === 42 || code === 45
|
||||
? effects.check(thematicBreak, nok, atMarker)(code)
|
||||
: atMarker(code)
|
||||
}
|
||||
|
||||
if (!self.interrupt || code === 49) {
|
||||
effects.enter('listItemPrefix')
|
||||
effects.enter('listItemValue')
|
||||
return inside(code)
|
||||
}
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function inside(code) {
|
||||
if (asciiDigit(code) && ++size < 10) {
|
||||
effects.consume(code)
|
||||
return inside
|
||||
}
|
||||
|
||||
if (
|
||||
(!self.interrupt || size < 2) &&
|
||||
(self.containerState.marker
|
||||
? code === self.containerState.marker
|
||||
: code === 41 || code === 46)
|
||||
) {
|
||||
effects.exit('listItemValue')
|
||||
return atMarker(code)
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function atMarker(code) {
|
||||
effects.enter('listItemMarker')
|
||||
effects.consume(code)
|
||||
effects.exit('listItemMarker')
|
||||
self.containerState.marker = self.containerState.marker || code
|
||||
return effects.check(
|
||||
partialBlankLine, // Can’t be empty when interrupting.
|
||||
self.interrupt ? nok : onBlank,
|
||||
effects.attempt(
|
||||
listItemPrefixWhitespaceConstruct,
|
||||
endOfPrefix,
|
||||
otherPrefix
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
function onBlank(code) {
|
||||
self.containerState.initialBlankLine = true
|
||||
initialSize++
|
||||
return endOfPrefix(code)
|
||||
}
|
||||
|
||||
function otherPrefix(code) {
|
||||
if (markdownSpace(code)) {
|
||||
effects.enter('listItemPrefixWhitespace')
|
||||
effects.consume(code)
|
||||
effects.exit('listItemPrefixWhitespace')
|
||||
return endOfPrefix
|
||||
}
|
||||
|
||||
return nok(code)
|
||||
}
|
||||
|
||||
function endOfPrefix(code) {
|
||||
self.containerState.size =
|
||||
initialSize + sizeChunks(self.sliceStream(effects.exit('listItemPrefix')))
|
||||
return ok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeListContinuation(effects, ok, nok) {
|
||||
var self = this
|
||||
self.containerState._closeFlow = undefined
|
||||
return effects.check(partialBlankLine, onBlank, notBlank)
|
||||
|
||||
function onBlank(code) {
|
||||
self.containerState.furtherBlankLines =
|
||||
self.containerState.furtherBlankLines ||
|
||||
self.containerState.initialBlankLine // We have a blank line.
|
||||
// Still, try to consume at most the items size.
|
||||
|
||||
return factorySpace(
|
||||
effects,
|
||||
ok,
|
||||
'listItemIndent',
|
||||
self.containerState.size + 1
|
||||
)(code)
|
||||
}
|
||||
|
||||
function notBlank(code) {
|
||||
if (self.containerState.furtherBlankLines || !markdownSpace(code)) {
|
||||
self.containerState.furtherBlankLines = self.containerState.initialBlankLine = undefined
|
||||
return notInCurrentItem(code)
|
||||
}
|
||||
|
||||
self.containerState.furtherBlankLines = self.containerState.initialBlankLine = undefined
|
||||
return effects.attempt(indentConstruct, ok, notInCurrentItem)(code)
|
||||
}
|
||||
|
||||
function notInCurrentItem(code) {
|
||||
// While we do continue, we signal that the flow should be closed.
|
||||
self.containerState._closeFlow = true // As we’re closing flow, we’re no longer interrupting.
|
||||
|
||||
self.interrupt = undefined
|
||||
return factorySpace(
|
||||
effects,
|
||||
effects.attempt(list, ok, nok),
|
||||
'linePrefix',
|
||||
self.parser.constructs.disable.null.indexOf('codeIndented') > -1
|
||||
? undefined
|
||||
: 4
|
||||
)(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeIndent(effects, ok, nok) {
|
||||
var self = this
|
||||
return factorySpace(
|
||||
effects,
|
||||
afterPrefix,
|
||||
'listItemIndent',
|
||||
self.containerState.size + 1
|
||||
)
|
||||
|
||||
function afterPrefix(code) {
|
||||
return prefixSize(self.events, 'listItemIndent') ===
|
||||
self.containerState.size
|
||||
? ok(code)
|
||||
: nok(code)
|
||||
}
|
||||
}
|
||||
|
||||
function tokenizeListEnd(effects) {
|
||||
effects.exit(this.containerState.type)
|
||||
}
|
||||
|
||||
// Tokenize the whitespace between a list item marker and its content.
function tokenizeListItemPrefixWhitespace(effects, ok, nok) {
  var self = this
  // Consume whitespace as `listItemPrefixWhitespace`; capped at `4 + 1`
  // characters unless indented code is disabled (then uncapped).
  return factorySpace(
    effects,
    afterPrefix,
    'listItemPrefixWhitespace',
    self.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : 4 + 1
  )

  // Fail when more whitespace follows (indent too deep for this construct)
  // or when no whitespace was consumed at all.
  function afterPrefix(code) {
    return markdownSpace(code) ||
      !prefixSize(self.events, 'listItemPrefixWhitespace')
      ? nok(code)
      : ok(code)
  }
}
|
||||
|
||||
module.exports = list
|
||||
19
node_modules/micromark/dist/tokenize/partial-blank-line.js
generated
vendored
Normal file
19
node_modules/micromark/dist/tokenize/partial-blank-line.js
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// Partial construct matching a blank line: optional whitespace followed by
// a line ending or EOF.
var partialBlankLine = {
  tokenize: tokenizePartialBlankLine,
  partial: true
}

function tokenizePartialBlankLine(effects, ok, nok) {
  // Skip any leading whitespace as `linePrefix`.
  return factorySpace(effects, afterWhitespace, 'linePrefix')

  // The line is blank only if nothing but EOF or a line ending remains.
  function afterWhitespace(code) {
    return code === null || markdownLineEnding(code) ? ok(code) : nok(code)
  }
}
|
||||
|
||||
module.exports = partialBlankLine
|
||||
117
node_modules/micromark/dist/tokenize/setext-underline.js
generated
vendored
Normal file
117
node_modules/micromark/dist/tokenize/setext-underline.js
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var shallow = require('../util/shallow.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// Setext heading underline construct (`===` / `---` under a paragraph).
var setextUnderline = {
  name: 'setextUnderline',
  tokenize: tokenizeSetextUnderline,
  resolveTo: resolveToSetextUnderline
}

// Rewrite the preceding paragraph events into a setext heading once the
// underline has matched.
function resolveToSetextUnderline(events, context) {
  var index = events.length
  var content
  var text
  var definition
  var heading // Find the opening of the content.
  // It’ll always exist: we don’t tokenize if it isn’t there.

  while (index--) {
    if (events[index][0] === 'enter') {
      if (events[index][1].type === 'content') {
        content = index
        break
      }

      if (events[index][1].type === 'paragraph') {
        text = index
      }
    } // Exit
    else {
      if (events[index][1].type === 'content') {
        // Remove the content end (if needed we’ll add it later)
        events.splice(index, 1)
      }

      // Remember the last definition exit seen while scanning backwards.
      if (!definition && events[index][1].type === 'definition') {
        definition = index
      }
    }
  }

  // New heading token spanning from the paragraph start to the last event.
  heading = {
    type: 'setextHeading',
    start: shallow(events[text][1].start),
    end: shallow(events[events.length - 1][1].end)
  } // Change the paragraph to setext heading text.

  events[text][1].type = 'setextHeadingText' // If we have definitions in the content, we’ll keep on having content,
  // but we need move it.

  if (definition) {
    events.splice(text, 0, ['enter', heading, context])
    events.splice(definition + 1, 0, ['exit', events[content][1], context])
    events[content][1].end = shallow(events[definition][1].end)
  } else {
    // No definitions: the content token itself becomes the heading.
    events[content][1] = heading
  } // Add the heading exit at the end.

  events.push(['exit', heading, context])
  return events
}
|
||||
|
||||
// Tokenize a setext heading underline: a run of one marker character (`=`
// or `-`, fixed by the first code seen) plus trailing whitespace.
function tokenizeSetextUnderline(effects, ok, nok) {
  var self = this
  var index = self.events.length
  var marker
  var paragraph // Find an opening.

  while (index--) {
    // Skip enter/exit of line ending, line prefix, and content.
    // We can now either have a definition or a paragraph.
    if (
      self.events[index][1].type !== 'lineEnding' &&
      self.events[index][1].type !== 'linePrefix' &&
      self.events[index][1].type !== 'content'
    ) {
      paragraph = self.events[index][1].type === 'paragraph'
      break
    }
  }

  return start

  // Only valid when not lazy and either interrupting or directly after a
  // paragraph.
  function start(code) {
    if (!self.lazy && (self.interrupt || paragraph)) {
      effects.enter('setextHeadingLine')
      effects.enter('setextHeadingLineSequence')
      marker = code
      return closingSequence(code)
    }

    return nok(code)
  }

  // Consume the run of marker characters, then optional trailing whitespace.
  function closingSequence(code) {
    if (code === marker) {
      effects.consume(code)
      return closingSequence
    }

    effects.exit('setextHeadingLineSequence')
    return factorySpace(effects, closingSequenceEnd, 'lineSuffix')(code)
  }

  // The underline must be followed by EOF or a line ending.
  function closingSequenceEnd(code) {
    if (code === null || markdownLineEnding(code)) {
      effects.exit('setextHeadingLine')
      return ok(code)
    }

    return nok(code)
  }
}
|
||||
|
||||
module.exports = setextUnderline
|
||||
53
node_modules/micromark/dist/tokenize/thematic-break.js
generated
vendored
Normal file
53
node_modules/micromark/dist/tokenize/thematic-break.js
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var markdownSpace = require('../character/markdown-space.js')
|
||||
var factorySpace = require('./factory-space.js')
|
||||
|
||||
// Thematic break construct (`***`, `---`, `___`).
var thematicBreak = {
  name: 'thematicBreak',
  tokenize: tokenizeThematicBreak
}

function tokenizeThematicBreak(effects, ok, nok) {
  // Number of marker characters consumed so far; must reach 3 to succeed.
  var size = 0
  var marker
  return start

  // The first code fixes the marker for the whole break.
  function start(code) {
    effects.enter('thematicBreak')
    marker = code
    return atBreak(code)
  }

  // Between sequences: more markers, interior whitespace, or the end.
  function atBreak(code) {
    if (code === marker) {
      effects.enter('thematicBreakSequence')
      return sequence(code)
    }

    if (markdownSpace(code)) {
      return factorySpace(effects, atBreak, 'whitespace')(code)
    }

    // Fail unless we saw at least three markers and the line is over.
    if (size < 3 || (code !== null && !markdownLineEnding(code))) {
      return nok(code)
    }

    effects.exit('thematicBreak')
    return ok(code)
  }

  // Consume a run of identical marker characters.
  function sequence(code) {
    if (code === marker) {
      effects.consume(code)
      size++
      return sequence
    }

    effects.exit('thematicBreakSequence')
    return atBreak(code)
  }
}
|
||||
|
||||
module.exports = thematicBreak
|
||||
14
node_modules/micromark/dist/util/chunked-push.js
generated
vendored
Normal file
14
node_modules/micromark/dist/util/chunked-push.js
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
'use strict'
|
||||
|
||||
var chunkedSplice = require('./chunked-splice.js')
|
||||
|
||||
// Append `items` to `list` via `chunkedSplice` (safe for very long arrays,
// unlike spreading into `push`). Returns the array holding the result: the
// mutated `list` when it had entries, otherwise `items` itself.
function chunkedPush(list, items) {
  if (!list.length) {
    // Nothing to append to: hand the new items back as-is.
    return items
  }

  chunkedSplice(list, list.length, 0, items)
  return list
}
|
||||
|
||||
module.exports = chunkedPush
|
||||
38
node_modules/micromark/dist/util/chunked-splice.js
generated
vendored
Normal file
38
node_modules/micromark/dist/util/chunked-splice.js
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
'use strict'
|
||||
|
||||
var splice = require('../constant/splice.js')
|
||||
|
||||
// causes a stack overflow in V8 when trying to insert 100k items for instance.
|
||||
|
||||
// `Array.prototype.splice`-alike that inserts `items` in slices of at most
// 10k entries, because handing a huge argument list to `splice` in one call
// can overflow the V8 stack.
function chunkedSplice(list, start, remove, items) {
  var length = list.length
  var position = 0
  var args

  // Clamp `start` into `[0, length]`; negative values count from the end.
  if (start < 0) {
    start = -start > length ? 0 : length + start
  } else {
    start = start > length ? length : start
  }

  // Non-positive (or missing) removal counts behave like zero.
  remove = remove > 0 ? remove : 0

  if (items.length < 10000) {
    // Few enough items: a single native splice call is safe.
    args = Array.from(items)
    args.unshift(start, remove)
    splice.apply(list, args)
  } else {
    // Drop the removed region first, then insert 10k items at a time so no
    // single call receives too many arguments.
    if (remove) splice.apply(list, [start, remove])

    while (position < items.length) {
      args = items.slice(position, position + 10000)
      args.unshift(start, 0)
      splice.apply(list, args)
      position += 10000
      start += 10000
    }
  }
}
|
||||
|
||||
module.exports = chunkedSplice
|
||||
25
node_modules/micromark/dist/util/classify-character.js
generated
vendored
Normal file
25
node_modules/micromark/dist/util/classify-character.js
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
'use strict'
|
||||
|
||||
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
|
||||
var unicodePunctuation = require('../character/unicode-punctuation.js')
|
||||
var unicodeWhitespace = require('../character/unicode-whitespace.js')
|
||||
|
||||
// Classify whether a character is unicode whitespace, unicode punctuation, or
|
||||
// anything else.
|
||||
// Used for attention (emphasis, strong), whose sequences can open or close
|
||||
// based on the class of surrounding characters.
|
||||
// Classify a character code for attention (emphasis/strong) handling:
// 1 = whitespace (including EOF and line endings), 2 = punctuation,
// `undefined` = everything else.
function classifyCharacter(code) {
  var whitespace =
    code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code)

  if (whitespace) {
    return 1
  }

  if (unicodePunctuation(code)) {
    return 2
  }

  // Implicitly `undefined` for all other characters.
}
|
||||
|
||||
module.exports = classifyCharacter
|
||||
49
node_modules/micromark/dist/util/combine-extensions.js
generated
vendored
Normal file
49
node_modules/micromark/dist/util/combine-extensions.js
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
'use strict'
|
||||
|
||||
var hasOwnProperty = require('../constant/has-own-property.js')
|
||||
var chunkedSplice = require('./chunked-splice.js')
|
||||
var miniflat = require('./miniflat.js')
|
||||
|
||||
// Merge several syntax extensions into one, combining each hook's map of
// character codes to constructs.
function combineExtensions(extensions) {
  var target = {}
  var position

  for (position = 0; position < extensions.length; position++) {
    extension(target, extensions[position])
  }

  return target
}

// Fold one extension's hooks into `target`.
function extension(target, source) {
  var key
  var existing
  var added
  var character

  for (key in source) {
    existing = hasOwnProperty.call(target, key)
      ? target[key]
      : (target[key] = {})
    added = source[key]

    for (character in added) {
      existing[character] = constructs(
        miniflat(added[character]),
        hasOwnProperty.call(existing, character) ? existing[character] : []
      )
    }
  }
}

// Splice `incoming` constructs into `existing`: those marked `add: 'after'`
// go to the back, the rest are prepended (in their given order).
function constructs(incoming, existing) {
  var position = -1
  var before = []

  while (++position < incoming.length) {
    ;(incoming[position].add === 'after' ? existing : before).push(
      incoming[position]
    )
  }

  chunkedSplice(existing, 0, 0, before)
  return existing
}
|
||||
|
||||
module.exports = combineExtensions
|
||||
34
node_modules/micromark/dist/util/combine-html-extensions.js
generated
vendored
Normal file
34
node_modules/micromark/dist/util/combine-html-extensions.js
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
'use strict'
|
||||
|
||||
var hasOwnProperty = require('../constant/has-own-property.js')
|
||||
|
||||
// Merge several HTML (compiler) extensions into one handler map; later
// extensions overwrite earlier handlers of the same hook/type.
function combineHtmlExtensions(extensions) {
  var handlers = {}
  var position

  for (position = 0; position < extensions.length; position++) {
    extension(handlers, extensions[position])
  }

  return handlers
}

// Copy one extension's handlers into the combined map.
function extension(handlers, source) {
  var key
  var existing
  var added
  var type

  for (key in source) {
    existing = hasOwnProperty.call(handlers, key)
      ? handlers[key]
      : (handlers[key] = {})
    added = source[key]

    for (type in added) {
      existing[type] = added[type]
    }
  }
}
|
||||
|
||||
module.exports = combineHtmlExtensions
|
||||
316
node_modules/micromark/dist/util/create-tokenizer.js
generated
vendored
Normal file
316
node_modules/micromark/dist/util/create-tokenizer.js
generated
vendored
Normal file
@@ -0,0 +1,316 @@
|
||||
'use strict'
|
||||
|
||||
var assign = require('../constant/assign.js')
|
||||
var markdownLineEnding = require('../character/markdown-line-ending.js')
|
||||
var chunkedPush = require('./chunked-push.js')
|
||||
var chunkedSplice = require('./chunked-splice.js')
|
||||
var miniflat = require('./miniflat.js')
|
||||
var resolveAll = require('./resolve-all.js')
|
||||
var serializeChunks = require('./serialize-chunks.js')
|
||||
var shallow = require('./shallow.js')
|
||||
var sliceChunks = require('./slice-chunks.js')
|
||||
|
||||
// Create a tokenizer.
|
||||
// Tokenizers deal with one type of data (e.g., containers, flow, text).
|
||||
// The parser is the object dealing with it all.
|
||||
// `initialize` works like other constructs, except that only its `tokenize`
|
||||
// function is used, in which case it doesn’t receive an `ok` or `nok`.
|
||||
// `from` can be given to set the point before the first character, although
|
||||
// when further lines are indented, they must be set with `defineSkip`.
|
||||
// Create a tokenizer.
// Tokenizers deal with one type of data (e.g., containers, flow, text).
// The parser is the object dealing with it all.
// `initialize` works like other constructs, except that only its `tokenize`
// function is used, in which case it doesn’t receive an `ok` or `nok`.
// `from` can be given to set the point before the first character, although
// when further lines are indented, they must be set with `defineSkip`.
function createTokenizer(parser, initialize, from) {
  // Current position in the input; `_index`/`_bufferIndex` are added below.
  var point = from
    ? shallow(from)
    : {
        line: 1,
        column: 1,
        offset: 0
      }
  var columnStart = {}
  var resolveAllConstructs = []
  var chunks = []
  var stack = []

  // Effect handlers passed to every construct's `tokenize`.
  var effects = {
    consume: consume,
    enter: enter,
    exit: exit,
    attempt: constructFactory(onsuccessfulconstruct),
    check: constructFactory(onsuccessfulcheck),
    interrupt: constructFactory(onsuccessfulcheck, {
      interrupt: true
    }),
    lazy: constructFactory(onsuccessfulcheck, {
      lazy: true
    })
  } // State and tools for resolving and serializing.

  var context = {
    previous: null,
    events: [],
    parser: parser,
    sliceStream: sliceStream,
    sliceSerialize: sliceSerialize,
    now: now,
    defineSkip: skip,
    write: write
  } // The state function.

  var state = initialize.tokenize.call(context, effects) // Track which character we expect to be consumed, to catch bugs.

  if (initialize.resolveAll) {
    resolveAllConstructs.push(initialize)
  } // Store where we are in the input stream.

  point._index = 0
  point._bufferIndex = -1
  return context

  // Feed a slice of chunks to the state machine; returns resolved events
  // only once a `null` (EOF) chunk has been written.
  function write(slice) {
    chunks = chunkedPush(chunks, slice)
    main() // Exit if we’re not done, resolve might change stuff.

    if (chunks[chunks.length - 1] !== null) {
      return []
    }

    addResult(initialize, 0) // Otherwise, resolve, and exit.

    context.events = resolveAll(resolveAllConstructs, context.events, context)
    return context.events
  } //
  // Tools.
  //

  function sliceSerialize(token) {
    return serializeChunks(sliceStream(token))
  }

  function sliceStream(token) {
    return sliceChunks(chunks, token)
  }

  // Snapshot of the current point (copied so callers can't mutate it).
  function now() {
    return shallow(point)
  }

  // Register a column to jump to when a new line starts at `value.line`.
  function skip(value) {
    columnStart[value.line] = value.column
    accountForPotentialSkip()
  } //
  // State management.
  //
  // Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
  // `consume`).
  // Here is where we walk through the chunks, which either include strings of
  // several characters, or numerical character codes.
  // The reason to do this in a loop instead of a call is so the stack can
  // drain.

  function main() {
    var chunkIndex
    var chunk

    while (point._index < chunks.length) {
      chunk = chunks[point._index] // If we’re in a buffer chunk, loop through it.

      if (typeof chunk === 'string') {
        chunkIndex = point._index

        if (point._bufferIndex < 0) {
          point._bufferIndex = 0
        }

        while (
          point._index === chunkIndex &&
          point._bufferIndex < chunk.length
        ) {
          go(chunk.charCodeAt(point._bufferIndex))
        }
      } else {
        go(chunk)
      }
    }
  } // Deal with one code.

  function go(code) {
    state = state(code)
  } // Move a character forward.

  function consume(code) {
    if (markdownLineEnding(code)) {
      point.line++
      point.column = 1
      // `-3` is the CR+LF code, which counts as two offsets.
      point.offset += code === -3 ? 2 : 1
      accountForPotentialSkip()
    } else if (code !== -1) {
      point.column++
      point.offset++
    } // Not in a string chunk.

    if (point._bufferIndex < 0) {
      point._index++
    } else {
      point._bufferIndex++ // At end of string chunk.

      if (point._bufferIndex === chunks[point._index].length) {
        point._bufferIndex = -1
        point._index++
      }
    } // Expose the previous character.

    context.previous = code // Mark as consumed.
  } // Start a token.

  function enter(type, fields) {
    var token = fields || {}
    token.type = type
    token.start = now()
    context.events.push(['enter', token, context])
    stack.push(token)
    return token
  } // Stop a token.

  function exit(type) {
    var token = stack.pop()
    token.end = now()
    context.events.push(['exit', token, context])
    return token
  } // Use results.

  function onsuccessfulconstruct(construct, info) {
    addResult(construct, info.from)
  } // Discard results.

  function onsuccessfulcheck(construct, info) {
    info.restore()
  } // Factory to attempt/check/interrupt.

  function constructFactory(onreturn, fields) {
    return hook // Handle either an object mapping codes to constructs, a list of
    // constructs, or a single construct.

    function hook(constructs, returnState, bogusState) {
      var listOfConstructs
      var constructIndex
      var currentConstruct
      var info
      return constructs.tokenize || 'length' in constructs
        ? handleListOfConstructs(miniflat(constructs))
        : handleMapOfConstructs

      function handleMapOfConstructs(code) {
        if (code in constructs || null in constructs) {
          return handleListOfConstructs(
            constructs.null
              ? /* c8 ignore next */
                miniflat(constructs[code]).concat(miniflat(constructs.null))
              : constructs[code]
          )(code)
        }

        return bogusState(code)
      }

      function handleListOfConstructs(list) {
        listOfConstructs = list
        constructIndex = 0
        return handleConstruct(list[constructIndex])
      }

      function handleConstruct(construct) {
        return start

        function start(code) {
          // To do: not nede to store if there is no bogus state, probably?
          // Currently doesn’t work because `inspect` in document does a check
          // w/o a bogus, which doesn’t make sense. But it does seem to help perf
          // by not storing.
          info = store()
          currentConstruct = construct

          if (!construct.partial) {
            context.currentConstruct = construct
          }

          // Disabled constructs (by name) always fail.
          if (
            construct.name &&
            context.parser.constructs.disable.null.indexOf(construct.name) > -1
          ) {
            return nok()
          }

          return construct.tokenize.call(
            fields ? assign({}, context, fields) : context,
            effects,
            ok,
            nok
          )(code)
        }
      }

      function ok(code) {
        onreturn(currentConstruct, info)
        return returnState
      }

      // On failure, restore and try the next construct in the list; fall
      // through to `bogusState` when none are left.
      function nok(code) {
        info.restore()

        if (++constructIndex < listOfConstructs.length) {
          return handleConstruct(listOfConstructs[constructIndex])
        }

        return bogusState
      }
    }
  }

  // Apply a successful construct's resolvers to the event stream.
  function addResult(construct, from) {
    if (construct.resolveAll && resolveAllConstructs.indexOf(construct) < 0) {
      resolveAllConstructs.push(construct)
    }

    if (construct.resolve) {
      chunkedSplice(
        context.events,
        from,
        context.events.length - from,
        construct.resolve(context.events.slice(from), context)
      )
    }

    if (construct.resolveTo) {
      context.events = construct.resolveTo(context.events, context)
    }
  }

  // Snapshot tokenizer state so a failed attempt can be rolled back.
  function store() {
    var startPoint = now()
    var startPrevious = context.previous
    var startCurrentConstruct = context.currentConstruct
    var startEventsIndex = context.events.length
    var startStack = Array.from(stack)
    return {
      restore: restore,
      from: startEventsIndex
    }

    function restore() {
      point = startPoint
      context.previous = startPrevious
      context.currentConstruct = startCurrentConstruct
      context.events.length = startEventsIndex
      stack = startStack
      accountForPotentialSkip()
    }
  }

  // Jump the column forward at the start of a line that has a defined skip.
  function accountForPotentialSkip() {
    if (point.line in columnStart && point.column < 2) {
      point.column = columnStart[point.line]
      point.offset += columnStart[point.line] - 1
    }
  }
}
|
||||
|
||||
module.exports = createTokenizer
|
||||
11
node_modules/micromark/dist/util/miniflat.js
generated
vendored
Normal file
11
node_modules/micromark/dist/util/miniflat.js
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
'use strict'
|
||||
|
||||
// Normalize a value into an array: `null`/`undefined` become `[]`,
// array-likes (anything with a `length`) pass through unchanged, and any
// other value is boxed as a one-item array.
function miniflat(value) {
  if (value === null || value === undefined) {
    return []
  }

  return 'length' in value ? value : [value]
}
|
||||
|
||||
module.exports = miniflat
|
||||
12
node_modules/micromark/dist/util/move-point.js
generated
vendored
Normal file
12
node_modules/micromark/dist/util/move-point.js
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
'use strict'
|
||||
|
||||
// chunks (replacement characters, tabs, or line endings).
|
||||
|
||||
// Shift a point forward by `offset` positions in place (column, offset, and
// buffer index all move together) and return the same mutated point.
function movePoint(point, offset) {
  point._bufferIndex += offset
  point.offset += offset
  point.column += offset
  return point
}
|
||||
|
||||
module.exports = movePoint
|
||||
18
node_modules/micromark/dist/util/normalize-identifier.js
generated
vendored
Normal file
18
node_modules/micromark/dist/util/normalize-identifier.js
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
'use strict'
|
||||
|
||||
// Normalize an identifier (e.g., a definition label): collapse runs of
// markdown whitespace into a single space, trim the edges, then case-fold.
function normalizeIdentifier(value) {
  var collapsed = value.replace(/[\t\n\r ]+/g, ' ')
  var trimmed = collapsed.replace(/^ | $/g, '')
  // Lower- then uppercase: some characters are considered “uppercase” but
  // round-trip to a different uppercase form, so both steps are needed for a
  // canonical key. Uppercase also avoids clashing with (lowercase) object
  // prototype method names.
  return trimmed.toLowerCase().toUpperCase()
}
|
||||
|
||||
module.exports = normalizeIdentifier
|
||||
62
node_modules/micromark/dist/util/normalize-uri.js
generated
vendored
Normal file
62
node_modules/micromark/dist/util/normalize-uri.js
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
'use strict'
|
||||
|
||||
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
|
||||
var fromCharCode = require('../constant/from-char-code.js')
|
||||
|
||||
// encoded sequences.
|
||||
|
||||
// Percent-encode a URL, leaving already-correct percent-encoded sequences
// and URL-safe ASCII untouched, and replacing lone surrogates with U+FFFD.
function normalizeUri(value) {
  var index = -1
  var result = []
  // Start of the next untouched slice to copy verbatim.
  var start = 0
  // Extra code units to step over (percent-escape digits or a low surrogate).
  var skip = 0
  var code
  var next
  var replace

  while (++index < value.length) {
    code = value.charCodeAt(index) // A correct percent encoded value.

    if (
      code === 37 &&
      asciiAlphanumeric(value.charCodeAt(index + 1)) &&
      asciiAlphanumeric(value.charCodeAt(index + 2))
    ) {
      skip = 2
    } // ASCII.
    else if (code < 128) {
      // Only non-URL-safe ASCII gets encoded.
      if (!/[!#$&-;=?-Z_a-z~]/.test(fromCharCode(code))) {
        replace = fromCharCode(code)
      }
    } // Astral.
    else if (code > 55295 && code < 57344) {
      next = value.charCodeAt(index + 1) // A correct surrogate pair.

      if (code < 56320 && next > 56319 && next < 57344) {
        replace = fromCharCode(code, next)
        skip = 1
      } // Lone surrogate.
      else {
        replace = '\uFFFD'
      }
    } // Unicode.
    else {
      replace = fromCharCode(code)
    }

    if (replace) {
      // Emit the untouched run, then the encoded replacement.
      result.push(value.slice(start, index), encodeURIComponent(replace))
      start = index + skip + 1
      replace = undefined
    }

    if (skip) {
      index += skip
      skip = 0
    }
  }

  // Append the trailing untouched run.
  return result.join('') + value.slice(start)
}
|
||||
|
||||
module.exports = normalizeUri
|
||||
11
node_modules/micromark/dist/util/prefix-size.js
generated
vendored
Normal file
11
node_modules/micromark/dist/util/prefix-size.js
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
'use strict'
|
||||
|
||||
var sizeChunks = require('./size-chunks.js')
|
||||
|
||||
// Size (in characters, tabs expanded) of the trailing token of `type` in
// `events`; zero when the last event is missing or of a different type.
function prefixSize(events, type) {
  var last = events[events.length - 1]

  if (!last || last[1].type !== type) {
    return 0
  }

  // `last[2]` is the tokenizer context, which can slice the token's chunks.
  return sizeChunks(last[2].sliceStream(last[1]))
}
|
||||
|
||||
module.exports = prefixSize
|
||||
13
node_modules/micromark/dist/util/regex-check.js
generated
vendored
Normal file
13
node_modules/micromark/dist/util/regex-check.js
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
'use strict'
|
||||
|
||||
var fromCharCode = require('../constant/from-char-code.js')
|
||||
|
||||
// Build a predicate that reports whether a character code, converted to a
// character, matches `regex`.
function regexCheck(regex) {
  return function check(code) {
    return regex.test(fromCharCode(code))
  }
}
|
||||
|
||||
module.exports = regexCheck
|
||||
20
node_modules/micromark/dist/util/resolve-all.js
generated
vendored
Normal file
20
node_modules/micromark/dist/util/resolve-all.js
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
'use strict'
|
||||
|
||||
// Run every construct's `resolveAll` over `events`, calling each distinct
// resolver function at most once even when several constructs share it.
function resolveAll(constructs, events, context) {
  var seen = []
  var position
  var resolver

  for (position = 0; position < constructs.length; position++) {
    resolver = constructs[position].resolveAll

    if (resolver && seen.indexOf(resolver) < 0) {
      events = resolver(events, context)
      seen.push(resolver)
    }
  }

  return events
}
|
||||
|
||||
module.exports = resolveAll
|
||||
26
node_modules/micromark/dist/util/safe-from-int.js
generated
vendored
Normal file
26
node_modules/micromark/dist/util/safe-from-int.js
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
'use strict'
|
||||
|
||||
var fromCharCode = require('../constant/from-char-code.js')
|
||||
|
||||
// Turn a numeric character reference (`value` parsed in `base`) into a
// character, substituting U+FFFD REPLACEMENT CHARACTER for code points that
// are not allowed as reference results.
function safeFromInt(value, base) {
  var code = parseInt(value, base)
  return isDisallowedCode(code) ? '\uFFFD' : fromCharCode(code)
}

// Whether `code` may not be produced by a character reference: controls,
// surrogate halves, noncharacters, or values beyond U+10FFFF.
function isDisallowedCode(code) {
  return (
    // C0 except for HT, LF, FF, CR, space
    code < 9 ||
    code === 11 ||
    (code > 13 && code < 32) || // Control character (DEL) of the basic block and C1 controls.
    (code > 126 && code < 160) || // Lone high surrogates and low surrogates.
    (code > 55295 && code < 57344) || // Noncharacters.
    (code > 64975 && code < 65008) ||
    (code & 65535) === 65535 ||
    (code & 65535) === 65534 || // Out of range
    code > 1114111
  )
}
|
||||
|
||||
module.exports = safeFromInt
|
||||
40
node_modules/micromark/dist/util/serialize-chunks.js
generated
vendored
Normal file
40
node_modules/micromark/dist/util/serialize-chunks.js
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
'use strict'
|
||||
|
||||
var fromCharCode = require('../constant/from-char-code.js')
|
||||
|
||||
// Turn a list of chunks (strings plus negative “virtual” character codes:
// -5 CR, -4 LF, -3 CR+LF, -2 tab, -1 virtual space, others via
// `fromCharCode`) back into a plain string. Virtual spaces that pad out a
// tab are folded into the tab itself.
function serializeChunks(chunks) {
  var result = []
  var index = -1
  var previousWasTab = false
  var chunk
  var value

  while (++index < chunks.length) {
    chunk = chunks[index]

    switch (chunk) {
      case -5:
        value = '\r'
        break
      case -4:
        value = '\n'
        break
      case -3:
        value = '\r\n'
        break
      case -2:
        value = '\t'
        break
      case -1:
        // Skip virtual spaces that immediately follow a tab.
        if (previousWasTab) continue
        value = ' '
        break
      default:
        // A raw string chunk, or (currently) the replacement character code.
        value = typeof chunk === 'string' ? chunk : fromCharCode(chunk)
    }

    previousWasTab = chunk === -2
    result.push(value)
  }

  return result.join('')
}
||||
|
||||
module.exports = serializeChunks
|
||||
9
node_modules/micromark/dist/util/shallow.js
generated
vendored
Normal file
9
node_modules/micromark/dist/util/shallow.js
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
'use strict'
|
||||
|
||||
var assign = require('../constant/assign.js')
|
||||
|
||||
// Create a shallow copy of `object` (used to snapshot points/tokens so that
// later mutation does not leak into the copy).
function shallow(object) {
  return assign({}, object)
}
|
||||
|
||||
module.exports = shallow
|
||||
16
node_modules/micromark/dist/util/size-chunks.js
generated
vendored
Normal file
16
node_modules/micromark/dist/util/size-chunks.js
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
'use strict'
|
||||
|
||||
// Counts tabs based on their expanded size, and CR+LF as one character.
|
||||
|
||||
// Counts tabs based on their expanded size, and CR+LF as one character:
// each string chunk contributes its length, each numeric code counts as 1.
function sizeChunks(chunks) {
  var total = 0
  var position

  for (position = 0; position < chunks.length; position++) {
    total +=
      typeof chunks[position] === 'string' ? chunks[position].length : 1
  }

  return total
}
|
||||
|
||||
module.exports = sizeChunks
|
||||
27
node_modules/micromark/dist/util/slice-chunks.js
generated
vendored
Normal file
27
node_modules/micromark/dist/util/slice-chunks.js
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
'use strict'
|
||||
|
||||
// Slice the chunks covered by `token` out of `chunks`, honoring the
// character-level `_bufferIndex` offsets into string chunks.
function sliceChunks(chunks, token) {
  var startIndex = token.start._index
  var startBufferIndex = token.start._bufferIndex
  var endIndex = token.end._index
  var endBufferIndex = token.end._bufferIndex
  var view

  if (startIndex === endIndex) {
    // Entirely within one chunk: a single (possibly partial) string.
    return [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
  }

  view = chunks.slice(startIndex, endIndex)

  // Trim the first chunk when the token starts inside it.
  if (startBufferIndex > -1) {
    view[0] = view[0].slice(startBufferIndex)
  }

  // Append the partially covered final chunk.
  if (endBufferIndex > 0) {
    view.push(chunks[endIndex].slice(0, endBufferIndex))
  }

  return view
}
|
||||
|
||||
module.exports = sliceChunks
|
||||
199
node_modules/micromark/dist/util/subtokenize.js
generated
vendored
Normal file
199
node_modules/micromark/dist/util/subtokenize.js
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
'use strict'
|
||||
|
||||
var assign = require('../constant/assign.js')
|
||||
var chunkedSplice = require('./chunked-splice.js')
|
||||
var shallow = require('./shallow.js')
|
||||
|
||||
// Resolve one layer of deferred content in `events`, an array of
// [kind, token, context] triples (kind is 'enter' or 'exit'). Tokens that
// carry a `contentType` are expanded in place via `subcontent`; returns
// `true` when nothing was expanded (i.e. the list is fully resolved).
// NOTE(review): callers presumably loop until this returns true — confirm.
function subtokenize(events) {
  // Maps an event index to the index to skip ahead to, for regions already
  // rewritten by `subcontent`.
  var jumps = {}
  var index = -1
  var event
  var lineIndex
  var otherIndex
  var otherEvent
  var parameters
  var subevents
  var more

  while (++index < events.length) {
    // Follow jump chains over regions handled in an earlier iteration.
    while (index in jumps) {
      index = jumps[index]
    }

    event = events[index] // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.

    if (
      index &&
      event[1].type === 'chunkFlow' &&
      events[index - 1][1].type === 'listItemPrefix'
    ) {
      subevents = event[1]._tokenizer.events
      otherIndex = 0

      // Skip over a leading blank line (its enter/exit pair) if present.
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'lineEndingBlank'
      ) {
        otherIndex += 2
      }

      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'content'
      ) {
        // Mark every `chunkText` token up to the next `content` token.
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === 'content') {
            break
          }

          if (subevents[otherIndex][1].type === 'chunkText') {
            subevents[otherIndex][1].isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    } // Enter.

    if (event[0] === 'enter') {
      if (event[1].contentType) {
        // Expand this token's content in place and record the jump table it
        // produces, then skip past the freshly inserted events.
        assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    } // Exit.
    else if (event[1]._container || event[1]._movePreviousLineEndings) {
      // Scan backwards over the run of line endings immediately before this
      // container exit.
      otherIndex = index
      lineIndex = undefined

      while (otherIndex--) {
        otherEvent = events[otherIndex]

        if (
          otherEvent[1].type === 'lineEnding' ||
          otherEvent[1].type === 'lineEndingBlank'
        ) {
          if (otherEvent[0] === 'enter') {
            // All line endings after the first one found become blank; the
            // earliest in the run stays a plain `lineEnding`.
            if (lineIndex) {
              events[lineIndex][1].type = 'lineEndingBlank'
            }

            otherEvent[1].type = 'lineEnding'
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }

      if (lineIndex) {
        // Fix position.
        event[1].end = shallow(events[lineIndex][1].start) // Switch container exit w/ line endings.

        // Move the container exit before the run of line endings it used to
        // follow, shifting the endings after it.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        chunkedSplice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }

  // `true` when no deferred content remained.
  return !more
}
|
||||
|
||||
// Tokenize the linked chain of content tokens starting at
// `events[eventIndex]` with a sub-tokenizer, splice the resulting child
// events back into `events` in place of each enter/exit pair, and return a
// map of {oldEventIndex: newEventIndex} jumps for the rewritten regions.
function subcontent(events, eventIndex) {
  var token = events[eventIndex][1]
  var context = events[eventIndex][2]
  var startPosition = eventIndex - 1
  // Event index of each linked token's `enter`, in chain order.
  var startPositions = []
  // Reuse a tokenizer already attached to the token, or create one for its
  // content type, positioned at the token's start.
  var tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  var childEvents = tokenizer.events
  var jumps = []
  var gaps = {}
  var stream
  var previous
  var index
  var entered
  var end
  var adjust // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.

  while (token) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== token) {
      // Empty.
    }

    startPositions.push(startPosition)

    if (!token._tokenizer) {
      stream = context.sliceStream(token)

      // Last token in the chain: terminate the stream with EOF.
      if (!token.next) {
        stream.push(null)
      }

      if (previous) {
        tokenizer.defineSkip(token.start)
      }

      // Flag set by `subtokenize` for the GFM tasklist extension; exposed to
      // the sub-tokenizer only for the duration of this write.
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }

      tokenizer.write(stream)

      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    } // Unravel the next token.

    previous = token
    token = token.next
  } // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.

  token = previous
  index = childEvents.length

  while (index--) {
    // Make sure we’ve at least seen something (final eol is part of the last
    // token).
    if (childEvents[index][0] === 'enter') {
      entered = true
    } else if (
      // Find a void token that includes a break.
      entered &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      // Everything after this break belongs to the current linked token;
      // splice it in, then step to the previous token in the chain.
      add(childEvents.slice(index + 1, end))
      // Help GC.
      token._tokenizer = token.next = undefined
      token = token.previous
      end = index + 1
    }
  }

  // Help GC.
  tokenizer.events = token._tokenizer = token.next = undefined // Do head:

  add(childEvents.slice(0, end))
  index = -1
  adjust = 0

  // Turn the recorded [start, end] splice ranges into a flat jump map,
  // accumulating the offset each earlier splice introduced.
  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }

  return gaps

  // Replace the enter/exit pair at the most recent start position with
  // `slice`, and record the range it now occupies.
  function add(slice) {
    var start = startPositions.pop()
    jumps.unshift([start, start + slice.length - 1])
    chunkedSplice(events, start, 2, slice)
  }
}
|
||||
|
||||
module.exports = subtokenize
|
||||
Reference in New Issue
Block a user