planning
All checks were successful
Publish To Prod / deploy_and_publish (push) Successful in 35s

This commit is contained in:
2024-10-14 09:15:30 +02:00
parent bcba00a730
commit 6e64e138e2
21059 changed files with 2317811 additions and 1 deletion

7
node_modules/micromark/lib/character/ascii-alpha.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is an ASCII letter (`A-Z` or `a-z`).
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/[A-Za-z]/)

3
node_modules/micromark/lib/character/ascii-alpha.mjs generated vendored Normal file
View File

@@ -0,0 +1,3 @@
// Check whether a character code is an ASCII letter (`A-Z` or `a-z`).
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/[A-Za-z]/)

View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is an ASCII letter or digit.
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/[\dA-Za-z]/)

View File

@@ -0,0 +1,3 @@
// Check whether a character code is an ASCII letter or digit.
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/[\dA-Za-z]/)

7
node_modules/micromark/lib/character/ascii-atext.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is ASCII “atext”: the characters the regex
// below allows (`#`-`'`, `*`, `+`, `-`-`9`, `=`, `?`, `A`-`Z`, `^`-`~`) —
// presumably the atom-text set used for email autolinks (TODO confirm against
// callers).
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/[#-'*+\--9=?A-Z^-~]/)

3
node_modules/micromark/lib/character/ascii-atext.mjs generated vendored Normal file
View File

@@ -0,0 +1,3 @@
// Check whether a character code is ASCII “atext” (see the `.js` twin of this
// module): `#`-`'`, `*`, `+`, `-`-`9`, `=`, `?`, `A`-`Z`, `^`-`~`.
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/[#-'*+\--9=?A-Z^-~]/)

14
node_modules/micromark/lib/character/ascii-control.js generated vendored Normal file
View File

@@ -0,0 +1,14 @@
'use strict'

var codes = require('./codes.js')

// Whether `code` is an ASCII control character: everything below SPACE (32) —
// which also covers the special negative whitespace codes and EOF, because
// `null < 32 == true` — plus DEL (127).
function asciiControl(code) {
  return code === codes.del || code < codes.space
}

module.exports = asciiControl

12
node_modules/micromark/lib/character/ascii-control.mjs generated vendored Normal file
View File

@@ -0,0 +1,12 @@
import codes from './codes.mjs'

// Whether `code` is an ASCII control character: everything below SPACE (32) —
// which also covers the special negative whitespace codes and EOF, because
// `null < 32 == true` — plus DEL (127).
function asciiControl(code) {
  return code === codes.del || code < codes.space
}

export default asciiControl

7
node_modules/micromark/lib/character/ascii-digit.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is an ASCII decimal digit (`0`-`9`).
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/\d/)

3
node_modules/micromark/lib/character/ascii-digit.mjs generated vendored Normal file
View File

@@ -0,0 +1,3 @@
// Check whether a character code is an ASCII decimal digit (`0`-`9`).
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/\d/)

View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is an ASCII hexadecimal digit
// (`0`-`9`, `A`-`F`, `a`-`f`).
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/[\dA-Fa-f]/)

View File

@@ -0,0 +1,3 @@
// Check whether a character code is an ASCII hexadecimal digit
// (`0`-`9`, `A`-`F`, `a`-`f`).
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/[\dA-Fa-f]/)

View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is ASCII punctuation: the four printable
// ranges around the digits and letters (`!`-`/`, `:`-`@`, `[`-`` ` ``,
// `{`-`~`).
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/[!-/:-@[-`{-~]/)

View File

@@ -0,0 +1,3 @@
// Check whether a character code is ASCII punctuation: the four printable
// ranges around the digits and letters (`!`-`/`, `:`-`@`, `[`-`` ` ``,
// `{`-`~`).
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/[!-/:-@[-`{-~]/)

148
node_modules/micromark/lib/character/codes.d.ts generated vendored Normal file
View File

@@ -0,0 +1,148 @@
// This module is generated by `script/`.

// A character code as used throughout micromark: `null` stands for EOF,
// negative numbers are the special preprocessed codes (line endings, tab
// expansion), and non-negative numbers are code points.
export type Code = null | number

// @for-script: REMOVE_ALL_THING_BELOW

// Human-readable names for every character code micromark deals with: the
// special negative codes, EOF, the C0 controls, printable ASCII, DEL, and two
// code points from the Unicode Specials block (BOM, replacement character).
export interface Codes {
  carriageReturn: -5
  lineFeed: -4
  carriageReturnLineFeed: -3
  horizontalTab: -2
  virtualSpace: -1
  eof: null
  nul: 0
  soh: 1
  stx: 2
  etx: 3
  eot: 4
  enq: 5
  ack: 6
  bel: 7
  bs: 8
  ht: 9
  lf: 10
  vt: 11
  ff: 12
  cr: 13
  so: 14
  si: 15
  dle: 16
  dc1: 17
  dc2: 18
  dc3: 19
  dc4: 20
  nak: 21
  syn: 22
  etb: 23
  can: 24
  em: 25
  sub: 26
  esc: 27
  fs: 28
  gs: 29
  rs: 30
  us: 31
  space: 32
  exclamationMark: 33
  quotationMark: 34
  numberSign: 35
  dollarSign: 36
  percentSign: 37
  ampersand: 38
  apostrophe: 39
  leftParenthesis: 40
  rightParenthesis: 41
  asterisk: 42
  plusSign: 43
  comma: 44
  dash: 45
  dot: 46
  slash: 47
  digit0: 48
  digit1: 49
  digit2: 50
  digit3: 51
  digit4: 52
  digit5: 53
  digit6: 54
  digit7: 55
  digit8: 56
  digit9: 57
  colon: 58
  semicolon: 59
  lessThan: 60
  equalsTo: 61
  greaterThan: 62
  questionMark: 63
  atSign: 64
  uppercaseA: 65
  uppercaseB: 66
  uppercaseC: 67
  uppercaseD: 68
  uppercaseE: 69
  uppercaseF: 70
  uppercaseG: 71
  uppercaseH: 72
  uppercaseI: 73
  uppercaseJ: 74
  uppercaseK: 75
  uppercaseL: 76
  uppercaseM: 77
  uppercaseN: 78
  uppercaseO: 79
  uppercaseP: 80
  uppercaseQ: 81
  uppercaseR: 82
  uppercaseS: 83
  uppercaseT: 84
  uppercaseU: 85
  uppercaseV: 86
  uppercaseW: 87
  uppercaseX: 88
  uppercaseY: 89
  uppercaseZ: 90
  leftSquareBracket: 91
  backslash: 92
  rightSquareBracket: 93
  caret: 94
  underscore: 95
  graveAccent: 96
  lowercaseA: 97
  lowercaseB: 98
  lowercaseC: 99
  lowercaseD: 100
  lowercaseE: 101
  lowercaseF: 102
  lowercaseG: 103
  lowercaseH: 104
  lowercaseI: 105
  lowercaseJ: 106
  lowercaseK: 107
  lowercaseL: 108
  lowercaseM: 109
  lowercaseN: 110
  lowercaseO: 111
  lowercaseP: 112
  lowercaseQ: 113
  lowercaseR: 114
  lowercaseS: 115
  lowercaseT: 116
  lowercaseU: 117
  lowercaseV: 118
  lowercaseW: 119
  lowercaseX: 120
  lowercaseY: 121
  lowercaseZ: 122
  leftCurlyBrace: 123
  verticalBar: 124
  rightCurlyBrace: 125
  tilde: 126
  del: 127
  byteOrderMarker: 65279
  replacementCharacter: 65533
}

declare const value: Codes
export default value

158
node_modules/micromark/lib/character/codes.js generated vendored Normal file
View File

@@ -0,0 +1,158 @@
'use strict'
// This module is compiled away!
//
// micromark works based on character codes.
// This module contains constants for the ASCII block and the replacement
// character.
// A couple of them are handled in a special way, such as the line endings
// (CR, LF, and CR+LF, commonly known as end-of-line: EOLs), the tab (horizontal
// tab) and its expansion based on what column its at (virtual space),
// and the end-of-file (eof) character.
// As values are preprocessed before handling them, the actual characters LF,
// CR, HT, and NUL (which is present as the replacement character), are
// guaranteed to not exist.
//
// Unicode basic latin block.
var codes = {
carriageReturn: -5,
lineFeed: -4,
carriageReturnLineFeed: -3,
horizontalTab: -2,
virtualSpace: -1,
eof: null,
nul: 0,
soh: 1,
stx: 2,
etx: 3,
eot: 4,
enq: 5,
ack: 6,
bel: 7,
bs: 8,
ht: 9, // `\t`
lf: 10, // `\n`
vt: 11, // `\v`
ff: 12, // `\f`
cr: 13, // `\r`
so: 14,
si: 15,
dle: 16,
dc1: 17,
dc2: 18,
dc3: 19,
dc4: 20,
nak: 21,
syn: 22,
etb: 23,
can: 24,
em: 25,
sub: 26,
esc: 27,
fs: 28,
gs: 29,
rs: 30,
us: 31,
space: 32,
exclamationMark: 33, // `!`
quotationMark: 34, // `"`
numberSign: 35, // `#`
dollarSign: 36, // `$`
percentSign: 37, // `%`
ampersand: 38, // `&`
apostrophe: 39, // `'`
leftParenthesis: 40, // `(`
rightParenthesis: 41, // `)`
asterisk: 42, // `*`
plusSign: 43, // `+`
comma: 44, // `,`
dash: 45, // `-`
dot: 46, // `.`
slash: 47, // `/`
digit0: 48, // `0`
digit1: 49, // `1`
digit2: 50, // `2`
digit3: 51, // `3`
digit4: 52, // `4`
digit5: 53, // `5`
digit6: 54, // `6`
digit7: 55, // `7`
digit8: 56, // `8`
digit9: 57, // `9`
colon: 58, // `:`
semicolon: 59, // `;`
lessThan: 60, // `<`
equalsTo: 61, // `=`
greaterThan: 62, // `>`
questionMark: 63, // `?`
atSign: 64, // `@`
uppercaseA: 65, // `A`
uppercaseB: 66, // `B`
uppercaseC: 67, // `C`
uppercaseD: 68, // `D`
uppercaseE: 69, // `E`
uppercaseF: 70, // `F`
uppercaseG: 71, // `G`
uppercaseH: 72, // `H`
uppercaseI: 73, // `I`
uppercaseJ: 74, // `J`
uppercaseK: 75, // `K`
uppercaseL: 76, // `L`
uppercaseM: 77, // `M`
uppercaseN: 78, // `N`
uppercaseO: 79, // `O`
uppercaseP: 80, // `P`
uppercaseQ: 81, // `Q`
uppercaseR: 82, // `R`
uppercaseS: 83, // `S`
uppercaseT: 84, // `T`
uppercaseU: 85, // `U`
uppercaseV: 86, // `V`
uppercaseW: 87, // `W`
uppercaseX: 88, // `X`
uppercaseY: 89, // `Y`
uppercaseZ: 90, // `Z`
leftSquareBracket: 91, // `[`
backslash: 92, // `\`
rightSquareBracket: 93, // `]`
caret: 94, // `^`
underscore: 95, // `_`
graveAccent: 96, // `` ` ``
lowercaseA: 97, // `a`
lowercaseB: 98, // `b`
lowercaseC: 99, // `c`
lowercaseD: 100, // `d`
lowercaseE: 101, // `e`
lowercaseF: 102, // `f`
lowercaseG: 103, // `g`
lowercaseH: 104, // `h`
lowercaseI: 105, // `i`
lowercaseJ: 106, // `j`
lowercaseK: 107, // `k`
lowercaseL: 108, // `l`
lowercaseM: 109, // `m`
lowercaseN: 110, // `n`
lowercaseO: 111, // `o`
lowercaseP: 112, // `p`
lowercaseQ: 113, // `q`
lowercaseR: 114, // `r`
lowercaseS: 115, // `s`
lowercaseT: 116, // `t`
lowercaseU: 117, // `u`
lowercaseV: 118, // `v`
lowercaseW: 119, // `w`
lowercaseX: 120, // `x`
lowercaseY: 121, // `y`
lowercaseZ: 122, // `z`
leftCurlyBrace: 123, // `{`
verticalBar: 124, // `|`
rightCurlyBrace: 125, // `}`
tilde: 126, // `~`
del: 127,
// Unicode Specials block.
byteOrderMarker: 65279,
// Unicode Specials block.
replacementCharacter: 65533 // `<60>`
}
module.exports = codes

154
node_modules/micromark/lib/character/codes.mjs generated vendored Normal file
View File

@@ -0,0 +1,154 @@
// This module is compiled away!
//
// micromark works based on character codes.
// This module contains constants for the ASCII block and the replacement
// character.
// A couple of them are handled in a special way, such as the line endings
// (CR, LF, and CR+LF, commonly known as end-of-line: EOLs), the tab (horizontal
// tab) and its expansion based on what column its at (virtual space),
// and the end-of-file (eof) character.
// As values are preprocessed before handling them, the actual characters LF,
// CR, HT, and NUL (which is present as the replacement character), are
// guaranteed to not exist.
//
// Unicode basic latin block.
export default {
carriageReturn: -5,
lineFeed: -4,
carriageReturnLineFeed: -3,
horizontalTab: -2,
virtualSpace: -1,
eof: null,
nul: 0,
soh: 1,
stx: 2,
etx: 3,
eot: 4,
enq: 5,
ack: 6,
bel: 7,
bs: 8,
ht: 9, // `\t`
lf: 10, // `\n`
vt: 11, // `\v`
ff: 12, // `\f`
cr: 13, // `\r`
so: 14,
si: 15,
dle: 16,
dc1: 17,
dc2: 18,
dc3: 19,
dc4: 20,
nak: 21,
syn: 22,
etb: 23,
can: 24,
em: 25,
sub: 26,
esc: 27,
fs: 28,
gs: 29,
rs: 30,
us: 31,
space: 32,
exclamationMark: 33, // `!`
quotationMark: 34, // `"`
numberSign: 35, // `#`
dollarSign: 36, // `$`
percentSign: 37, // `%`
ampersand: 38, // `&`
apostrophe: 39, // `'`
leftParenthesis: 40, // `(`
rightParenthesis: 41, // `)`
asterisk: 42, // `*`
plusSign: 43, // `+`
comma: 44, // `,`
dash: 45, // `-`
dot: 46, // `.`
slash: 47, // `/`
digit0: 48, // `0`
digit1: 49, // `1`
digit2: 50, // `2`
digit3: 51, // `3`
digit4: 52, // `4`
digit5: 53, // `5`
digit6: 54, // `6`
digit7: 55, // `7`
digit8: 56, // `8`
digit9: 57, // `9`
colon: 58, // `:`
semicolon: 59, // `;`
lessThan: 60, // `<`
equalsTo: 61, // `=`
greaterThan: 62, // `>`
questionMark: 63, // `?`
atSign: 64, // `@`
uppercaseA: 65, // `A`
uppercaseB: 66, // `B`
uppercaseC: 67, // `C`
uppercaseD: 68, // `D`
uppercaseE: 69, // `E`
uppercaseF: 70, // `F`
uppercaseG: 71, // `G`
uppercaseH: 72, // `H`
uppercaseI: 73, // `I`
uppercaseJ: 74, // `J`
uppercaseK: 75, // `K`
uppercaseL: 76, // `L`
uppercaseM: 77, // `M`
uppercaseN: 78, // `N`
uppercaseO: 79, // `O`
uppercaseP: 80, // `P`
uppercaseQ: 81, // `Q`
uppercaseR: 82, // `R`
uppercaseS: 83, // `S`
uppercaseT: 84, // `T`
uppercaseU: 85, // `U`
uppercaseV: 86, // `V`
uppercaseW: 87, // `W`
uppercaseX: 88, // `X`
uppercaseY: 89, // `Y`
uppercaseZ: 90, // `Z`
leftSquareBracket: 91, // `[`
backslash: 92, // `\`
rightSquareBracket: 93, // `]`
caret: 94, // `^`
underscore: 95, // `_`
graveAccent: 96, // `` ` ``
lowercaseA: 97, // `a`
lowercaseB: 98, // `b`
lowercaseC: 99, // `c`
lowercaseD: 100, // `d`
lowercaseE: 101, // `e`
lowercaseF: 102, // `f`
lowercaseG: 103, // `g`
lowercaseH: 104, // `h`
lowercaseI: 105, // `i`
lowercaseJ: 106, // `j`
lowercaseK: 107, // `k`
lowercaseL: 108, // `l`
lowercaseM: 109, // `m`
lowercaseN: 110, // `n`
lowercaseO: 111, // `o`
lowercaseP: 112, // `p`
lowercaseQ: 113, // `q`
lowercaseR: 114, // `r`
lowercaseS: 115, // `s`
lowercaseT: 116, // `t`
lowercaseU: 117, // `u`
lowercaseV: 118, // `v`
lowercaseW: 119, // `w`
lowercaseX: 120, // `x`
lowercaseY: 121, // `y`
lowercaseZ: 122, // `z`
leftCurlyBrace: 123, // `{`
verticalBar: 124, // `|`
rightCurlyBrace: 125, // `}`
tilde: 126, // `~`
del: 127,
// Unicode Specials block.
byteOrderMarker: 65279,
// Unicode Specials block.
replacementCharacter: 65533 // `<60>`
}

View File

@@ -0,0 +1,9 @@
'use strict'

var codes = require('./codes.js')

// Whether `code` is markdown whitespace: a regular space, or any of the
// special codes below NUL (0) — the negative line-ending/tab/virtual-space
// codes and EOF (`null < 0 == true`).
function markdownLineEndingOrSpace(code) {
  return code === codes.space || code < codes.nul
}

module.exports = markdownLineEndingOrSpace

View File

@@ -0,0 +1,7 @@
import codes from './codes.mjs'

// Whether `code` is markdown whitespace: a regular space, or any of the
// special codes below NUL (0) — the negative line-ending/tab/virtual-space
// codes and EOF (`null < 0 == true`).
function markdownLineEndingOrSpace(code) {
  return code === codes.space || code < codes.nul
}

export default markdownLineEndingOrSpace

View File

@@ -0,0 +1,9 @@
'use strict'

var codes = require('./codes.js')

// Whether `code` is a preprocessed line ending: carriage return (-5), line
// feed (-4), or CR+LF (-3) — i.e., anything below horizontal tab (-2).
function markdownLineEnding(code) {
  return code < codes.horizontalTab
}

module.exports = markdownLineEnding

View File

@@ -0,0 +1,7 @@
import codes from './codes.mjs'

// Whether `code` is a preprocessed line ending: carriage return (-5), line
// feed (-4), or CR+LF (-3) — i.e., anything below horizontal tab (-2).
function markdownLineEnding(code) {
  return code < codes.horizontalTab
}

export default markdownLineEnding

13
node_modules/micromark/lib/character/markdown-space.js generated vendored Normal file
View File

@@ -0,0 +1,13 @@
'use strict'

var codes = require('./codes.js')

// Whether `code` is markdown “space”: a horizontal tab, a virtual space
// (tab expansion), or a regular space.
function markdownSpace(code) {
  switch (code) {
    case codes.horizontalTab:
    case codes.virtualSpace:
    case codes.space:
      return true
    default:
      return false
  }
}

module.exports = markdownSpace

View File

@@ -0,0 +1,11 @@
import codes from './codes.mjs'

// Whether `code` is markdown “space”: a horizontal tab, a virtual space
// (tab expansion), or a regular space.
function markdownSpace(code) {
  switch (code) {
    case codes.horizontalTab:
    case codes.virtualSpace:
    case codes.space:
      return true
    default:
      return false
  }
}

export default markdownSpace

View File

@@ -0,0 +1,10 @@
'use strict'

// Check whether a character code is Unicode punctuation.
var unicodePunctuationRegex = require('../constant/unicode-punctuation-regex.js')
var regexCheck = require('../util/regex-check.js')

// Size note: removing ASCII from the regex and using `ascii-punctuation` here
// in fact adds to the bundle size.
module.exports = regexCheck(unicodePunctuationRegex)

View File

@@ -0,0 +1,6 @@
// Check whether a character code is Unicode punctuation.
import unicodePunctuationRegex from '../constant/unicode-punctuation-regex.mjs'
import regexCheck from '../util/regex-check.mjs'

// Size note: removing ASCII from the regex and using `ascii-punctuation` here
// in fact adds to the bundle size.
export default regexCheck(unicodePunctuationRegex)

View File

@@ -0,0 +1,7 @@
'use strict'

// Check whether a character code is Unicode whitespace (regex `\s`).
var regexCheck = require('../util/regex-check.js')

module.exports = regexCheck(/\s/)

View File

@@ -0,0 +1,3 @@
// Check whether a character code is Unicode whitespace (regex `\s`).
import regexCheck from '../util/regex-check.mjs'

export default regexCheck(/\s/)

210
node_modules/micromark/lib/character/values.d.ts generated vendored Normal file
View File

@@ -0,0 +1,210 @@
// This module is generated by `script/`.

// The string value of a single character micromark deals with: tab, line
// feed, carriage return, space, the printable ASCII characters, and the
// Unicode replacement character.
//
// Fix: the replacement-character literal in this copy was mojibake
// (`'<27>'`); it must be U+FFFD, matching `codes.replacementCharacter`
// (65533) in `codes.d.ts`.
export type Value =
  | '\t'
  | '\n'
  | '\r'
  | ' '
  | '!'
  | '"'
  | '#'
  | '$'
  | '%'
  | '&'
  | "'"
  | '('
  | ')'
  | '*'
  | '+'
  | ','
  | '-'
  | '.'
  | '/'
  | '0'
  | '1'
  | '2'
  | '3'
  | '4'
  | '5'
  | '6'
  | '7'
  | '8'
  | '9'
  | ':'
  | ';'
  | '<'
  | '='
  | '>'
  | '?'
  | '@'
  | 'A'
  | 'B'
  | 'C'
  | 'D'
  | 'E'
  | 'F'
  | 'G'
  | 'H'
  | 'I'
  | 'J'
  | 'K'
  | 'L'
  | 'M'
  | 'N'
  | 'O'
  | 'P'
  | 'Q'
  | 'R'
  | 'S'
  | 'T'
  | 'U'
  | 'V'
  | 'W'
  | 'X'
  | 'Y'
  | 'Z'
  | '['
  | '\\'
  | ']'
  | '^'
  | '_'
  | '`'
  | 'a'
  | 'b'
  | 'c'
  | 'd'
  | 'e'
  | 'f'
  | 'g'
  | 'h'
  | 'i'
  | 'j'
  | 'k'
  | 'l'
  | 'm'
  | 'n'
  | 'o'
  | 'p'
  | 'q'
  | 'r'
  | 's'
  | 't'
  | 'u'
  | 'v'
  | 'w'
  | 'x'
  | 'y'
  | 'z'
  | '{'
  | '|'
  | '}'
  | '~'
  | '\uFFFD'

// @for-script: REMOVE_ALL_THING_BELOW

// Human-readable names for the string values above.
export interface Values {
  ht: '\t'
  lf: '\n'
  cr: '\r'
  space: ' '
  exclamationMark: '!'
  quotationMark: '"'
  numberSign: '#'
  dollarSign: '$'
  percentSign: '%'
  ampersand: '&'
  apostrophe: "'"
  leftParenthesis: '('
  rightParenthesis: ')'
  asterisk: '*'
  plusSign: '+'
  comma: ','
  dash: '-'
  dot: '.'
  slash: '/'
  digit0: '0'
  digit1: '1'
  digit2: '2'
  digit3: '3'
  digit4: '4'
  digit5: '5'
  digit6: '6'
  digit7: '7'
  digit8: '8'
  digit9: '9'
  colon: ':'
  semicolon: ';'
  lessThan: '<'
  equalsTo: '='
  greaterThan: '>'
  questionMark: '?'
  atSign: '@'
  uppercaseA: 'A'
  uppercaseB: 'B'
  uppercaseC: 'C'
  uppercaseD: 'D'
  uppercaseE: 'E'
  uppercaseF: 'F'
  uppercaseG: 'G'
  uppercaseH: 'H'
  uppercaseI: 'I'
  uppercaseJ: 'J'
  uppercaseK: 'K'
  uppercaseL: 'L'
  uppercaseM: 'M'
  uppercaseN: 'N'
  uppercaseO: 'O'
  uppercaseP: 'P'
  uppercaseQ: 'Q'
  uppercaseR: 'R'
  uppercaseS: 'S'
  uppercaseT: 'T'
  uppercaseU: 'U'
  uppercaseV: 'V'
  uppercaseW: 'W'
  uppercaseX: 'X'
  uppercaseY: 'Y'
  uppercaseZ: 'Z'
  leftSquareBracket: '['
  backslash: '\\'
  rightSquareBracket: ']'
  caret: '^'
  underscore: '_'
  graveAccent: '`'
  lowercaseA: 'a'
  lowercaseB: 'b'
  lowercaseC: 'c'
  lowercaseD: 'd'
  lowercaseE: 'e'
  lowercaseF: 'f'
  lowercaseG: 'g'
  lowercaseH: 'h'
  lowercaseI: 'i'
  lowercaseJ: 'j'
  lowercaseK: 'k'
  lowercaseL: 'l'
  lowercaseM: 'm'
  lowercaseN: 'n'
  lowercaseO: 'o'
  lowercaseP: 'p'
  lowercaseQ: 'q'
  lowercaseR: 'r'
  lowercaseS: 's'
  lowercaseT: 't'
  lowercaseU: 'u'
  lowercaseV: 'v'
  lowercaseW: 'w'
  lowercaseX: 'x'
  lowercaseY: 'y'
  lowercaseZ: 'z'
  leftCurlyBrace: '{'
  verticalBar: '|'
  rightCurlyBrace: '}'
  tilde: '~'
  replacementCharacter: '\uFFFD'
}

declare const value: Values
export default value

111
node_modules/micromark/lib/character/values.js generated vendored Normal file
View File

@@ -0,0 +1,111 @@
'use strict'

// This module is compiled away!
//
// While micromark works based on character codes, this module includes the
// string versions of them.
// The C0 block, except for LF, CR, HT, and w/ the replacement character added,
// are available here.
var values = {
  ht: '\t',
  lf: '\n',
  cr: '\r',
  space: ' ',
  exclamationMark: '!',
  quotationMark: '"',
  numberSign: '#',
  dollarSign: '$',
  percentSign: '%',
  ampersand: '&',
  apostrophe: "'",
  leftParenthesis: '(',
  rightParenthesis: ')',
  asterisk: '*',
  plusSign: '+',
  comma: ',',
  dash: '-',
  dot: '.',
  slash: '/',
  digit0: '0',
  digit1: '1',
  digit2: '2',
  digit3: '3',
  digit4: '4',
  digit5: '5',
  digit6: '6',
  digit7: '7',
  digit8: '8',
  digit9: '9',
  colon: ':',
  semicolon: ';',
  lessThan: '<',
  equalsTo: '=',
  greaterThan: '>',
  questionMark: '?',
  atSign: '@',
  uppercaseA: 'A',
  uppercaseB: 'B',
  uppercaseC: 'C',
  uppercaseD: 'D',
  uppercaseE: 'E',
  uppercaseF: 'F',
  uppercaseG: 'G',
  uppercaseH: 'H',
  uppercaseI: 'I',
  uppercaseJ: 'J',
  uppercaseK: 'K',
  uppercaseL: 'L',
  uppercaseM: 'M',
  uppercaseN: 'N',
  uppercaseO: 'O',
  uppercaseP: 'P',
  uppercaseQ: 'Q',
  uppercaseR: 'R',
  uppercaseS: 'S',
  uppercaseT: 'T',
  uppercaseU: 'U',
  uppercaseV: 'V',
  uppercaseW: 'W',
  uppercaseX: 'X',
  uppercaseY: 'Y',
  uppercaseZ: 'Z',
  leftSquareBracket: '[',
  backslash: '\\',
  rightSquareBracket: ']',
  caret: '^',
  underscore: '_',
  graveAccent: '`',
  lowercaseA: 'a',
  lowercaseB: 'b',
  lowercaseC: 'c',
  lowercaseD: 'd',
  lowercaseE: 'e',
  lowercaseF: 'f',
  lowercaseG: 'g',
  lowercaseH: 'h',
  lowercaseI: 'i',
  lowercaseJ: 'j',
  lowercaseK: 'k',
  lowercaseL: 'l',
  lowercaseM: 'm',
  lowercaseN: 'n',
  lowercaseO: 'o',
  lowercaseP: 'p',
  lowercaseQ: 'q',
  lowercaseR: 'r',
  lowercaseS: 's',
  lowercaseT: 't',
  lowercaseU: 'u',
  lowercaseV: 'v',
  lowercaseW: 'w',
  lowercaseX: 'x',
  lowercaseY: 'y',
  lowercaseZ: 'z',
  leftCurlyBrace: '{',
  verticalBar: '|',
  rightCurlyBrace: '}',
  tilde: '~',
  // Fix: this copy carried mojibake (`'<27>'`); the value is U+FFFD, matching
  // `codes.replacementCharacter` (65533).
  replacementCharacter: '\uFFFD'
}

module.exports = values

107
node_modules/micromark/lib/character/values.mjs generated vendored Normal file
View File

@@ -0,0 +1,107 @@
// This module is compiled away!
//
// While micromark works based on character codes, this module includes the
// string versions of them.
// The C0 block, except for LF, CR, HT, and w/ the replacement character added,
// are available here.
var values = {
  ht: '\t',
  lf: '\n',
  cr: '\r',
  space: ' ',
  exclamationMark: '!',
  quotationMark: '"',
  numberSign: '#',
  dollarSign: '$',
  percentSign: '%',
  ampersand: '&',
  apostrophe: "'",
  leftParenthesis: '(',
  rightParenthesis: ')',
  asterisk: '*',
  plusSign: '+',
  comma: ',',
  dash: '-',
  dot: '.',
  slash: '/',
  digit0: '0',
  digit1: '1',
  digit2: '2',
  digit3: '3',
  digit4: '4',
  digit5: '5',
  digit6: '6',
  digit7: '7',
  digit8: '8',
  digit9: '9',
  colon: ':',
  semicolon: ';',
  lessThan: '<',
  equalsTo: '=',
  greaterThan: '>',
  questionMark: '?',
  atSign: '@',
  uppercaseA: 'A',
  uppercaseB: 'B',
  uppercaseC: 'C',
  uppercaseD: 'D',
  uppercaseE: 'E',
  uppercaseF: 'F',
  uppercaseG: 'G',
  uppercaseH: 'H',
  uppercaseI: 'I',
  uppercaseJ: 'J',
  uppercaseK: 'K',
  uppercaseL: 'L',
  uppercaseM: 'M',
  uppercaseN: 'N',
  uppercaseO: 'O',
  uppercaseP: 'P',
  uppercaseQ: 'Q',
  uppercaseR: 'R',
  uppercaseS: 'S',
  uppercaseT: 'T',
  uppercaseU: 'U',
  uppercaseV: 'V',
  uppercaseW: 'W',
  uppercaseX: 'X',
  uppercaseY: 'Y',
  uppercaseZ: 'Z',
  leftSquareBracket: '[',
  backslash: '\\',
  rightSquareBracket: ']',
  caret: '^',
  underscore: '_',
  graveAccent: '`',
  lowercaseA: 'a',
  lowercaseB: 'b',
  lowercaseC: 'c',
  lowercaseD: 'd',
  lowercaseE: 'e',
  lowercaseF: 'f',
  lowercaseG: 'g',
  lowercaseH: 'h',
  lowercaseI: 'i',
  lowercaseJ: 'j',
  lowercaseK: 'k',
  lowercaseL: 'l',
  lowercaseM: 'm',
  lowercaseN: 'n',
  lowercaseO: 'o',
  lowercaseP: 'p',
  lowercaseQ: 'q',
  lowercaseR: 'r',
  lowercaseS: 's',
  lowercaseT: 't',
  lowercaseU: 'u',
  lowercaseV: 'v',
  lowercaseW: 'w',
  lowercaseX: 'x',
  lowercaseY: 'y',
  lowercaseZ: 'z',
  leftCurlyBrace: '{',
  verticalBar: '|',
  rightCurlyBrace: '}',
  tilde: '~',
  // Fix: this copy carried mojibake (`'<27>'`); the value is U+FFFD, matching
  // `codes.replacementCharacter` (65533).
  replacementCharacter: '\uFFFD'
}

export default values

810
node_modules/micromark/lib/compile/html.js generated vendored Normal file
View File

@@ -0,0 +1,810 @@
'use strict'
var decodeEntity = require('parse-entities/decode-entity.js')
var codes = require('../character/codes.js')
var assign = require('../constant/assign.js')
var constants = require('../constant/constants.js')
var hasOwnProperty = require('../constant/has-own-property.js')
var types = require('../constant/types.js')
var combineHtmlExtensions = require('../util/combine-html-extensions.js')
var chunkedPush = require('../util/chunked-push.js')
var miniflat = require('../util/miniflat.js')
var normalizeIdentifier = require('../util/normalize-identifier.js')
var normalizeUri = require('../util/normalize-uri.js')
var safeFromInt = require('../util/safe-from-int.js')
function _interopDefaultLegacy(e) {
return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var decodeEntity__default = /*#__PURE__*/ _interopDefaultLegacy(decodeEntity)
// While micromark is a lexer/tokenizer, the common case of going from markdown
// This ensures that certain characters which have special meaning in HTML are
// dealt with.
// Technically, we can skip `>` and `"` in many cases, but CM includes them.
var characterReferences = {'"': 'quot', '&': 'amp', '<': 'lt', '>': 'gt'}
// These two are allowlists of essentially safe protocols for full URLs in
// respectively the `href` (on `<a>`) and `src` (on `<img>`) attributes.
// They are based on what is allowed on GitHub,
// <https://github.com/syntax-tree/hast-util-sanitize/blob/9275b21/lib/github.json#L31>
var protocolHref = /^(https?|ircs?|mailto|xmpp)$/i
var protocolSrc = /^https?$/i
function compileHtml(options) {
// Configuration.
// Includes `htmlExtensions` (an array of extensions), `defaultLineEnding` (a
// preferred EOL), `allowDangerousProtocol` (whether to allow potential
// dangerous protocols), and `allowDangerousHtml` (whether to allow potential
// dangerous HTML).
var settings = options || {}
// Tags is needed because according to markdown, links and emphasis and
// whatnot can exist in images, however, as HTML doesnt allow content in
// images, the tags are ignored in the `alt` attribute, but the content
// remains.
var tags = true
// An object to track identifiers to media (URLs and titles) defined with
// definitions.
var definitions = {}
// A lot of the handlers need to capture some of the output data, modify it
// somehow, and then deal with it.
// We do that by tracking a stack of buffers, that can be opened (with
// `buffer`) and closed (with `resume`) to access them.
var buffers = [[]]
// As we can have links in images and the other way around, where the deepest
// ones are closed first, we need to track which one were in.
var mediaStack = []
// Same for tightness, which is specific to lists.
// We need to track if were currently in a tight or loose container.
var tightStack = []
var defaultHandlers = {
enter: {
blockQuote: onenterblockquote,
codeFenced: onentercodefenced,
codeFencedFenceInfo: buffer,
codeFencedFenceMeta: buffer,
codeIndented: onentercodeindented,
codeText: onentercodetext,
content: onentercontent,
definition: onenterdefinition,
definitionDestinationString: onenterdefinitiondestinationstring,
definitionLabelString: buffer,
definitionTitleString: buffer,
emphasis: onenteremphasis,
htmlFlow: onenterhtmlflow,
htmlText: onenterhtml,
image: onenterimage,
label: buffer,
link: onenterlink,
listItemMarker: onenterlistitemmarker,
listItemValue: onenterlistitemvalue,
listOrdered: onenterlistordered,
listUnordered: onenterlistunordered,
paragraph: onenterparagraph,
reference: buffer,
resource: onenterresource,
resourceDestinationString: onenterresourcedestinationstring,
resourceTitleString: buffer,
setextHeading: onentersetextheading,
strong: onenterstrong
},
exit: {
atxHeading: onexitatxheading,
atxHeadingSequence: onexitatxheadingsequence,
autolinkEmail: onexitautolinkemail,
autolinkProtocol: onexitautolinkprotocol,
blockQuote: onexitblockquote,
characterEscapeValue: onexitdata,
characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker,
characterReferenceMarkerNumeric: onexitcharacterreferencemarker,
characterReferenceValue: onexitcharacterreferencevalue,
codeFenced: onexitflowcode,
codeFencedFence: onexitcodefencedfence,
codeFencedFenceInfo: onexitcodefencedfenceinfo,
codeFencedFenceMeta: resume,
codeFlowValue: onexitcodeflowvalue,
codeIndented: onexitflowcode,
codeText: onexitcodetext,
codeTextData: onexitdata,
data: onexitdata,
definition: onexitdefinition,
definitionDestinationString: onexitdefinitiondestinationstring,
definitionLabelString: onexitdefinitionlabelstring,
definitionTitleString: onexitdefinitiontitlestring,
emphasis: onexitemphasis,
hardBreakEscape: onexithardbreak,
hardBreakTrailing: onexithardbreak,
htmlFlow: onexithtml,
htmlFlowData: onexitdata,
htmlText: onexithtml,
htmlTextData: onexitdata,
image: onexitmedia,
label: onexitlabel,
labelText: onexitlabeltext,
lineEnding: onexitlineending,
link: onexitmedia,
listOrdered: onexitlistordered,
listUnordered: onexitlistunordered,
paragraph: onexitparagraph,
reference: resume,
referenceString: onexitreferencestring,
resource: resume,
resourceDestinationString: onexitresourcedestinationstring,
resourceTitleString: onexitresourcetitlestring,
setextHeading: onexitsetextheading,
setextHeadingLineSequence: onexitsetextheadinglinesequence,
setextHeadingText: onexitsetextheadingtext,
strong: onexitstrong,
thematicBreak: onexitthematicbreak
}
}
// Combine the HTML extensions with the default handlers.
// An HTML extension is an object whose fields are either `enter` or `exit`
// (reflecting whether a token is entered or exited).
// The values at such objects are names of tokens mapping to handlers.
// Handlers are called, respectively when a token is opener or closed, with
// that token, and a context as `this`.
var handlers = combineHtmlExtensions(
[defaultHandlers].concat(miniflat(settings.htmlExtensions))
)
// Handlers do often need to keep track of some state.
// That state is provided here as a key-value store (an object).
var data = {tightStack: tightStack}
// The context for handlers references a couple of useful functions.
// In handlers from extensions, those can be accessed at `this`.
// For the handlers here, they can be accessed directly.
var context = {
lineEndingIfNeeded: lineEndingIfNeeded,
options: settings,
encode: encode,
raw: raw,
tag: tag,
buffer: buffer,
resume: resume,
setData: setData,
getData: getData
}
// Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
// markdown document over to the compiled HTML.
// In some cases, such as `> a`, CommonMark requires that extra line endings
// are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
// This variable hold the default line ending when given (or `undefined`),
// and in the latter case will be updated to the first found line ending if
// there is one.
var lineEndingStyle = settings.defaultLineEnding
// Return the function that handles a slice of events.
return compile
// Deal w/ a slice of events.
// Return either the empty string if theres nothing of note to return, or the
// result when done.
function compile(events) {
// As definitions can come after references, we need to figure out the media
// (urls and titles) defined by them before handling the references.
// So, we do sort of what HTML does: put metadata at the start (in head), and
// then put content after (`body`).
var head = []
var body = []
var index
var start
var listStack
var handler
var result
index = -1
start = 0
listStack = []
while (++index < events.length) {
// Figure out the line ending style used in the document.
if (
!lineEndingStyle &&
(events[index][1].type === types.lineEnding ||
events[index][1].type === types.lineEndingBlank)
) {
lineEndingStyle = events[index][2].sliceSerialize(events[index][1])
}
// Preprocess lists to infer whether the list is loose or not.
if (
events[index][1].type === types.listOrdered ||
events[index][1].type === types.listUnordered
) {
if (events[index][0] === 'enter') {
listStack.push(index)
} else {
prepareList(events.slice(listStack.pop(), index))
}
}
// Move definitions to the front.
if (events[index][1].type === types.definition) {
if (events[index][0] === 'enter') {
body = chunkedPush(body, events.slice(start, index))
start = index
} else {
head = chunkedPush(head, events.slice(start, index + 1))
start = index + 1
}
}
}
head = chunkedPush(head, body)
head = chunkedPush(head, events.slice(start))
result = head
index = -1
// Handle the start of the document, if defined.
if (handlers.enter.null) {
handlers.enter.null.call(context)
}
// Handle all events.
while (++index < events.length) {
handler = handlers[result[index][0]]
if (hasOwnProperty.call(handler, result[index][1].type)) {
handler[result[index][1].type].call(
assign({sliceSerialize: result[index][2].sliceSerialize}, context),
result[index][1]
)
}
}
// Handle the end of the document, if defined.
if (handlers.exit.null) {
handlers.exit.null.call(context)
}
return buffers[0].join('')
}
// Figure out whether lists are loose or not.
function prepareList(slice) {
var length = slice.length - 1 // Skip close.
var index = 0 // Skip open.
var containerBalance = 0
var loose
var atMarker
var event
while (++index < length) {
event = slice[index]
if (event[1]._container) {
atMarker = undefined
if (event[0] === 'enter') {
containerBalance++
} else {
containerBalance--
}
} else if (event[1].type === types.listItemPrefix) {
if (event[0] === 'exit') {
atMarker = true
}
} else if (event[1].type === types.linePrefix);
else if (event[1].type === types.lineEndingBlank) {
if (event[0] === 'enter' && !containerBalance) {
if (atMarker) {
atMarker = undefined
} else {
loose = true
}
}
} else {
atMarker = undefined
}
}
slice[0][1]._loose = loose
}
// Set data into the key-value store.
function setData(key, value) {
data[key] = value
}
// Get data from the key-value store.
function getData(key) {
return data[key]
}
// Capture some of the output data.
function buffer() {
buffers.push([])
}
// Stop capturing and access the output data.
function resume() {
return buffers.pop().join('')
}
// Output (parts of) HTML tags.
function tag(value) {
if (!tags) return
setData('lastWasTag', true)
buffers[buffers.length - 1].push(value)
}
// Output raw data.
function raw(value) {
setData('lastWasTag')
buffers[buffers.length - 1].push(value)
}
// Output an extra line ending.
function lineEnding() {
raw(lineEndingStyle || '\n')
}
// Output an extra line ending if the previous value wasnt EOF/EOL.
function lineEndingIfNeeded() {
var buffer = buffers[buffers.length - 1]
var slice = buffer[buffer.length - 1]
var previous = slice ? slice.charCodeAt(slice.length - 1) : codes.eof
if (
previous === codes.lf ||
previous === codes.cr ||
previous === codes.eof
) {
return
}
lineEnding()
}
// Make a value safe for injection in HTML by replacing `"`, `&`, `<`, and
// `>` with named character references (except w/ `ignoreEncode`, which is
// set while passing through raw HTML or pre-encoded URLs).
function encode(value) {
  return getData('ignoreEncode') ? value : value.replace(/["&<>]/g, replace)
  function replace(value) {
    return '&' + characterReferences[value] + ';'
  }
}
// Make a value safe for injection as a URL.
// This does encode unsafe characters with percent-encoding, skipping already
// encoded sequences (`normalizeUri`).
// Further unsafe characters are encoded as character references (`encode`).
// Finally, if the URL includes an unknown protocol (such as a dangerous
// example, `javascript:`), the value is ignored.
//
// `protocol` is an allowlist regex (`protocolHref` or `protocolSrc`) tested
// against the scheme, i.e. the part before the first `:`.
function url(url, protocol) {
  var value = encode(normalizeUri(url || ''))
  var colon = value.indexOf(':')
  var questionMark = value.indexOf('?')
  var numberSign = value.indexOf('#')
  var slash = value.indexOf('/')
  if (
    settings.allowDangerousProtocol ||
    // If there is no protocol, its relative.
    colon < 0 ||
    // If the first colon is after a `?`, `#`, or `/`, its not a protocol.
    (slash > -1 && colon > slash) ||
    (questionMark > -1 && colon > questionMark) ||
    (numberSign > -1 && colon > numberSign) ||
    // It is a protocol, it should be allowed.
    protocol.test(value.slice(0, colon))
  ) {
    return value
  }
  return ''
}
//
// Handlers.
//
function onenterlistordered(token) {
tightStack.push(!token._loose)
lineEndingIfNeeded()
tag('<ol')
setData('expectFirstItem', true)
}
function onenterlistunordered(token) {
tightStack.push(!token._loose)
lineEndingIfNeeded()
tag('<ul')
setData('expectFirstItem', true)
}
function onenterlistitemvalue(token) {
var value
if (getData('expectFirstItem')) {
value = parseInt(this.sliceSerialize(token), constants.numericBaseDecimal)
if (value !== 1) {
tag(' start="' + encode(String(value)) + '"')
}
}
}
function onenterlistitemmarker() {
if (getData('expectFirstItem')) {
tag('>')
} else {
onexitlistitem()
}
lineEndingIfNeeded()
tag('<li>')
setData('expectFirstItem')
// “Hack” to prevent a line ending from showing up if the item is empty.
setData('lastWasTag')
}
function onexitlistordered() {
onexitlistitem()
tightStack.pop()
lineEnding()
tag('</ol>')
}
function onexitlistunordered() {
onexitlistitem()
tightStack.pop()
lineEnding()
tag('</ul>')
}
function onexitlistitem() {
if (getData('lastWasTag') && !getData('slurpAllLineEndings')) {
lineEndingIfNeeded()
}
tag('</li>')
setData('slurpAllLineEndings')
}
function onenterblockquote() {
tightStack.push(false)
lineEndingIfNeeded()
tag('<blockquote>')
}
function onexitblockquote() {
tightStack.pop()
lineEndingIfNeeded()
tag('</blockquote>')
setData('slurpAllLineEndings')
}
function onenterparagraph() {
if (!tightStack[tightStack.length - 1]) {
lineEndingIfNeeded()
tag('<p>')
}
setData('slurpAllLineEndings')
}
function onexitparagraph() {
if (tightStack[tightStack.length - 1]) {
setData('slurpAllLineEndings', true)
} else {
tag('</p>')
}
}
function onentercodefenced() {
lineEndingIfNeeded()
tag('<pre><code')
setData('fencesCount', 0)
}
function onexitcodefencedfenceinfo() {
var value = resume()
tag(' class="language-' + value + '"')
}
function onexitcodefencedfence() {
if (!getData('fencesCount')) {
tag('>')
setData('fencedCodeInside', true)
setData('slurpOneLineEnding', true)
}
setData('fencesCount', getData('fencesCount') + 1)
}
function onentercodeindented() {
lineEndingIfNeeded()
tag('<pre><code>')
}
function onexitflowcode() {
// Send an extra line feed if we saw data.
if (getData('flowCodeSeenData')) lineEndingIfNeeded()
tag('</code></pre>')
if (getData('fencesCount') < 2) lineEndingIfNeeded()
setData('flowCodeSeenData')
setData('fencesCount')
setData('slurpOneLineEnding')
}
function onenterimage() {
mediaStack.push({image: true})
tags = undefined // Disallow tags.
}
function onenterlink() {
mediaStack.push({})
}
function onexitlabeltext(token) {
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
}
function onexitlabel() {
mediaStack[mediaStack.length - 1].label = resume()
}
function onexitreferencestring(token) {
mediaStack[mediaStack.length - 1].referenceId = this.sliceSerialize(token)
}
function onenterresource() {
buffer() // We can have line endings in the resource, ignore them.
mediaStack[mediaStack.length - 1].destination = ''
}
function onenterresourcedestinationstring() {
buffer()
// Ignore encoding the result, as well first percent encode the url and
// encode manually after.
setData('ignoreEncode', true)
}
function onexitresourcedestinationstring() {
mediaStack[mediaStack.length - 1].destination = resume()
setData('ignoreEncode')
}
function onexitresourcetitlestring() {
mediaStack[mediaStack.length - 1].title = resume()
}
// Exit for either an image or a link.
// Resolves the destination/title: a resource (`media.destination` set by an
// inline `(url "title")`) wins; otherwise the definition matching the
// reference (or, failing that, the label) is looked up.
function onexitmedia() {
  var index = mediaStack.length - 1 // Skip current.
  var media = mediaStack[index]
  var context =
    media.destination === undefined
      ? definitions[normalizeIdentifier(media.referenceId || media.labelId)]
      : media
  // Re-enable tag output, unless some *outer* media on the stack is an
  // image: an `alt` attribute cannot contain markup, only text.
  tags = true
  while (index--) {
    if (mediaStack[index].image) {
      tags = undefined
      break
    }
  }
  if (media.image) {
    tag('<img src="' + url(context.destination, protocolSrc) + '" alt="')
    raw(media.label)
    tag('"')
  } else {
    tag('<a href="' + url(context.destination, protocolHref) + '"')
  }
  tag(context.title ? ' title="' + context.title + '"' : '')
  if (media.image) {
    tag(' />')
  } else {
    tag('>')
    raw(media.label)
    tag('</a>')
  }
  mediaStack.pop()
}
function onenterdefinition() {
buffer()
mediaStack.push({})
}
function onexitdefinitionlabelstring(token) {
// Discard label, use the source content instead.
resume()
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
}
function onenterdefinitiondestinationstring() {
buffer()
setData('ignoreEncode', true)
}
function onexitdefinitiondestinationstring() {
mediaStack[mediaStack.length - 1].destination = resume()
setData('ignoreEncode')
}
function onexitdefinitiontitlestring() {
mediaStack[mediaStack.length - 1].title = resume()
}
// Exit for a definition: register it under its normalized label id.
// The `hasOwnProperty` guard means the first definition for a label wins;
// later duplicates are ignored.
function onexitdefinition() {
  var id = normalizeIdentifier(mediaStack[mediaStack.length - 1].labelId)
  // Discard the buffered output opened in `onenterdefinition`.
  resume()
  if (!hasOwnProperty.call(definitions, id)) {
    definitions[id] = mediaStack[mediaStack.length - 1]
  }
  mediaStack.pop()
}
function onentercontent() {
setData('slurpAllLineEndings', true)
}
function onexitatxheadingsequence(token) {
// Exit for further sequences.
if (getData('headingRank')) return
setData('headingRank', this.sliceSerialize(token).length)
lineEndingIfNeeded()
tag('<h' + getData('headingRank') + '>')
}
function onentersetextheading() {
buffer()
setData('slurpAllLineEndings')
}
function onexitsetextheadingtext() {
setData('slurpAllLineEndings', true)
}
function onexitatxheading() {
tag('</h' + getData('headingRank') + '>')
setData('headingRank')
}
function onexitsetextheadinglinesequence(token) {
setData(
'headingRank',
this.sliceSerialize(token).charCodeAt(0) === codes.equalsTo ? 1 : 2
)
}
function onexitsetextheading() {
var value = resume()
lineEndingIfNeeded()
tag('<h' + getData('headingRank') + '>')
raw(value)
tag('</h' + getData('headingRank') + '>')
setData('slurpAllLineEndings')
setData('headingRank')
}
function onexitdata(token) {
raw(encode(this.sliceSerialize(token)))
}
function onexitlineending(token) {
if (getData('slurpAllLineEndings')) {
return
}
if (getData('slurpOneLineEnding')) {
setData('slurpOneLineEnding')
return
}
if (getData('inCodeText')) {
raw(' ')
return
}
raw(encode(this.sliceSerialize(token)))
}
function onexitcodeflowvalue(token) {
raw(encode(this.sliceSerialize(token)))
setData('flowCodeSeenData', true)
}
function onexithardbreak() {
tag('<br />')
}
function onenterhtmlflow() {
lineEndingIfNeeded()
onenterhtml()
}
function onexithtml() {
setData('ignoreEncode')
}
function onenterhtml() {
if (settings.allowDangerousHtml) {
setData('ignoreEncode', true)
}
}
function onenteremphasis() {
tag('<em>')
}
function onenterstrong() {
tag('<strong>')
}
function onentercodetext() {
setData('inCodeText', true)
tag('<code>')
}
function onexitcodetext() {
setData('inCodeText')
tag('</code>')
}
function onexitemphasis() {
tag('</em>')
}
function onexitstrong() {
tag('</strong>')
}
function onexitthematicbreak() {
lineEndingIfNeeded()
tag('<hr />')
}
function onexitcharacterreferencemarker(token) {
setData('characterReferenceType', token.type)
}
// Exit for the value of a character reference: decode and output it.
// Numeric references (decimal or hexadecimal, per the marker recorded by
// `onexitcharacterreferencemarker`) go through `safeFromInt`; named
// references are decoded with `decodeEntity`.
function onexitcharacterreferencevalue(token) {
  var value = this.sliceSerialize(token)
  value = getData('characterReferenceType')
    ? safeFromInt(
        value,
        getData('characterReferenceType') ===
          types.characterReferenceMarkerNumeric
          ? constants.numericBaseDecimal
          : constants.numericBaseHexadecimal
      )
    : decodeEntity__default['default'](value)
  raw(encode(value))
  // Clear the marker type for the next reference.
  setData('characterReferenceType')
}
function onexitautolinkprotocol(token) {
var uri = this.sliceSerialize(token)
tag('<a href="' + url(uri, protocolHref) + '">')
raw(encode(uri))
tag('</a>')
}
function onexitautolinkemail(token) {
var uri = this.sliceSerialize(token)
tag('<a href="' + url('mailto:' + uri, protocolHref) + '">')
raw(encode(uri))
tag('</a>')
}
}
module.exports = compileHtml

813
node_modules/micromark/lib/compile/html.mjs generated vendored Normal file
View File

@@ -0,0 +1,813 @@
// While micromark is a lexer/tokenizer, the common case of going from markdown
// to html is currently built in as this module, even though the parts can be
// used separately to build ASTs, CSTs, or many other output formats.
//
// Having an HTML compiler built in is useful because it allows us to check for
// compliancy to CommonMark, the de facto norm of markdown, specified in roughly
// 600 input/output cases.
//
// This module has an interface which accepts lists of events instead of the
// whole at once, however, because markdown cant be truly streaming, we buffer
// events before processing and outputting the final result.
export default compileHtml
import decodeEntity from 'parse-entities/decode-entity.js'
import codes from '../character/codes.mjs'
import assign from '../constant/assign.mjs'
import constants from '../constant/constants.mjs'
import own from '../constant/has-own-property.mjs'
import types from '../constant/types.mjs'
import combineHtmlExtensions from '../util/combine-html-extensions.mjs'
import chunkedPush from '../util/chunked-push.mjs'
import miniflat from '../util/miniflat.mjs'
import normalizeIdentifier from '../util/normalize-identifier.mjs'
import normalizeUri from '../util/normalize-uri.mjs'
import safeFromInt from '../util/safe-from-int.mjs'
// This ensures that certain characters which have special meaning in HTML are
// dealt with.
// Technically, we can skip `>` and `"` in many cases, but CM includes them.
var characterReferences = {'"': 'quot', '&': 'amp', '<': 'lt', '>': 'gt'}
// These two are allowlists of essentially safe protocols for full URLs in
// respectively the `href` (on `<a>`) and `src` (on `<img>`) attributes.
// They are based on what is allowed on GitHub,
// <https://github.com/syntax-tree/hast-util-sanitize/blob/9275b21/lib/github.json#L31>
var protocolHref = /^(https?|ircs?|mailto|xmpp)$/i
var protocolSrc = /^https?$/i
function compileHtml(options) {
// Configuration.
// Includes `htmlExtensions` (an array of extensions), `defaultLineEnding` (a
// preferred EOL), `allowDangerousProtocol` (whether to allow potential
// dangerous protocols), and `allowDangerousHtml` (whether to allow potential
// dangerous HTML).
var settings = options || {}
// Tags is needed because according to markdown, links and emphasis and
// whatnot can exist in images, however, as HTML doesnt allow content in
// images, the tags are ignored in the `alt` attribute, but the content
// remains.
var tags = true
// An object to track identifiers to media (URLs and titles) defined with
// definitions.
var definitions = {}
// A lot of the handlers need to capture some of the output data, modify it
// somehow, and then deal with it.
// We do that by tracking a stack of buffers, that can be opened (with
// `buffer`) and closed (with `resume`) to access them.
var buffers = [[]]
// As we can have links in images and the other way around, where the deepest
// ones are closed first, we need to track which one were in.
var mediaStack = []
// Same for tightness, which is specific to lists.
// We need to track if were currently in a tight or loose container.
var tightStack = []
var defaultHandlers = {
enter: {
blockQuote: onenterblockquote,
codeFenced: onentercodefenced,
codeFencedFenceInfo: buffer,
codeFencedFenceMeta: buffer,
codeIndented: onentercodeindented,
codeText: onentercodetext,
content: onentercontent,
definition: onenterdefinition,
definitionDestinationString: onenterdefinitiondestinationstring,
definitionLabelString: buffer,
definitionTitleString: buffer,
emphasis: onenteremphasis,
htmlFlow: onenterhtmlflow,
htmlText: onenterhtml,
image: onenterimage,
label: buffer,
link: onenterlink,
listItemMarker: onenterlistitemmarker,
listItemValue: onenterlistitemvalue,
listOrdered: onenterlistordered,
listUnordered: onenterlistunordered,
paragraph: onenterparagraph,
reference: buffer,
resource: onenterresource,
resourceDestinationString: onenterresourcedestinationstring,
resourceTitleString: buffer,
setextHeading: onentersetextheading,
strong: onenterstrong
},
exit: {
atxHeading: onexitatxheading,
atxHeadingSequence: onexitatxheadingsequence,
autolinkEmail: onexitautolinkemail,
autolinkProtocol: onexitautolinkprotocol,
blockQuote: onexitblockquote,
characterEscapeValue: onexitdata,
characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker,
characterReferenceMarkerNumeric: onexitcharacterreferencemarker,
characterReferenceValue: onexitcharacterreferencevalue,
codeFenced: onexitflowcode,
codeFencedFence: onexitcodefencedfence,
codeFencedFenceInfo: onexitcodefencedfenceinfo,
codeFencedFenceMeta: resume,
codeFlowValue: onexitcodeflowvalue,
codeIndented: onexitflowcode,
codeText: onexitcodetext,
codeTextData: onexitdata,
data: onexitdata,
definition: onexitdefinition,
definitionDestinationString: onexitdefinitiondestinationstring,
definitionLabelString: onexitdefinitionlabelstring,
definitionTitleString: onexitdefinitiontitlestring,
emphasis: onexitemphasis,
hardBreakEscape: onexithardbreak,
hardBreakTrailing: onexithardbreak,
htmlFlow: onexithtml,
htmlFlowData: onexitdata,
htmlText: onexithtml,
htmlTextData: onexitdata,
image: onexitmedia,
label: onexitlabel,
labelText: onexitlabeltext,
lineEnding: onexitlineending,
link: onexitmedia,
listOrdered: onexitlistordered,
listUnordered: onexitlistunordered,
paragraph: onexitparagraph,
reference: resume,
referenceString: onexitreferencestring,
resource: resume,
resourceDestinationString: onexitresourcedestinationstring,
resourceTitleString: onexitresourcetitlestring,
setextHeading: onexitsetextheading,
setextHeadingLineSequence: onexitsetextheadinglinesequence,
setextHeadingText: onexitsetextheadingtext,
strong: onexitstrong,
thematicBreak: onexitthematicbreak
}
}
// Combine the HTML extensions with the default handlers.
// An HTML extension is an object whose fields are either `enter` or `exit`
// (reflecting whether a token is entered or exited).
// The values at such objects are names of tokens mapping to handlers.
// Handlers are called, respectively when a token is opener or closed, with
// that token, and a context as `this`.
var handlers = combineHtmlExtensions(
[defaultHandlers].concat(miniflat(settings.htmlExtensions))
)
// Handlers do often need to keep track of some state.
// That state is provided here as a key-value store (an object).
var data = {tightStack: tightStack}
// The context for handlers references a couple of useful functions.
// In handlers from extensions, those can be accessed at `this`.
// For the handlers here, they can be accessed directly.
var context = {
lineEndingIfNeeded: lineEndingIfNeeded,
options: settings,
encode: encode,
raw: raw,
tag: tag,
buffer: buffer,
resume: resume,
setData: setData,
getData: getData
}
// Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
// markdown document over to the compiled HTML.
// In some cases, such as `> a`, CommonMark requires that extra line endings
// are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
// This variable hold the default line ending when given (or `undefined`),
// and in the latter case will be updated to the first found line ending if
// there is one.
var lineEndingStyle = settings.defaultLineEnding
// Return the function that handles a slice of events.
return compile
// Deal w/ a slice of events.
// Return either the empty string if theres nothing of note to return, or the
// result when done.
function compile(events) {
// As definitions can come after references, we need to figure out the media
// (urls and titles) defined by them before handling the references.
// So, we do sort of what HTML does: put metadata at the start (in head), and
// then put content after (`body`).
var head = []
var body = []
var index
var start
var listStack
var handler
var result
index = -1
start = 0
listStack = []
while (++index < events.length) {
// Figure out the line ending style used in the document.
if (
!lineEndingStyle &&
(events[index][1].type === types.lineEnding ||
events[index][1].type === types.lineEndingBlank)
) {
lineEndingStyle = events[index][2].sliceSerialize(events[index][1])
}
// Preprocess lists to infer whether the list is loose or not.
if (
events[index][1].type === types.listOrdered ||
events[index][1].type === types.listUnordered
) {
if (events[index][0] === 'enter') {
listStack.push(index)
} else {
prepareList(events.slice(listStack.pop(), index))
}
}
// Move definitions to the front.
if (events[index][1].type === types.definition) {
if (events[index][0] === 'enter') {
body = chunkedPush(body, events.slice(start, index))
start = index
} else {
head = chunkedPush(head, events.slice(start, index + 1))
start = index + 1
}
}
}
head = chunkedPush(head, body)
head = chunkedPush(head, events.slice(start))
result = head
index = -1
// Handle the start of the document, if defined.
if (handlers.enter.null) {
handlers.enter.null.call(context)
}
// Handle all events.
while (++index < events.length) {
handler = handlers[result[index][0]]
if (own.call(handler, result[index][1].type)) {
handler[result[index][1].type].call(
assign({sliceSerialize: result[index][2].sliceSerialize}, context),
result[index][1]
)
}
}
// Handle the end of the document, if defined.
if (handlers.exit.null) {
handlers.exit.null.call(context)
}
return buffers[0].join('')
}
// Figure out whether lists are loose or not, recording the result on the
// list's opening token as `_loose`.
// A blank line only makes the list loose when it occurs at the top level of
// the list (`containerBalance` is zero, i.e. not inside a nested container)
// and not directly after an item marker (an empty item is not loose).
function prepareList(slice) {
  var length = slice.length - 1 // Skip close.
  var index = 0 // Skip open.
  var containerBalance = 0
  var loose
  var atMarker
  var event
  while (++index < length) {
    event = slice[index]
    if (event[1]._container) {
      atMarker = undefined
      if (event[0] === 'enter') {
        containerBalance++
      } else {
        containerBalance--
      }
    } else if (event[1].type === types.listItemPrefix) {
      if (event[0] === 'exit') {
        atMarker = true
      }
    } else if (event[1].type === types.linePrefix) {
      // Ignore: indentation does not affect looseness.
    } else if (event[1].type === types.lineEndingBlank) {
      if (event[0] === 'enter' && !containerBalance) {
        if (atMarker) {
          atMarker = undefined
        } else {
          loose = true
        }
      }
    } else {
      atMarker = undefined
    }
  }
  slice[0][1]._loose = loose
}
// Set data into the key-value store.
function setData(key, value) {
data[key] = value
}
// Get data from the key-value store.
function getData(key) {
return data[key]
}
// Capture some of the output data.
function buffer() {
buffers.push([])
}
// Stop capturing and access the output data.
function resume() {
return buffers.pop().join('')
}
// Output (parts of) HTML tags.
function tag(value) {
if (!tags) return
setData('lastWasTag', true)
buffers[buffers.length - 1].push(value)
}
// Output raw data.
function raw(value) {
setData('lastWasTag')
buffers[buffers.length - 1].push(value)
}
// Output an extra line ending.
function lineEnding() {
raw(lineEndingStyle || '\n')
}
// Output an extra line ending if the previous value wasnt EOF/EOL.
function lineEndingIfNeeded() {
var buffer = buffers[buffers.length - 1]
var slice = buffer[buffer.length - 1]
var previous = slice ? slice.charCodeAt(slice.length - 1) : codes.eof
if (
previous === codes.lf ||
previous === codes.cr ||
previous === codes.eof
) {
return
}
lineEnding()
}
// Make a value safe for injection in HTML (except w/ `ignoreEncode`).
function encode(value) {
return getData('ignoreEncode') ? value : value.replace(/["&<>]/g, replace)
function replace(value) {
return '&' + characterReferences[value] + ';'
}
}
// Make a value safe for injection as a URL.
// This does encode unsafe characters with percent-encoding, skipping already
// encoded sequences (`normalizeUri`).
// Further unsafe characters are encoded as character references (`encode`).
// Finally, if the URL includes an unknown protocol (such as a dangerous
// example, `javascript:`), the value is ignored.
function url(url, protocol) {
var value = encode(normalizeUri(url || ''))
var colon = value.indexOf(':')
var questionMark = value.indexOf('?')
var numberSign = value.indexOf('#')
var slash = value.indexOf('/')
if (
settings.allowDangerousProtocol ||
// If there is no protocol, its relative.
colon < 0 ||
// If the first colon is after a `?`, `#`, or `/`, its not a protocol.
(slash > -1 && colon > slash) ||
(questionMark > -1 && colon > questionMark) ||
(numberSign > -1 && colon > numberSign) ||
// It is a protocol, it should be allowed.
protocol.test(value.slice(0, colon))
) {
return value
}
return ''
}
//
// Handlers.
//
function onenterlistordered(token) {
tightStack.push(!token._loose)
lineEndingIfNeeded()
tag('<ol')
setData('expectFirstItem', true)
}
function onenterlistunordered(token) {
tightStack.push(!token._loose)
lineEndingIfNeeded()
tag('<ul')
setData('expectFirstItem', true)
}
function onenterlistitemvalue(token) {
var value
if (getData('expectFirstItem')) {
value = parseInt(this.sliceSerialize(token), constants.numericBaseDecimal)
if (value !== 1) {
tag(' start="' + encode(String(value)) + '"')
}
}
}
function onenterlistitemmarker() {
if (getData('expectFirstItem')) {
tag('>')
} else {
onexitlistitem()
}
lineEndingIfNeeded()
tag('<li>')
setData('expectFirstItem')
// “Hack” to prevent a line ending from showing up if the item is empty.
setData('lastWasTag')
}
function onexitlistordered() {
onexitlistitem()
tightStack.pop()
lineEnding()
tag('</ol>')
}
function onexitlistunordered() {
onexitlistitem()
tightStack.pop()
lineEnding()
tag('</ul>')
}
function onexitlistitem() {
if (getData('lastWasTag') && !getData('slurpAllLineEndings')) {
lineEndingIfNeeded()
}
tag('</li>')
setData('slurpAllLineEndings')
}
function onenterblockquote() {
tightStack.push(false)
lineEndingIfNeeded()
tag('<blockquote>')
}
function onexitblockquote() {
tightStack.pop()
lineEndingIfNeeded()
tag('</blockquote>')
setData('slurpAllLineEndings')
}
function onenterparagraph() {
if (!tightStack[tightStack.length - 1]) {
lineEndingIfNeeded()
tag('<p>')
}
setData('slurpAllLineEndings')
}
function onexitparagraph() {
if (tightStack[tightStack.length - 1]) {
setData('slurpAllLineEndings', true)
} else {
tag('</p>')
}
}
function onentercodefenced() {
lineEndingIfNeeded()
tag('<pre><code')
setData('fencesCount', 0)
}
function onexitcodefencedfenceinfo() {
var value = resume()
tag(' class="language-' + value + '"')
}
function onexitcodefencedfence() {
if (!getData('fencesCount')) {
tag('>')
setData('fencedCodeInside', true)
setData('slurpOneLineEnding', true)
}
setData('fencesCount', getData('fencesCount') + 1)
}
function onentercodeindented() {
lineEndingIfNeeded()
tag('<pre><code>')
}
function onexitflowcode() {
// Send an extra line feed if we saw data.
if (getData('flowCodeSeenData')) lineEndingIfNeeded()
tag('</code></pre>')
if (getData('fencesCount') < 2) lineEndingIfNeeded()
setData('flowCodeSeenData')
setData('fencesCount')
setData('slurpOneLineEnding')
}
function onenterimage() {
mediaStack.push({image: true})
tags = undefined // Disallow tags.
}
function onenterlink() {
mediaStack.push({})
}
function onexitlabeltext(token) {
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
}
function onexitlabel() {
mediaStack[mediaStack.length - 1].label = resume()
}
function onexitreferencestring(token) {
mediaStack[mediaStack.length - 1].referenceId = this.sliceSerialize(token)
}
function onenterresource() {
buffer() // We can have line endings in the resource, ignore them.
mediaStack[mediaStack.length - 1].destination = ''
}
function onenterresourcedestinationstring() {
buffer()
// Ignore encoding the result, as well first percent encode the url and
// encode manually after.
setData('ignoreEncode', true)
}
function onexitresourcedestinationstring() {
mediaStack[mediaStack.length - 1].destination = resume()
setData('ignoreEncode')
}
function onexitresourcetitlestring() {
mediaStack[mediaStack.length - 1].title = resume()
}
function onexitmedia() {
var index = mediaStack.length - 1 // Skip current.
var media = mediaStack[index]
var context =
media.destination === undefined
? definitions[normalizeIdentifier(media.referenceId || media.labelId)]
: media
tags = true
while (index--) {
if (mediaStack[index].image) {
tags = undefined
break
}
}
if (media.image) {
tag('<img src="' + url(context.destination, protocolSrc) + '" alt="')
raw(media.label)
tag('"')
} else {
tag('<a href="' + url(context.destination, protocolHref) + '"')
}
tag(context.title ? ' title="' + context.title + '"' : '')
if (media.image) {
tag(' />')
} else {
tag('>')
raw(media.label)
tag('</a>')
}
mediaStack.pop()
}
function onenterdefinition() {
buffer()
mediaStack.push({})
}
function onexitdefinitionlabelstring(token) {
// Discard label, use the source content instead.
resume()
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
}
function onenterdefinitiondestinationstring() {
buffer()
setData('ignoreEncode', true)
}
function onexitdefinitiondestinationstring() {
mediaStack[mediaStack.length - 1].destination = resume()
setData('ignoreEncode')
}
function onexitdefinitiontitlestring() {
mediaStack[mediaStack.length - 1].title = resume()
}
function onexitdefinition() {
var id = normalizeIdentifier(mediaStack[mediaStack.length - 1].labelId)
resume()
if (!own.call(definitions, id)) {
definitions[id] = mediaStack[mediaStack.length - 1]
}
mediaStack.pop()
}
function onentercontent() {
setData('slurpAllLineEndings', true)
}
function onexitatxheadingsequence(token) {
// Exit for further sequences.
if (getData('headingRank')) return
setData('headingRank', this.sliceSerialize(token).length)
lineEndingIfNeeded()
tag('<h' + getData('headingRank') + '>')
}
function onentersetextheading() {
buffer()
setData('slurpAllLineEndings')
}
function onexitsetextheadingtext() {
setData('slurpAllLineEndings', true)
}
function onexitatxheading() {
tag('</h' + getData('headingRank') + '>')
setData('headingRank')
}
function onexitsetextheadinglinesequence(token) {
setData(
'headingRank',
this.sliceSerialize(token).charCodeAt(0) === codes.equalsTo ? 1 : 2
)
}
function onexitsetextheading() {
var value = resume()
lineEndingIfNeeded()
tag('<h' + getData('headingRank') + '>')
raw(value)
tag('</h' + getData('headingRank') + '>')
setData('slurpAllLineEndings')
setData('headingRank')
}
function onexitdata(token) {
raw(encode(this.sliceSerialize(token)))
}
function onexitlineending(token) {
if (getData('slurpAllLineEndings')) {
return
}
if (getData('slurpOneLineEnding')) {
setData('slurpOneLineEnding')
return
}
if (getData('inCodeText')) {
raw(' ')
return
}
raw(encode(this.sliceSerialize(token)))
}
function onexitcodeflowvalue(token) {
raw(encode(this.sliceSerialize(token)))
setData('flowCodeSeenData', true)
}
function onexithardbreak() {
tag('<br />')
}
function onenterhtmlflow() {
lineEndingIfNeeded()
onenterhtml()
}
function onexithtml() {
setData('ignoreEncode')
}
function onenterhtml() {
if (settings.allowDangerousHtml) {
setData('ignoreEncode', true)
}
}
function onenteremphasis() {
tag('<em>')
}
function onenterstrong() {
tag('<strong>')
}
function onentercodetext() {
setData('inCodeText', true)
tag('<code>')
}
function onexitcodetext() {
setData('inCodeText')
tag('</code>')
}
function onexitemphasis() {
tag('</em>')
}
function onexitstrong() {
tag('</strong>')
}
function onexitthematicbreak() {
lineEndingIfNeeded()
tag('<hr />')
}
function onexitcharacterreferencemarker(token) {
setData('characterReferenceType', token.type)
}
function onexitcharacterreferencevalue(token) {
var value = this.sliceSerialize(token)
value = getData('characterReferenceType')
? safeFromInt(
value,
getData('characterReferenceType') ===
types.characterReferenceMarkerNumeric
? constants.numericBaseDecimal
: constants.numericBaseHexadecimal
)
: decodeEntity(value)
raw(encode(value))
setData('characterReferenceType')
}
function onexitautolinkprotocol(token) {
var uri = this.sliceSerialize(token)
tag('<a href="' + url(uri, protocolHref) + '">')
raw(encode(uri))
tag('</a>')
}
function onexitautolinkemail(token) {
var uri = this.sliceSerialize(token)
tag('<a href="' + url('mailto:' + uri, protocolHref) + '">')
raw(encode(uri))
tag('</a>')
}
}

5
node_modules/micromark/lib/constant/assign.js generated vendored Normal file
View File

@@ -0,0 +1,5 @@
'use strict'
var assign = Object.assign
module.exports = assign

1
node_modules/micromark/lib/constant/assign.mjs generated vendored Normal file
View File

@@ -0,0 +1 @@
// Re-export the native `Object.assign` (compiled away in the source build).
export default Object.assign

65
node_modules/micromark/lib/constant/constants.d.ts generated vendored Normal file
View File

@@ -0,0 +1,65 @@
// This module is generated by `script/`.
// It mirrors `constants.js`/`constants.mjs`: `Constant` is the union of all
// constant values used by micromark, and `Constants` maps each constant name
// to its literal value.
export type Constant =
  | 1
  | 2
  | 6
  | 63
  | 32
  | 'CDATA['
  | 7
  | 31
  | 3
  | 'flow'
  | 'content'
  | 'string'
  | 'text'
  | 4
  | 5
  | 8
  | 999
  | 10
  | 16
  | 10000
// @for-script: REMOVE_ALL_THING_BELOW
export interface Constants {
  attentionSideBefore: 1
  attentionSideAfter: 2
  atxHeadingOpeningFenceSizeMax: 6
  autolinkDomainSizeMax: 63
  autolinkSchemeSizeMax: 32
  cdataOpeningString: 'CDATA['
  characterGroupWhitespace: 1
  characterGroupPunctuation: 2
  characterReferenceDecimalSizeMax: 7
  characterReferenceHexadecimalSizeMax: 6
  characterReferenceNamedSizeMax: 31
  codeFencedSequenceSizeMin: 3
  contentTypeFlow: 'flow'
  contentTypeContent: 'content'
  contentTypeString: 'string'
  contentTypeText: 'text'
  hardBreakPrefixSizeMin: 2
  htmlRaw: 1
  htmlComment: 2
  htmlInstruction: 3
  htmlDeclaration: 4
  htmlCdata: 5
  htmlBasic: 6
  htmlComplete: 7
  htmlRawSizeMax: 8
  linkResourceDestinationBalanceMax: 3
  linkReferenceSizeMax: 999
  listItemValueSizeMax: 10
  numericBaseDecimal: 10
  numericBaseHexadecimal: 16
  tabSize: 4
  thematicBreakMarkerCountMin: 3
  v8MaxSafeChunkSize: 10000
}
declare const value: Constants
export default value

45
node_modules/micromark/lib/constant/constants.js generated vendored Normal file
View File

@@ -0,0 +1,45 @@
'use strict'

// This module is compiled away!
//
// Parsing markdown comes with a couple of constants, such as minimum or maximum
// sizes of certain sequences.
// Additionally, there are a couple symbols used inside micromark.
// These are all defined here, but compiled away by scripts.
var constants = {
  attentionSideBefore: 1, // Symbol to mark an attention sequence as before content: `*a`
  attentionSideAfter: 2, // Symbol to mark an attention sequence as after content: `a*`
  atxHeadingOpeningFenceSizeMax: 6, // 6 number signs is fine, 7 isnt.
  autolinkDomainSizeMax: 63, // 63 characters is fine, 64 is too many.
  autolinkSchemeSizeMax: 32, // 32 characters is fine, 33 is too many.
  cdataOpeningString: 'CDATA[', // And preceded by `<![`.
  characterGroupWhitespace: 1, // Symbol used to indicate a character is whitespace
  characterGroupPunctuation: 2, // Symbol used to indicate a character is punctuation
  characterReferenceDecimalSizeMax: 7, // `&#9999999;`.
  characterReferenceHexadecimalSizeMax: 6, // `&#xff9999;`.
  characterReferenceNamedSizeMax: 31, // `&CounterClockwiseContourIntegral;`.
  codeFencedSequenceSizeMin: 3, // At least 3 ticks or tildes are needed.
  contentTypeFlow: 'flow',
  contentTypeContent: 'content',
  contentTypeString: 'string',
  contentTypeText: 'text',
  hardBreakPrefixSizeMin: 2, // At least 2 trailing spaces are needed.
  htmlRaw: 1, // Symbol for `<script>`
  htmlComment: 2, // Symbol for `<!---->`
  htmlInstruction: 3, // Symbol for `<?php?>`
  htmlDeclaration: 4, // Symbol for `<!doctype>`
  htmlCdata: 5, // Symbol for `<![CDATA[]]>`
  htmlBasic: 6, // Symbol for `<div`
  htmlComplete: 7, // Symbol for `<x>`
  htmlRawSizeMax: 8, // Length of `textarea`.
  linkResourceDestinationBalanceMax: 3, // See: <https://spec.commonmark.org/0.29/#link-destination>
  linkReferenceSizeMax: 999, // See: <https://spec.commonmark.org/0.29/#link-label>
  listItemValueSizeMax: 10, // See: <https://spec.commonmark.org/0.29/#ordered-list-marker>
  numericBaseDecimal: 10,
  numericBaseHexadecimal: 0x10,
  tabSize: 4, // Tabs have a hard-coded size of 4, per CommonMark.
  thematicBreakMarkerCountMin: 3, // At least 3 asterisks, dashes, or underscores are needed.
  v8MaxSafeChunkSize: 10000 // V8 (and potentially others) have problems injecting giant arrays into other arrays, hence we operate in chunks.
}
module.exports = constants

41
node_modules/micromark/lib/constant/constants.mjs generated vendored Normal file
View File

@@ -0,0 +1,41 @@
// This module is compiled away!
//
// Parsing markdown comes with a couple of constants, such as minimum or maximum
// sizes of certain sequences.
// Additionally, there are a couple symbols used inside micromark.
// These are all defined here, but compiled away by scripts.
export default {
attentionSideBefore: 1, // Symbol to mark an attention sequence as before content: `*a`
attentionSideAfter: 2, // Symbol to mark an attention sequence as after content: `a*`
atxHeadingOpeningFenceSizeMax: 6, // 6 number signs is fine, 7 isn't.
autolinkDomainSizeMax: 63, // 63 characters is fine, 64 is too many.
autolinkSchemeSizeMax: 32, // 32 characters is fine, 33 is too many.
cdataOpeningString: 'CDATA[', // And preceded by `<![`.
characterGroupWhitespace: 1, // Symbol used to indicate a character is whitespace
characterGroupPunctuation: 2, // Symbol used to indicate a character is punctuation
characterReferenceDecimalSizeMax: 7, // `&#9999999;`.
characterReferenceHexadecimalSizeMax: 6, // `&#xff9999;`.
characterReferenceNamedSizeMax: 31, // `&CounterClockwiseContourIntegral;`.
codeFencedSequenceSizeMin: 3, // At least 3 ticks or tildes are needed.
contentTypeFlow: 'flow',
contentTypeContent: 'content',
contentTypeString: 'string',
contentTypeText: 'text',
hardBreakPrefixSizeMin: 2, // At least 2 trailing spaces are needed.
htmlRaw: 1, // Symbol for `<script>`
htmlComment: 2, // Symbol for `<!---->`
htmlInstruction: 3, // Symbol for `<?php?>`
htmlDeclaration: 4, // Symbol for `<!doctype>`
htmlCdata: 5, // Symbol for `<![CDATA[]]>`
htmlBasic: 6, // Symbol for `<div`
htmlComplete: 7, // Symbol for `<x>`
htmlRawSizeMax: 8, // Length of `textarea`.
linkResourceDestinationBalanceMax: 3, // See: <https://spec.commonmark.org/0.29/#link-destination>
linkReferenceSizeMax: 999, // See: <https://spec.commonmark.org/0.29/#link-label>
listItemValueSizeMax: 10, // See: <https://spec.commonmark.org/0.29/#ordered-list-marker>
numericBaseDecimal: 10,
numericBaseHexadecimal: 0x10,
tabSize: 4, // Tabs have a hard-coded size of 4, per CommonMark.
thematicBreakMarkerCountMin: 3, // At least 3 asterisks, dashes, or underscores are needed.
v8MaxSafeChunkSize: 10000 // V8 (and potentially others) have problems injecting giant arrays into other arrays, hence we operate in chunks.
}

View File

@@ -0,0 +1,5 @@
'use strict'
// Cached reference to `String.fromCharCode`, for turning character codes back
// into strings without repeating the property lookup.
var fromCharCode = String.fromCharCode
module.exports = fromCharCode

View File

@@ -0,0 +1 @@
// Cached reference to `String.fromCharCode`, for turning character codes back
// into strings without repeating the property lookup.
export default String.fromCharCode

View File

@@ -0,0 +1,5 @@
'use strict'
// Shared reference to `hasOwnProperty`, presumably used via
// `own.call(object, key)` elsewhere so the check also works for objects whose
// own `hasOwnProperty` is shadowed.
var own = {}.hasOwnProperty
module.exports = own

View File

@@ -0,0 +1 @@
// Shared reference to `hasOwnProperty`, presumably used via
// `call(object, key)` elsewhere so the check also works for objects whose own
// `hasOwnProperty` is shadowed.
export default {}.hasOwnProperty

View File

@@ -0,0 +1,69 @@
'use strict'
// This module is copied from <https://spec.commonmark.org/0.29/#html-blocks>.
// Tag names (lowercase) that open "basic" HTML flow — start condition 6 in the
// spec's HTML blocks section.
var basics = [
'address',
'article',
'aside',
'base',
'basefont',
'blockquote',
'body',
'caption',
'center',
'col',
'colgroup',
'dd',
'details',
'dialog',
'dir',
'div',
'dl',
'dt',
'fieldset',
'figcaption',
'figure',
'footer',
'form',
'frame',
'frameset',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'head',
'header',
'hr',
'html',
'iframe',
'legend',
'li',
'link',
'main',
'menu',
'menuitem',
'nav',
'noframes',
'ol',
'optgroup',
'option',
'p',
'param',
'section',
'source',
'summary',
'table',
'tbody',
'td',
'tfoot',
'th',
'thead',
'title',
'tr',
'track',
'ul'
]
module.exports = basics

View File

@@ -0,0 +1,65 @@
// This module is copied from <https://spec.commonmark.org/0.29/#html-blocks>.
// Tag names (lowercase) that open "basic" HTML flow — start condition 6 in the
// spec's HTML blocks section.
export default [
'address',
'article',
'aside',
'base',
'basefont',
'blockquote',
'body',
'caption',
'center',
'col',
'colgroup',
'dd',
'details',
'dialog',
'dir',
'div',
'dl',
'dt',
'fieldset',
'figcaption',
'figure',
'footer',
'form',
'frame',
'frameset',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'head',
'header',
'hr',
'html',
'iframe',
'legend',
'li',
'link',
'main',
'menu',
'menuitem',
'nav',
'noframes',
'ol',
'optgroup',
'option',
'p',
'param',
'section',
'source',
'summary',
'table',
'tbody',
'td',
'tfoot',
'th',
'thead',
'title',
'tr',
'track',
'ul'
]

View File

@@ -0,0 +1,6 @@
'use strict'
// This module is copied from <https://spec.commonmark.org/0.29/#html-blocks>.
// Tag names whose content is treated as raw text in HTML flow — start
// condition 1 in the spec's HTML blocks section.
var raws = ['pre', 'script', 'style', 'textarea']
module.exports = raws

View File

@@ -0,0 +1,2 @@
// This module is copied from <https://spec.commonmark.org/0.29/#html-blocks>.
// Tag names whose content is treated as raw text in HTML flow — start
// condition 1 in the spec's HTML blocks section.
export default ['pre', 'script', 'style', 'textarea']

5
node_modules/micromark/lib/constant/splice.js generated vendored Normal file
View File

@@ -0,0 +1,5 @@
'use strict'
// Shared reference to `Array.prototype.splice`, presumably borrowed elsewhere
// via `splice.call(...)` / `splice.apply(...)`.
var splice = [].splice
module.exports = splice

1
node_modules/micromark/lib/constant/splice.mjs generated vendored Normal file
View File

@@ -0,0 +1 @@
// Shared reference to `Array.prototype.splice`, presumably borrowed elsewhere
// via `call(...)` / `apply(...)`.
export default [].splice

114
node_modules/micromark/lib/constant/types.d.ts generated vendored Normal file
View File

@@ -0,0 +1,114 @@
// This module is generated by `script/`.
// Loose alias for a token type name; the `Types` interface in this file
// enumerates the concrete token types.
export type Type = string
// @for-script: REMOVE_ALL_THING_BELOW
export interface Types {
data: 'data'
whitespace: 'whitespace'
lineEnding: 'lineEnding'
lineEndingBlank: 'lineEndingBlank'
linePrefix: 'linePrefix'
lineSuffix: 'lineSuffix'
atxHeading: 'atxHeading'
atxHeadingSequence: 'atxHeadingSequence'
atxHeadingText: 'atxHeadingText'
autolink: 'autolink'
autolinkEmail: 'autolinkEmail'
autolinkMarker: 'autolinkMarker'
autolinkProtocol: 'autolinkProtocol'
characterEscape: 'characterEscape'
characterEscapeValue: 'characterEscapeValue'
characterReference: 'characterReference'
characterReferenceMarker: 'characterReferenceMarker'
characterReferenceMarkerNumeric: 'characterReferenceMarkerNumeric'
characterReferenceMarkerHexadecimal: 'characterReferenceMarkerHexadecimal'
characterReferenceValue: 'characterReferenceValue'
codeFenced: 'codeFenced'
codeFencedFence: 'codeFencedFence'
codeFencedFenceSequence: 'codeFencedFenceSequence'
codeFencedFenceInfo: 'codeFencedFenceInfo'
codeFencedFenceMeta: 'codeFencedFenceMeta'
codeFlowValue: 'codeFlowValue'
codeIndented: 'codeIndented'
codeText: 'codeText'
codeTextData: 'codeTextData'
codeTextPadding: 'codeTextPadding'
codeTextSequence: 'codeTextSequence'
content: 'content'
definition: 'definition'
definitionDestination: 'definitionDestination'
definitionDestinationLiteral: 'definitionDestinationLiteral'
definitionDestinationLiteralMarker: 'definitionDestinationLiteralMarker'
definitionDestinationRaw: 'definitionDestinationRaw'
definitionDestinationString: 'definitionDestinationString'
definitionLabel: 'definitionLabel'
definitionLabelMarker: 'definitionLabelMarker'
definitionLabelString: 'definitionLabelString'
definitionMarker: 'definitionMarker'
definitionTitle: 'definitionTitle'
definitionTitleMarker: 'definitionTitleMarker'
definitionTitleString: 'definitionTitleString'
emphasis: 'emphasis'
emphasisSequence: 'emphasisSequence'
emphasisText: 'emphasisText'
escapeMarker: 'escapeMarker'
hardBreakEscape: 'hardBreakEscape'
hardBreakTrailing: 'hardBreakTrailing'
htmlFlow: 'htmlFlow'
htmlFlowData: 'htmlFlowData'
htmlText: 'htmlText'
htmlTextData: 'htmlTextData'
image: 'image'
label: 'label'
labelText: 'labelText'
labelLink: 'labelLink'
labelImage: 'labelImage'
labelMarker: 'labelMarker'
labelImageMarker: 'labelImageMarker'
labelEnd: 'labelEnd'
link: 'link'
paragraph: 'paragraph'
reference: 'reference'
referenceMarker: 'referenceMarker'
referenceString: 'referenceString'
resource: 'resource'
resourceDestination: 'resourceDestination'
resourceDestinationLiteral: 'resourceDestinationLiteral'
resourceDestinationLiteralMarker: 'resourceDestinationLiteralMarker'
resourceDestinationRaw: 'resourceDestinationRaw'
resourceDestinationString: 'resourceDestinationString'
resourceMarker: 'resourceMarker'
resourceTitle: 'resourceTitle'
resourceTitleMarker: 'resourceTitleMarker'
resourceTitleString: 'resourceTitleString'
setextHeading: 'setextHeading'
setextHeadingText: 'setextHeadingText'
setextHeadingLine: 'setextHeadingLine'
setextHeadingLineSequence: 'setextHeadingLineSequence'
strong: 'strong'
strongSequence: 'strongSequence'
strongText: 'strongText'
thematicBreak: 'thematicBreak'
thematicBreakSequence: 'thematicBreakSequence'
blockQuote: 'blockQuote'
blockQuotePrefix: 'blockQuotePrefix'
blockQuoteMarker: 'blockQuoteMarker'
blockQuotePrefixWhitespace: 'blockQuotePrefixWhitespace'
listOrdered: 'listOrdered'
listUnordered: 'listUnordered'
listItemIndent: 'listItemIndent'
listItemMarker: 'listItemMarker'
listItemPrefix: 'listItemPrefix'
listItemPrefixWhitespace: 'listItemPrefixWhitespace'
listItemValue: 'listItemValue'
chunkContent: 'chunkContent'
chunkFlow: 'chunkFlow'
chunkText: 'chunkText'
chunkString: 'chunkString'
}
declare const value: Types
export default value

452
node_modules/micromark/lib/constant/types.js generated vendored Normal file
View File

@@ -0,0 +1,452 @@
'use strict'
// This module is compiled away!
//
// Here is the list of all types of tokens exposed by micromark, with a short
// explanation of what they include and where they are found.
// In picking names, generally, the rule is to be as explicit as possible
// instead of reusing names.
// For example, there is a `definitionDestination` and a `resourceDestination`,
// instead of one shared name.
var types = {
// Generic type for data, such as in a title, a destination, etc.
data: 'data',
// Generic type for syntactic whitespace (tabs, virtual spaces, spaces).
// Such as, between a fenced code fence and an info string.
whitespace: 'whitespace',
// Generic type for line endings (line feed, carriage return, carriage return +
// line feed).
lineEnding: 'lineEnding',
// A line ending, but ending a blank line.
lineEndingBlank: 'lineEndingBlank',
// Generic type for whitespace (tabs, virtual spaces, spaces) at the start of a
// line.
linePrefix: 'linePrefix',
// Generic type for whitespace (tabs, virtual spaces, spaces) at the end of a
// line.
lineSuffix: 'lineSuffix',
// Whole ATX heading:
//
// ```markdown
// #
// ## Alpha
// ### Bravo ###
// ```
//
// Includes `atxHeadingSequence`, `whitespace`, `atxHeadingText`.
atxHeading: 'atxHeading',
// Sequence of number signs in an ATX heading (`###`).
atxHeadingSequence: 'atxHeadingSequence',
// Content in an ATX heading (`alpha`).
// Includes text.
atxHeadingText: 'atxHeadingText',
// Whole autolink (`<https://example.com>` or `<admin@example.com>`)
// Includes `autolinkMarker` and `autolinkProtocol` or `autolinkEmail`.
autolink: 'autolink',
// Email autolink w/o markers (`admin@example.com`)
autolinkEmail: 'autolinkEmail',
// Marker around an `autolinkProtocol` or `autolinkEmail` (`<` or `>`).
autolinkMarker: 'autolinkMarker',
// Protocol autolink w/o markers (`https://example.com`)
autolinkProtocol: 'autolinkProtocol',
// A whole character escape (`\-`).
// Includes `escapeMarker` and `characterEscapeValue`.
characterEscape: 'characterEscape',
// The escaped character (`-`).
characterEscapeValue: 'characterEscapeValue',
// A whole character reference (`&amp;`, `&#8800;`, or `&#x1D306;`).
// Includes `characterReferenceMarker`, an optional
// `characterReferenceMarkerNumeric`, in which case an optional
// `characterReferenceMarkerHexadecimal`, and a `characterReferenceValue`.
characterReference: 'characterReference',
// The start or end marker (`&` or `;`).
characterReferenceMarker: 'characterReferenceMarker',
// Mark reference as numeric (`#`).
characterReferenceMarkerNumeric: 'characterReferenceMarkerNumeric',
// Mark reference as hexadecimal (`x` or `X`).
characterReferenceMarkerHexadecimal: 'characterReferenceMarkerHexadecimal',
// Value of character reference w/o markers (`amp`, `8800`, or `1D306`).
characterReferenceValue: 'characterReferenceValue',
// Whole fenced code:
//
// ````markdown
// ```js
// alert(1)
// ```
// ````
codeFenced: 'codeFenced',
// A fenced code fence, including whitespace, sequence, info, and meta
// (` ```js `).
codeFencedFence: 'codeFencedFence',
// Sequence of grave accent or tilde characters (` ``` `) in a fence.
codeFencedFenceSequence: 'codeFencedFenceSequence',
// Info word (`js`) in a fence.
// Includes string.
codeFencedFenceInfo: 'codeFencedFenceInfo',
// Meta words (`highlight="1"`) in a fence.
// Includes string.
codeFencedFenceMeta: 'codeFencedFenceMeta',
// A line of code.
codeFlowValue: 'codeFlowValue',
// Whole indented code:
//
// ```markdown
// alert(1)
// ```
//
// Includes `lineEnding`, `linePrefix`, and `codeFlowValue`.
codeIndented: 'codeIndented',
// A text code (``` `alpha` ```).
// Includes `codeTextSequence`, `codeTextData`, `lineEnding`, and can include
// `codeTextPadding`.
codeText: 'codeText',
codeTextData: 'codeTextData',
// A space or line ending right after or before a tick.
codeTextPadding: 'codeTextPadding',
// A text code fence (` `` `).
codeTextSequence: 'codeTextSequence',
// Whole content:
//
// ```markdown
// [a]: b
// c
// =
// d
// ```
//
// Includes `paragraph` and `definition`.
content: 'content',
// Whole definition:
//
// ```markdown
// [micromark]: https://github.com/micromark/micromark
// ```
//
// Includes `definitionLabel`, `definitionMarker`, `whitespace`,
// `definitionDestination`, and optionally `lineEnding` and `definitionTitle`.
definition: 'definition',
// Destination of a definition (`https://github.com/micromark/micromark` or
// `<https://github.com/micromark/micromark>`).
// Includes `definitionDestinationLiteral` or `definitionDestinationRaw`.
definitionDestination: 'definitionDestination',
// Enclosed destination of a definition
// (`<https://github.com/micromark/micromark>`).
// Includes `definitionDestinationLiteralMarker` and optionally
// `definitionDestinationString`.
definitionDestinationLiteral: 'definitionDestinationLiteral',
// Markers of an enclosed definition destination (`<` or `>`).
definitionDestinationLiteralMarker: 'definitionDestinationLiteralMarker',
// Unenclosed destination of a definition
// (`https://github.com/micromark/micromark`).
// Includes `definitionDestinationString`.
definitionDestinationRaw: 'definitionDestinationRaw',
// Text in an destination (`https://github.com/micromark/micromark`).
// Includes string.
definitionDestinationString: 'definitionDestinationString',
// Label of a definition (`[micromark]`).
// Includes `definitionLabelMarker` and `definitionLabelString`.
definitionLabel: 'definitionLabel',
// Markers of a definition label (`[` or `]`).
definitionLabelMarker: 'definitionLabelMarker',
// Value of a definition label (`micromark`).
// Includes string.
definitionLabelString: 'definitionLabelString',
// Marker between a label and a destination (`:`).
definitionMarker: 'definitionMarker',
// Title of a definition (`"x"`, `'y'`, or `(z)`).
// Includes `definitionTitleMarker` and optionally `definitionTitleString`.
definitionTitle: 'definitionTitle',
// Marker around a title of a definition (`"`, `'`, `(`, or `)`).
definitionTitleMarker: 'definitionTitleMarker',
// Data without markers in a title (`z`).
// Includes string.
definitionTitleString: 'definitionTitleString',
// Emphasis (`*alpha*`).
// Includes `emphasisSequence` and `emphasisText`.
emphasis: 'emphasis',
// Sequence of emphasis markers (`*` or `_`).
emphasisSequence: 'emphasisSequence',
// Emphasis text (`alpha`).
// Includes text.
emphasisText: 'emphasisText',
// The character escape marker (`\`).
escapeMarker: 'escapeMarker',
// A hard break created with a backslash (`\\n`).
// Includes `escapeMarker` (does not include the line ending)
hardBreakEscape: 'hardBreakEscape',
// A hard break created with trailing spaces (` \n`).
// Does not include the line ending.
hardBreakTrailing: 'hardBreakTrailing',
// Flow HTML:
//
// ```markdown
// <div
// ```
//
// Includes `lineEnding`, `htmlFlowData`.
htmlFlow: 'htmlFlow',
htmlFlowData: 'htmlFlowData',
// HTML in text (the tag in `a <i> b`).
// Includes `lineEnding`, `htmlTextData`.
htmlText: 'htmlText',
htmlTextData: 'htmlTextData',
// Whole image (`![alpha](bravo)`, `![alpha][bravo]`, `![alpha][]`, or
// `![alpha]`).
// Includes `label` and an optional `resource` or `reference`.
image: 'image',
// Whole link label (`[*alpha*]`).
// Includes `labelLink` or `labelImage`, `labelText`, and `labelEnd`.
label: 'label',
// Text in an label (`*alpha*`).
// Includes text.
labelText: 'labelText',
// Start a link label (`[`).
// Includes a `labelMarker`.
labelLink: 'labelLink',
// Start an image label (`![`).
// Includes `labelImageMarker` and `labelMarker`.
labelImage: 'labelImage',
// Marker of a label (`[` or `]`).
labelMarker: 'labelMarker',
// Marker to start an image (`!`).
labelImageMarker: 'labelImageMarker',
// End a label (`]`).
// Includes `labelMarker`.
labelEnd: 'labelEnd',
// Whole link (`[alpha](bravo)`, `[alpha][bravo]`, `[alpha][]`, or `[alpha]`).
// Includes `label` and an optional `resource` or `reference`.
link: 'link',
// Whole paragraph:
//
// ```markdown
// alpha
// bravo.
// ```
//
// Includes text.
paragraph: 'paragraph',
// A reference (`[alpha]` or `[]`).
// Includes `referenceMarker` and an optional `referenceString`.
reference: 'reference',
// A reference marker (`[` or `]`).
referenceMarker: 'referenceMarker',
// Reference text (`alpha`).
// Includes string.
referenceString: 'referenceString',
// A resource (`(https://example.com "alpha")`).
// Includes `resourceMarker`, an optional `resourceDestination` with an optional
// `whitespace` and `resourceTitle`.
resource: 'resource',
// A resource destination (`https://example.com`).
// Includes `resourceDestinationLiteral` or `resourceDestinationRaw`.
resourceDestination: 'resourceDestination',
// A literal resource destination (`<https://example.com>`).
// Includes `resourceDestinationLiteralMarker` and optionally
// `resourceDestinationString`.
resourceDestinationLiteral: 'resourceDestinationLiteral',
// A resource destination marker (`<` or `>`).
resourceDestinationLiteralMarker: 'resourceDestinationLiteralMarker',
// A raw resource destination (`https://example.com`).
// Includes `resourceDestinationString`.
resourceDestinationRaw: 'resourceDestinationRaw',
// Resource destination text (`https://example.com`).
// Includes string.
resourceDestinationString: 'resourceDestinationString',
// A resource marker (`(` or `)`).
resourceMarker: 'resourceMarker',
// A resource title (`"alpha"`, `'alpha'`, or `(alpha)`).
// Includes `resourceTitleMarker` and optionally `resourceTitleString`.
resourceTitle: 'resourceTitle',
// A resource title marker (`"`, `'`, `(`, or `)`).
resourceTitleMarker: 'resourceTitleMarker',
// Resource destination title (`alpha`).
// Includes string.
resourceTitleString: 'resourceTitleString',
// Whole setext heading:
//
// ```markdown
// alpha
// bravo
// =====
// ```
//
// Includes `setextHeadingText`, `lineEnding`, `linePrefix`, and
// `setextHeadingLine`.
setextHeading: 'setextHeading',
// Content in a setext heading (`alpha\nbravo`).
// Includes text.
setextHeadingText: 'setextHeadingText',
// Underline in a setext heading, including whitespace suffix (`==`).
// Includes `setextHeadingLineSequence`.
setextHeadingLine: 'setextHeadingLine',
// Sequence of equals or dash characters in underline in a setext heading (`-`).
setextHeadingLineSequence: 'setextHeadingLineSequence',
// Strong (`**alpha**`).
// Includes `strongSequence` and `strongText`.
strong: 'strong',
// Sequence of strong markers (`**` or `__`).
strongSequence: 'strongSequence',
// Strong text (`alpha`).
// Includes text.
strongText: 'strongText',
// Whole thematic break:
//
// ```markdown
// * * *
// ```
//
// Includes `thematicBreakSequence` and `whitespace`.
thematicBreak: 'thematicBreak',
// A sequence of one or more thematic break markers (`***`).
thematicBreakSequence: 'thematicBreakSequence',
// Whole block quote:
//
// ```markdown
// > a
// >
// > b
// ```
//
// Includes `blockQuotePrefix` and flow.
blockQuote: 'blockQuote',
// The `>` or `> ` of a block quote.
blockQuotePrefix: 'blockQuotePrefix',
// The `>` of a block quote prefix.
blockQuoteMarker: 'blockQuoteMarker',
// The optional ` ` of a block quote prefix.
blockQuotePrefixWhitespace: 'blockQuotePrefixWhitespace',
// Whole ordered list:
//
// ```markdown
// 1. a
// b
// ```
//
// Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further
// lines.
listOrdered: 'listOrdered',
// Whole unordered list:
//
// ```markdown
// - a
// b
// ```
//
// Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further
// lines.
listUnordered: 'listUnordered',
// The indent of further list item lines.
listItemIndent: 'listItemIndent',
// A marker, as in, `*`, `+`, `-`, `.`, or `)`.
listItemMarker: 'listItemMarker',
// The thing that starts a list item, such as `1. `.
// Includes `listItemValue` if ordered, `listItemMarker`, and
// `listItemPrefixWhitespace` (unless followed by a line ending).
listItemPrefix: 'listItemPrefix',
// The whitespace after a marker.
listItemPrefixWhitespace: 'listItemPrefixWhitespace',
// The numerical value of an ordered item.
listItemValue: 'listItemValue',
// Internal types used for subtokenizers, compiled away
chunkContent: 'chunkContent',
chunkFlow: 'chunkFlow',
chunkText: 'chunkText',
chunkString: 'chunkString'
}
module.exports = types

448
node_modules/micromark/lib/constant/types.mjs generated vendored Normal file
View File

@@ -0,0 +1,448 @@
// This module is compiled away!
//
// Here is the list of all types of tokens exposed by micromark, with a short
// explanation of what they include and where they are found.
// In picking names, generally, the rule is to be as explicit as possible
// instead of reusing names.
// For example, there is a `definitionDestination` and a `resourceDestination`,
// instead of one shared name.
export default {
// Generic type for data, such as in a title, a destination, etc.
data: 'data',
// Generic type for syntactic whitespace (tabs, virtual spaces, spaces).
// Such as, between a fenced code fence and an info string.
whitespace: 'whitespace',
// Generic type for line endings (line feed, carriage return, carriage return +
// line feed).
lineEnding: 'lineEnding',
// A line ending, but ending a blank line.
lineEndingBlank: 'lineEndingBlank',
// Generic type for whitespace (tabs, virtual spaces, spaces) at the start of a
// line.
linePrefix: 'linePrefix',
// Generic type for whitespace (tabs, virtual spaces, spaces) at the end of a
// line.
lineSuffix: 'lineSuffix',
// Whole ATX heading:
//
// ```markdown
// #
// ## Alpha
// ### Bravo ###
// ```
//
// Includes `atxHeadingSequence`, `whitespace`, `atxHeadingText`.
atxHeading: 'atxHeading',
// Sequence of number signs in an ATX heading (`###`).
atxHeadingSequence: 'atxHeadingSequence',
// Content in an ATX heading (`alpha`).
// Includes text.
atxHeadingText: 'atxHeadingText',
// Whole autolink (`<https://example.com>` or `<admin@example.com>`)
// Includes `autolinkMarker` and `autolinkProtocol` or `autolinkEmail`.
autolink: 'autolink',
// Email autolink w/o markers (`admin@example.com`)
autolinkEmail: 'autolinkEmail',
// Marker around an `autolinkProtocol` or `autolinkEmail` (`<` or `>`).
autolinkMarker: 'autolinkMarker',
// Protocol autolink w/o markers (`https://example.com`)
autolinkProtocol: 'autolinkProtocol',
// A whole character escape (`\-`).
// Includes `escapeMarker` and `characterEscapeValue`.
characterEscape: 'characterEscape',
// The escaped character (`-`).
characterEscapeValue: 'characterEscapeValue',
// A whole character reference (`&amp;`, `&#8800;`, or `&#x1D306;`).
// Includes `characterReferenceMarker`, an optional
// `characterReferenceMarkerNumeric`, in which case an optional
// `characterReferenceMarkerHexadecimal`, and a `characterReferenceValue`.
characterReference: 'characterReference',
// The start or end marker (`&` or `;`).
characterReferenceMarker: 'characterReferenceMarker',
// Mark reference as numeric (`#`).
characterReferenceMarkerNumeric: 'characterReferenceMarkerNumeric',
// Mark reference as hexadecimal (`x` or `X`).
characterReferenceMarkerHexadecimal: 'characterReferenceMarkerHexadecimal',
// Value of character reference w/o markers (`amp`, `8800`, or `1D306`).
characterReferenceValue: 'characterReferenceValue',
// Whole fenced code:
//
// ````markdown
// ```js
// alert(1)
// ```
// ````
codeFenced: 'codeFenced',
// A fenced code fence, including whitespace, sequence, info, and meta
// (` ```js `).
codeFencedFence: 'codeFencedFence',
// Sequence of grave accent or tilde characters (` ``` `) in a fence.
codeFencedFenceSequence: 'codeFencedFenceSequence',
// Info word (`js`) in a fence.
// Includes string.
codeFencedFenceInfo: 'codeFencedFenceInfo',
// Meta words (`highlight="1"`) in a fence.
// Includes string.
codeFencedFenceMeta: 'codeFencedFenceMeta',
// A line of code.
codeFlowValue: 'codeFlowValue',
// Whole indented code:
//
// ```markdown
// alert(1)
// ```
//
// Includes `lineEnding`, `linePrefix`, and `codeFlowValue`.
codeIndented: 'codeIndented',
// A text code (``` `alpha` ```).
// Includes `codeTextSequence`, `codeTextData`, `lineEnding`, and can include
// `codeTextPadding`.
codeText: 'codeText',
codeTextData: 'codeTextData',
// A space or line ending right after or before a tick.
codeTextPadding: 'codeTextPadding',
// A text code fence (` `` `).
codeTextSequence: 'codeTextSequence',
// Whole content:
//
// ```markdown
// [a]: b
// c
// =
// d
// ```
//
// Includes `paragraph` and `definition`.
content: 'content',
// Whole definition:
//
// ```markdown
// [micromark]: https://github.com/micromark/micromark
// ```
//
// Includes `definitionLabel`, `definitionMarker`, `whitespace`,
// `definitionDestination`, and optionally `lineEnding` and `definitionTitle`.
definition: 'definition',
// Destination of a definition (`https://github.com/micromark/micromark` or
// `<https://github.com/micromark/micromark>`).
// Includes `definitionDestinationLiteral` or `definitionDestinationRaw`.
definitionDestination: 'definitionDestination',
// Enclosed destination of a definition
// (`<https://github.com/micromark/micromark>`).
// Includes `definitionDestinationLiteralMarker` and optionally
// `definitionDestinationString`.
definitionDestinationLiteral: 'definitionDestinationLiteral',
// Markers of an enclosed definition destination (`<` or `>`).
definitionDestinationLiteralMarker: 'definitionDestinationLiteralMarker',
// Unenclosed destination of a definition
// (`https://github.com/micromark/micromark`).
// Includes `definitionDestinationString`.
definitionDestinationRaw: 'definitionDestinationRaw',
// Text in an destination (`https://github.com/micromark/micromark`).
// Includes string.
definitionDestinationString: 'definitionDestinationString',
// Label of a definition (`[micromark]`).
// Includes `definitionLabelMarker` and `definitionLabelString`.
definitionLabel: 'definitionLabel',
// Markers of a definition label (`[` or `]`).
definitionLabelMarker: 'definitionLabelMarker',
// Value of a definition label (`micromark`).
// Includes string.
definitionLabelString: 'definitionLabelString',
// Marker between a label and a destination (`:`).
definitionMarker: 'definitionMarker',
// Title of a definition (`"x"`, `'y'`, or `(z)`).
// Includes `definitionTitleMarker` and optionally `definitionTitleString`.
definitionTitle: 'definitionTitle',
// Marker around a title of a definition (`"`, `'`, `(`, or `)`).
definitionTitleMarker: 'definitionTitleMarker',
// Data without markers in a title (`z`).
// Includes string.
definitionTitleString: 'definitionTitleString',
// Emphasis (`*alpha*`).
// Includes `emphasisSequence` and `emphasisText`.
emphasis: 'emphasis',
// Sequence of emphasis markers (`*` or `_`).
emphasisSequence: 'emphasisSequence',
// Emphasis text (`alpha`).
// Includes text.
emphasisText: 'emphasisText',
// The character escape marker (`\`).
escapeMarker: 'escapeMarker',
// A hard break created with a backslash (`\\n`).
// Includes `escapeMarker` (does not include the line ending)
hardBreakEscape: 'hardBreakEscape',
// A hard break created with trailing spaces (` \n`).
// Does not include the line ending.
hardBreakTrailing: 'hardBreakTrailing',
// Flow HTML:
//
// ```markdown
// <div
// ```
//
// Includes `lineEnding`, `htmlFlowData`.
htmlFlow: 'htmlFlow',
htmlFlowData: 'htmlFlowData',
// HTML in text (the tag in `a <i> b`).
// Includes `lineEnding`, `htmlTextData`.
htmlText: 'htmlText',
htmlTextData: 'htmlTextData',
// Whole image (`![alpha](bravo)`, `![alpha][bravo]`, `![alpha][]`, or
// `![alpha]`).
// Includes `label` and an optional `resource` or `reference`.
image: 'image',
// Whole link label (`[*alpha*]`).
// Includes `labelLink` or `labelImage`, `labelText`, and `labelEnd`.
label: 'label',
// Text in an label (`*alpha*`).
// Includes text.
labelText: 'labelText',
// Start a link label (`[`).
// Includes a `labelMarker`.
labelLink: 'labelLink',
// Start an image label (`![`).
// Includes `labelImageMarker` and `labelMarker`.
labelImage: 'labelImage',
// Marker of a label (`[` or `]`).
labelMarker: 'labelMarker',
// Marker to start an image (`!`).
labelImageMarker: 'labelImageMarker',
// End a label (`]`).
// Includes `labelMarker`.
labelEnd: 'labelEnd',
// Whole link (`[alpha](bravo)`, `[alpha][bravo]`, `[alpha][]`, or `[alpha]`).
// Includes `label` and an optional `resource` or `reference`.
link: 'link',
// Whole paragraph:
//
// ```markdown
// alpha
// bravo.
// ```
//
// Includes text.
paragraph: 'paragraph',
// A reference (`[alpha]` or `[]`).
// Includes `referenceMarker` and an optional `referenceString`.
reference: 'reference',
// A reference marker (`[` or `]`).
referenceMarker: 'referenceMarker',
// Reference text (`alpha`).
// Includes string.
referenceString: 'referenceString',
// A resource (`(https://example.com "alpha")`).
// Includes `resourceMarker`, an optional `resourceDestination` with an optional
// `whitespace` and `resourceTitle`.
resource: 'resource',
// A resource destination (`https://example.com`).
// Includes `resourceDestinationLiteral` or `resourceDestinationRaw`.
resourceDestination: 'resourceDestination',
// A literal resource destination (`<https://example.com>`).
// Includes `resourceDestinationLiteralMarker` and optionally
// `resourceDestinationString`.
resourceDestinationLiteral: 'resourceDestinationLiteral',
// A resource destination marker (`<` or `>`).
resourceDestinationLiteralMarker: 'resourceDestinationLiteralMarker',
// A raw resource destination (`https://example.com`).
// Includes `resourceDestinationString`.
resourceDestinationRaw: 'resourceDestinationRaw',
// Resource destination text (`https://example.com`).
// Includes string.
resourceDestinationString: 'resourceDestinationString',
// A resource marker (`(` or `)`).
resourceMarker: 'resourceMarker',
// A resource title (`"alpha"`, `'alpha'`, or `(alpha)`).
// Includes `resourceTitleMarker` and optionally `resourceTitleString`.
resourceTitle: 'resourceTitle',
// A resource title marker (`"`, `'`, `(`, or `)`).
resourceTitleMarker: 'resourceTitleMarker',
// Resource destination title (`alpha`).
// Includes string.
resourceTitleString: 'resourceTitleString',
// Whole setext heading:
//
// ```markdown
// alpha
// bravo
// =====
// ```
//
// Includes `setextHeadingText`, `lineEnding`, `linePrefix`, and
// `setextHeadingLine`.
setextHeading: 'setextHeading',
// Content in a setext heading (`alpha\nbravo`).
// Includes text.
setextHeadingText: 'setextHeadingText',
// Underline in a setext heading, including whitespace suffix (`==`).
// Includes `setextHeadingLineSequence`.
setextHeadingLine: 'setextHeadingLine',
// Sequence of equals or dash characters in underline in a setext heading (`-`).
setextHeadingLineSequence: 'setextHeadingLineSequence',
// Strong (`**alpha**`).
// Includes `strongSequence` and `strongText`.
strong: 'strong',
// Sequence of strong markers (`**` or `__`).
strongSequence: 'strongSequence',
// Strong text (`alpha`).
// Includes text.
strongText: 'strongText',
// Whole thematic break:
//
// ```markdown
// * * *
// ```
//
// Includes `thematicBreakSequence` and `whitespace`.
thematicBreak: 'thematicBreak',
// A sequence of one or more thematic break markers (`***`).
thematicBreakSequence: 'thematicBreakSequence',
// Whole block quote:
//
// ```markdown
// > a
// >
// > b
// ```
//
// Includes `blockQuotePrefix` and flow.
blockQuote: 'blockQuote',
// The `>` or `> ` of a block quote.
blockQuotePrefix: 'blockQuotePrefix',
// The `>` of a block quote prefix.
blockQuoteMarker: 'blockQuoteMarker',
// The optional ` ` of a block quote prefix.
blockQuotePrefixWhitespace: 'blockQuotePrefixWhitespace',
// Whole unordered list:
//
// ```markdown
// - a
// b
// ```
//
// Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further
// lines.
listOrdered: 'listOrdered',
// Whole ordered list:
//
// ```markdown
// 1. a
// b
// ```
//
// Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further
// lines.
listUnordered: 'listUnordered',
// The indent of further list item lines.
listItemIndent: 'listItemIndent',
// A marker, as in, `*`, `+`, `-`, `.`, or `)`.
listItemMarker: 'listItemMarker',
// The thing that starts a list item, such as `1. `.
// Includes `listItemValue` if ordered, `listItemMarker`, and
// `listItemPrefixWhitespace` (unless followed by a line ending).
listItemPrefix: 'listItemPrefix',
// The whitespace after a marker.
listItemPrefixWhitespace: 'listItemPrefixWhitespace',
// The numerical value of an ordered item.
listItemValue: 'listItemValue',
// Internal types used for subtokenizers, compiled away
chunkContent: 'chunkContent',
chunkFlow: 'chunkFlow',
chunkText: 'chunkText',
chunkString: 'chunkString'
}

View File

@@ -0,0 +1,11 @@
'use strict'

// This module is generated by `script/` — do not edit the regex by hand.
//
// CommonMark handles attention (emphasis, strong) markers based on what comes
// before or after them.
// One such difference is whether those characters are Unicode punctuation.
// The character class below enumerates the Unicode punctuation ranges (BMP
// only) and is generated from the Unicode data files.
var unicodePunctuation = /[!-\/:-@\[-`\{-~\xA1\xA7\xAB\xB6\xB7\xBB\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u2E52\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/

module.exports = unicodePunctuation

View File

@@ -0,0 +1,7 @@
// This module is generated by `script/` — do not edit the regex by hand.
//
// CommonMark handles attention (emphasis, strong) markers based on what comes
// before or after them.
// One such difference is whether those characters are Unicode punctuation.
// The character class below enumerates the Unicode punctuation ranges (BMP
// only) and is generated from the Unicode data files.
export default /[!-/:-@[-`{-~\u00A1\u00A7\u00AB\u00B6\u00B7\u00BB\u00BF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u2E52\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/

98
node_modules/micromark/lib/constructs.js generated vendored Normal file
View File

@@ -0,0 +1,98 @@
'use strict'

Object.defineProperty(exports, '__esModule', {value: true})

var text$1 = require('./initialize/text.js')
var attention = require('./tokenize/attention.js')
var autolink = require('./tokenize/autolink.js')
var blockQuote = require('./tokenize/block-quote.js')
var characterEscape = require('./tokenize/character-escape.js')
var characterReference = require('./tokenize/character-reference.js')
var codeFenced = require('./tokenize/code-fenced.js')
var codeIndented = require('./tokenize/code-indented.js')
var codeText = require('./tokenize/code-text.js')
var definition = require('./tokenize/definition.js')
var hardBreakEscape = require('./tokenize/hard-break-escape.js')
var headingAtx = require('./tokenize/heading-atx.js')
var htmlFlow = require('./tokenize/html-flow.js')
var htmlText = require('./tokenize/html-text.js')
var labelEnd = require('./tokenize/label-end.js')
var labelStartImage = require('./tokenize/label-start-image.js')
var labelStartLink = require('./tokenize/label-start-link.js')
var lineEnding = require('./tokenize/line-ending.js')
var list = require('./tokenize/list.js')
var setextUnderline = require('./tokenize/setext-underline.js')
var thematicBreak = require('./tokenize/thematic-break.js')

// Default construct maps for each content type.
// Each map is keyed by a character code (negative values are the special
// whitespace codes) and maps to one construct, or an array of constructs,
// that may start at a character with that code.

// Constructs tried at the start of each document line (containers).
var document = {
  42: list, // Asterisk
  43: list, // Plus sign
  45: list, // Dash
  48: list, // 0
  49: list, // 1
  50: list, // 2
  51: list, // 3
  52: list, // 4
  53: list, // 5
  54: list, // 6
  55: list, // 7
  56: list, // 8
  57: list, // 9
  62: blockQuote // Greater than
}

// Constructs tried at the very start of content (before a paragraph).
var contentInitial = {
  91: definition // Left square bracket
}

// Constructs tried at the very start of a flow line (only indented code).
var flowInitial = {
  '-2': codeIndented, // Horizontal tab
  '-1': codeIndented, // Virtual space
  32: codeIndented // Space
}

// Constructs tried for flow content (block-level things).
var flow = {
  35: headingAtx, // Number sign
  42: thematicBreak, // Asterisk
  45: [setextUnderline, thematicBreak], // Dash
  60: htmlFlow, // Less than
  61: setextUnderline, // Equals to
  95: thematicBreak, // Underscore
  96: codeFenced, // Grave accent
  126: codeFenced // Tilde
}

// Constructs tried inside strings (e.g. titles, destinations).
var string = {
  38: characterReference, // Ampersand
  92: characterEscape // Backslash
}

// Constructs tried inside text (inline content in paragraphs, headings).
var text = {
  '-5': lineEnding, // Carriage return
  '-4': lineEnding, // Line feed
  '-3': lineEnding, // Carriage return + line feed
  33: labelStartImage, // Exclamation mark
  38: characterReference, // Ampersand
  42: attention, // Asterisk
  60: [autolink, htmlText], // Less than
  91: labelStartLink, // Left square bracket
  92: [hardBreakEscape, characterEscape], // Backslash
  93: labelEnd, // Right square bracket
  95: attention, // Underscore
  96: codeText // Grave accent
}

// Resolvers run over whole spans after tokenizing (attention pairing, data
// merging); keyed by `null` because they are not tied to one character.
var insideSpan = {
  null: [attention, text$1.resolver]
}

// Names of constructs to disable (filled in by user extensions).
var disable = {null: []}

exports.contentInitial = contentInitial
exports.disable = disable
exports.document = document
exports.flow = flow
exports.flowInitial = flowInitial
exports.insideSpan = insideSpan
exports.string = string
exports.text = text

85
node_modules/micromark/lib/constructs.mjs generated vendored Normal file
View File

@@ -0,0 +1,85 @@
import {resolver as resolveText} from './initialize/text.mjs'
import attention from './tokenize/attention.mjs'
import autolink from './tokenize/autolink.mjs'
import blockQuote from './tokenize/block-quote.mjs'
import characterEscape from './tokenize/character-escape.mjs'
import characterReference from './tokenize/character-reference.mjs'
import codeFenced from './tokenize/code-fenced.mjs'
import codeIndented from './tokenize/code-indented.mjs'
import codeText from './tokenize/code-text.mjs'
import definition from './tokenize/definition.mjs'
import hardBreakEscape from './tokenize/hard-break-escape.mjs'
import headingAtx from './tokenize/heading-atx.mjs'
import htmlFlow from './tokenize/html-flow.mjs'
import htmlText from './tokenize/html-text.mjs'
import labelEnd from './tokenize/label-end.mjs'
import labelImage from './tokenize/label-start-image.mjs'
import labelLink from './tokenize/label-start-link.mjs'
import lineEnding from './tokenize/line-ending.mjs'
import list from './tokenize/list.mjs'
import setextUnderline from './tokenize/setext-underline.mjs'
import thematicBreak from './tokenize/thematic-break.mjs'

// Default construct maps for each content type.
// Each map is keyed by a character code (negative values are the special
// whitespace codes) and maps to one construct, or an array of constructs,
// that may start at a character with that code.

// Constructs tried at the start of each document line (containers).
export var document = {
  42: list, // Asterisk
  43: list, // Plus sign
  45: list, // Dash
  48: list, // 0
  49: list, // 1
  50: list, // 2
  51: list, // 3
  52: list, // 4
  53: list, // 5
  54: list, // 6
  55: list, // 7
  56: list, // 8
  57: list, // 9
  62: blockQuote // Greater than
}

// Constructs tried at the very start of content (before a paragraph).
export var contentInitial = {
  91: definition // Left square bracket
}

// Constructs tried at the very start of a flow line (only indented code).
export var flowInitial = {
  '-2': codeIndented, // Horizontal tab
  '-1': codeIndented, // Virtual space
  32: codeIndented // Space
}

// Constructs tried for flow content (block-level things).
export var flow = {
  35: headingAtx, // Number sign
  42: thematicBreak, // Asterisk
  45: [setextUnderline, thematicBreak], // Dash
  60: htmlFlow, // Less than
  61: setextUnderline, // Equals to
  95: thematicBreak, // Underscore
  96: codeFenced, // Grave accent
  126: codeFenced // Tilde
}

// Constructs tried inside strings (e.g. titles, destinations).
export var string = {
  38: characterReference, // Ampersand
  92: characterEscape // Backslash
}

// Constructs tried inside text (inline content in paragraphs, headings).
export var text = {
  '-5': lineEnding, // Carriage return
  '-4': lineEnding, // Line feed
  '-3': lineEnding, // Carriage return + line feed
  33: labelImage, // Exclamation mark
  38: characterReference, // Ampersand
  42: attention, // Asterisk
  60: [autolink, htmlText], // Less than
  91: labelLink, // Left square bracket
  92: [hardBreakEscape, characterEscape], // Backslash
  93: labelEnd, // Right square bracket
  95: attention, // Underscore
  96: codeText // Grave accent
}

// Resolvers run over whole spans after tokenizing (attention pairing, data
// merging); keyed by `null` because they are not tied to one character.
export var insideSpan = {
  null: [attention, resolveText]
}

// Names of constructs to disable (filled in by user extensions).
export var disable = {null: []}

11
node_modules/micromark/lib/index.d.ts generated vendored Normal file
View File

@@ -0,0 +1,11 @@
import {Buffer, BufferEncoding, Options} from './shared-types'

/**
 * Compile markdown held entirely in memory ("buffer" mode) to an HTML string.
 *
 * Overloads: the `encoding` argument may be omitted, in which case the second
 * argument is treated as `options`.
 *
 * @param value Markdown to parse (string or Node `Buffer`).
 * @param encoding Character encoding used when `value` is a `Buffer`.
 * @param options Configuration (extensions, allowDangerousHtml, etc.).
 * @returns Compiled HTML.
 */
declare function buffer(value: string | Buffer, options?: Options): string
declare function buffer(
  value: string | Buffer,
  encoding?: BufferEncoding,
  options?: Options
): string
export default buffer

21
node_modules/micromark/lib/index.js generated vendored Normal file
View File

@@ -0,0 +1,21 @@
'use strict'

var html = require('./compile/html.js')
var parse = require('./parse.js')
var postprocess = require('./postprocess.js')
var preprocess = require('./preprocess.js')

// Compile markdown held entirely in memory ("buffer" mode) to an HTML string.
// The `encoding` argument is optional: `buffer(value, options)` also works.
function buffer(value, encoding, options) {
  // Support the two-argument form by shifting the arguments left.
  if (typeof encoding !== 'string') {
    options = encoding
    encoding = undefined
  }

  // Chunk the input, feed every chunk (plus the EOF marker, `true`) to a
  // fresh document tokenizer, resolve the resulting events, and compile
  // them to HTML.
  var chunks = preprocess()(value, encoding, true)
  var events = parse(options).document().write(chunks)
  return html(options)(postprocess(events))
}

module.exports = buffer

19
node_modules/micromark/lib/index.mjs generated vendored Normal file
View File

@@ -0,0 +1,19 @@
import compiler from './compile/html.mjs'
import parser from './parse.mjs'
import postprocess from './postprocess.mjs'
import preprocessor from './preprocess.mjs'

// Compile markdown held entirely in memory ("buffer" mode) to an HTML string.
// The `encoding` argument is optional: `buffer(value, options)` also works.
function buffer(value, encoding, options) {
  // Support the two-argument form by shifting the arguments left.
  if (typeof encoding !== 'string') {
    options = encoding
    encoding = undefined
  }

  // Chunk the input, feed every chunk (plus the EOF marker, `true`) to a
  // fresh document tokenizer, resolve the resulting events, and compile
  // them to HTML.
  var chunks = preprocessor()(value, encoding, true)
  var events = parser(options).document().write(chunks)
  return compiler(options)(postprocess(events))
}

export default buffer

91
node_modules/micromark/lib/initialize/content.js generated vendored Normal file
View File

@@ -0,0 +1,91 @@
'use strict'

Object.defineProperty(exports, '__esModule', {value: true})

var assert = require('assert')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var factorySpace = require('../tokenize/factory-space.js')

function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}

var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)

var tokenize = initializeContent

// Initializer for the "content" content type: first tries the
// `contentInitial` constructs (definitions), then falls back to a paragraph
// made of linked `chunkText` tokens, one per line.
function initializeContent(effects) {
  // Entry state: attempt a definition; otherwise start a paragraph.
  var contentStart = effects.attempt(
    this.parser.constructs.contentInitial,
    afterContentStartConstruct,
    paragraphInitial
  )
  // Previous `chunkText` token, so consecutive lines can be linked together.
  var previous

  return contentStart

  // After a definition: consume the line ending (or EOF) and loop back, as
  // more definitions may follow.
  function afterContentStartConstruct(code) {
    assert__default['default'](
      code === codes.eof || markdownLineEnding(code),
      'expected eol or eof'
    )

    if (code === codes.eof) {
      effects.consume(code)
      return
    }

    effects.enter(types.lineEnding)
    effects.consume(code)
    effects.exit(types.lineEnding)
    // Skip optional indentation before the next construct.
    return factorySpace(effects, contentStart, types.linePrefix)
  }

  // No definition matched: open a paragraph.
  function paragraphInitial(code) {
    assert__default['default'](
      code !== codes.eof && !markdownLineEnding(code),
      'expected anything other than a line ending or EOF'
    )
    effects.enter(types.paragraph)
    return lineStart(code)
  }

  // Start a `chunkText` token for one paragraph line and chain it to the
  // previous line's token (the text tokenizer later walks this chain).
  function lineStart(code) {
    var token = effects.enter(types.chunkText, {
      contentType: constants.contentTypeText,
      previous: previous
    })

    if (previous) {
      previous.next = token
    }

    previous = token

    return data(code)
  }

  // Consume arbitrary characters until EOF (close paragraph) or a line
  // ending (close this chunk, start the next line).
  function data(code) {
    if (code === codes.eof) {
      effects.exit(types.chunkText)
      effects.exit(types.paragraph)
      effects.consume(code)
      return
    }

    if (markdownLineEnding(code)) {
      effects.consume(code)
      effects.exit(types.chunkText)
      return lineStart
    }

    // Data.
    effects.consume(code)
    return data
  }
}

exports.tokenize = tokenize

79
node_modules/micromark/lib/initialize/content.mjs generated vendored Normal file
View File

@@ -0,0 +1,79 @@
export var tokenize = initializeContent

import assert from 'assert'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import spaceFactory from '../tokenize/factory-space.mjs'

// Initializer for the "content" content type: first tries the
// `contentInitial` constructs (definitions), then falls back to a paragraph
// made of linked `chunkText` tokens, one per line.
function initializeContent(effects) {
  // Entry state: attempt a definition; otherwise start a paragraph.
  var contentStart = effects.attempt(
    this.parser.constructs.contentInitial,
    afterContentStartConstruct,
    paragraphInitial
  )
  // Previous `chunkText` token, so consecutive lines can be linked together.
  var previous

  return contentStart

  // After a definition: consume the line ending (or EOF) and loop back, as
  // more definitions may follow.
  function afterContentStartConstruct(code) {
    assert(
      code === codes.eof || markdownLineEnding(code),
      'expected eol or eof'
    )

    if (code === codes.eof) {
      effects.consume(code)
      return
    }

    effects.enter(types.lineEnding)
    effects.consume(code)
    effects.exit(types.lineEnding)
    // Skip optional indentation before the next construct.
    return spaceFactory(effects, contentStart, types.linePrefix)
  }

  // No definition matched: open a paragraph.
  function paragraphInitial(code) {
    assert(
      code !== codes.eof && !markdownLineEnding(code),
      'expected anything other than a line ending or EOF'
    )
    effects.enter(types.paragraph)
    return lineStart(code)
  }

  // Start a `chunkText` token for one paragraph line and chain it to the
  // previous line's token (the text tokenizer later walks this chain).
  function lineStart(code) {
    var token = effects.enter(types.chunkText, {
      contentType: constants.contentTypeText,
      previous: previous
    })

    if (previous) {
      previous.next = token
    }

    previous = token

    return data(code)
  }

  // Consume arbitrary characters until EOF (close paragraph) or a line
  // ending (close this chunk, start the next line).
  function data(code) {
    if (code === codes.eof) {
      effects.exit(types.chunkText)
      effects.exit(types.paragraph)
      effects.consume(code)
      return
    }

    if (markdownLineEnding(code)) {
      effects.consume(code)
      effects.exit(types.chunkText)
      return lineStart
    }

    // Data.
    effects.consume(code)
    return data
  }
}

245
node_modules/micromark/lib/initialize/document.js generated vendored Normal file
View File

@@ -0,0 +1,245 @@
'use strict'

Object.defineProperty(exports, '__esModule', {value: true})

var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var factorySpace = require('../tokenize/factory-space.js')
var partialBlankLine = require('../tokenize/partial-blank-line.js')

var tokenize = initializeDocument

var containerConstruct = {tokenize: tokenizeContainer}
var lazyFlowConstruct = {tokenize: tokenizeLazyFlow}

// Initializer for the document content type: tracks a stack of open
// containers (block quotes, lists), feeds the remainder of each line to a
// child flow tokenizer as `chunkFlow` tokens, and peeks ahead at line
// endings to decide which containers continue, close, or are lazy.
function initializeDocument(effects) {
  var self = this
  // Stack of `[construct, containerState]` pairs for open containers.
  var stack = []
  // How many containers on the stack matched a continuation on this line.
  var continued = 0
  var inspectConstruct = {tokenize: tokenizeInspect, partial: true}
  // Result of the last look-ahead (`tokenizeInspect`).
  var inspectResult
  // Child tokenizer for flow content, shared across lines.
  var childFlow
  // Last `chunkFlow` token, so chunks can be linked together.
  var childToken

  return start

  // Try to continue each open container, in order, at the line start.
  function start(code) {
    if (continued < stack.length) {
      self.containerState = stack[continued][1]
      return effects.attempt(
        stack[continued][0].continuation,
        documentContinue,
        documentContinued
      )(code)
    }

    return documentContinued(code)
  }

  // One more container continued; try the next one.
  function documentContinue(code) {
    continued++
    return start(code)
  }

  // All continuations handled; look for new containers, then flow.
  function documentContinued(code) {
    // If we're in a concrete construct (such as when expecting another line
    // of HTML, or we resulted in lazy content), we can immediately start
    // flow.
    if (inspectResult && inspectResult.flowContinue) {
      return flowStart(code)
    }

    self.interrupt =
      childFlow &&
      childFlow.currentConstruct &&
      childFlow.currentConstruct.interruptible
    self.containerState = {}
    return effects.attempt(
      containerConstruct,
      containerContinue,
      flowStart
    )(code)
  }

  // A new container opened: push it and look for more.
  function containerContinue(code) {
    stack.push([self.currentConstruct, self.containerState])
    self.containerState = undefined
    return documentContinued(code)
  }

  // Start (or resume) flow content for the rest of the line.
  function flowStart(code) {
    if (code === codes.eof) {
      exitContainers(0, true)
      effects.consume(code)
      return
    }

    childFlow = childFlow || self.parser.flow(self.now())
    effects.enter(types.chunkFlow, {
      contentType: constants.contentTypeFlow,
      previous: childToken,
      _tokenizer: childFlow
    })
    return flowContinue(code)
  }

  // Consume until the end of the line, then peek ahead at the next line.
  function flowContinue(code) {
    if (code === codes.eof) {
      continueFlow(effects.exit(types.chunkFlow))
      return flowStart(code)
    }

    if (markdownLineEnding(code)) {
      effects.consume(code)
      continueFlow(effects.exit(types.chunkFlow))
      return effects.check(inspectConstruct, documentAfterPeek)
    }

    effects.consume(code)
    return flowContinue
  }

  // Apply what the look-ahead learned: close containers that did not
  // continue, maybe close the flow, and restart at the new line.
  // NOTE(review): `inspectResult.continued` is read before the
  // `inspectResult &&` guard on the next argument; `inspectResult` is always
  // set by `tokenizeInspect` before this runs, so the guard is redundant
  // rather than the access unsafe.
  function documentAfterPeek(code) {
    exitContainers(
      inspectResult.continued,
      inspectResult && inspectResult.flowEnd
    )
    continued = 0
    return start(code)
  }

  // Link this `chunkFlow` to the previous one and feed its bytes to the
  // child flow tokenizer.
  function continueFlow(token) {
    if (childToken) childToken.next = token
    childToken = token
    childFlow.lazy = inspectResult && inspectResult.lazy
    childFlow.defineSkip(token.start)
    childFlow.write(self.sliceStream(token))
  }

  // Close all containers above `size`; when `end` is set, also flush EOF
  // into the child flow tokenizer first so flow closes before containers.
  function exitContainers(size, end) {
    var index = stack.length

    // Close the flow.
    if (childFlow && end) {
      childFlow.write([codes.eof])
      childToken = childFlow = undefined
    }

    // Exit open containers.
    while (index-- > size) {
      self.containerState = stack[index][1]
      stack[index][0].exit.call(self, effects)
    }

    stack.length = size
  }

  // Partial construct used with `effects.check` to peek at the next line
  // without consuming it; fills `inspectResult`.
  function tokenizeInspect(effects, ok) {
    var subcontinued = 0

    inspectResult = {}

    return inspectStart

    // Mirror `start`: try continuations for each open container.
    function inspectStart(code) {
      if (subcontinued < stack.length) {
        self.containerState = stack[subcontinued][1]
        return effects.attempt(
          stack[subcontinued][0].continuation,
          inspectContinue,
          inspectLess
        )(code)
      }

      // If we're continued but in a concrete flow, we can't have more
      // containers.
      if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
        inspectResult.flowContinue = true
        return inspectDone(code)
      }

      self.interrupt =
        childFlow.currentConstruct && childFlow.currentConstruct.interruptible
      self.containerState = {}
      return effects.attempt(
        containerConstruct,
        inspectFlowEnd,
        inspectDone
      )(code)
    }

    function inspectContinue(code) {
      subcontinued++
      // A container may request the flow to close (e.g. a blank line in a
      // list item).
      return self.containerState._closeFlow
        ? inspectFlowEnd(code)
        : inspectStart(code)
    }

    // A container did not continue: the line may still be lazy content.
    function inspectLess(code) {
      if (childFlow.currentConstruct && childFlow.currentConstruct.lazy) {
        // Maybe another container?
        self.containerState = {}
        return effects.attempt(
          containerConstruct,
          inspectFlowEnd,
          // Maybe flow, or a blank line?
          effects.attempt(
            lazyFlowConstruct,
            inspectFlowEnd,
            effects.check(partialBlankLine, inspectFlowEnd, inspectLazy)
          )
        )(code)
      }

      // Otherwise we're interrupting.
      return inspectFlowEnd(code)
    }

    function inspectLazy(code) {
      // Act as if all containers are continued.
      subcontinued = stack.length
      inspectResult.lazy = true
      inspectResult.flowContinue = true
      return inspectDone(code)
    }

    // We're done with flow if we have more containers, or an interruption.
    function inspectFlowEnd(code) {
      inspectResult.flowEnd = true
      return inspectDone(code)
    }

    function inspectDone(code) {
      inspectResult.continued = subcontinued
      self.interrupt = self.containerState = undefined
      return ok(code)
    }
  }
}

// Attempt a new container after optional indentation (up to a tab size,
// unless indented code is disabled).
function tokenizeContainer(effects, ok, nok) {
  return factorySpace(
    effects,
    effects.attempt(this.parser.constructs.document, ok, nok),
    types.linePrefix,
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : constants.tabSize
  )
}

// Attempt flow lazily (continuing the previous flow construct) after
// optional indentation.
function tokenizeLazyFlow(effects, ok, nok) {
  return factorySpace(
    effects,
    effects.lazy(this.parser.constructs.flow, ok, nok),
    types.linePrefix,
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : constants.tabSize
  )
}

exports.tokenize = tokenize

239
node_modules/micromark/lib/initialize/document.mjs generated vendored Normal file
View File

@@ -0,0 +1,239 @@
export var tokenize = initializeDocument

import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import spaceFactory from '../tokenize/factory-space.mjs'
import blank from '../tokenize/partial-blank-line.mjs'

var containerConstruct = {tokenize: tokenizeContainer}
var lazyFlowConstruct = {tokenize: tokenizeLazyFlow}

// Initializer for the document content type: tracks a stack of open
// containers (block quotes, lists), feeds the remainder of each line to a
// child flow tokenizer as `chunkFlow` tokens, and peeks ahead at line
// endings to decide which containers continue, close, or are lazy.
function initializeDocument(effects) {
  var self = this
  // Stack of `[construct, containerState]` pairs for open containers.
  var stack = []
  // How many containers on the stack matched a continuation on this line.
  var continued = 0
  var inspectConstruct = {tokenize: tokenizeInspect, partial: true}
  // Result of the last look-ahead (`tokenizeInspect`).
  var inspectResult
  // Child tokenizer for flow content, shared across lines.
  var childFlow
  // Last `chunkFlow` token, so chunks can be linked together.
  var childToken

  return start

  // Try to continue each open container, in order, at the line start.
  function start(code) {
    if (continued < stack.length) {
      self.containerState = stack[continued][1]
      return effects.attempt(
        stack[continued][0].continuation,
        documentContinue,
        documentContinued
      )(code)
    }

    return documentContinued(code)
  }

  // One more container continued; try the next one.
  function documentContinue(code) {
    continued++
    return start(code)
  }

  // All continuations handled; look for new containers, then flow.
  function documentContinued(code) {
    // If we're in a concrete construct (such as when expecting another line
    // of HTML, or we resulted in lazy content), we can immediately start
    // flow.
    if (inspectResult && inspectResult.flowContinue) {
      return flowStart(code)
    }

    self.interrupt =
      childFlow &&
      childFlow.currentConstruct &&
      childFlow.currentConstruct.interruptible
    self.containerState = {}
    return effects.attempt(
      containerConstruct,
      containerContinue,
      flowStart
    )(code)
  }

  // A new container opened: push it and look for more.
  function containerContinue(code) {
    stack.push([self.currentConstruct, self.containerState])
    self.containerState = undefined
    return documentContinued(code)
  }

  // Start (or resume) flow content for the rest of the line.
  function flowStart(code) {
    if (code === codes.eof) {
      exitContainers(0, true)
      effects.consume(code)
      return
    }

    childFlow = childFlow || self.parser.flow(self.now())
    effects.enter(types.chunkFlow, {
      contentType: constants.contentTypeFlow,
      previous: childToken,
      _tokenizer: childFlow
    })
    return flowContinue(code)
  }

  // Consume until the end of the line, then peek ahead at the next line.
  function flowContinue(code) {
    if (code === codes.eof) {
      continueFlow(effects.exit(types.chunkFlow))
      return flowStart(code)
    }

    if (markdownLineEnding(code)) {
      effects.consume(code)
      continueFlow(effects.exit(types.chunkFlow))
      return effects.check(inspectConstruct, documentAfterPeek)
    }

    effects.consume(code)
    return flowContinue
  }

  // Apply what the look-ahead learned: close containers that did not
  // continue, maybe close the flow, and restart at the new line.
  // NOTE(review): `inspectResult.continued` is read before the
  // `inspectResult &&` guard on the next argument; `inspectResult` is always
  // set by `tokenizeInspect` before this runs, so the guard is redundant
  // rather than the access unsafe.
  function documentAfterPeek(code) {
    exitContainers(
      inspectResult.continued,
      inspectResult && inspectResult.flowEnd
    )
    continued = 0
    return start(code)
  }

  // Link this `chunkFlow` to the previous one and feed its bytes to the
  // child flow tokenizer.
  function continueFlow(token) {
    if (childToken) childToken.next = token
    childToken = token
    childFlow.lazy = inspectResult && inspectResult.lazy
    childFlow.defineSkip(token.start)
    childFlow.write(self.sliceStream(token))
  }

  // Close all containers above `size`; when `end` is set, also flush EOF
  // into the child flow tokenizer first so flow closes before containers.
  function exitContainers(size, end) {
    var index = stack.length

    // Close the flow.
    if (childFlow && end) {
      childFlow.write([codes.eof])
      childToken = childFlow = undefined
    }

    // Exit open containers.
    while (index-- > size) {
      self.containerState = stack[index][1]
      stack[index][0].exit.call(self, effects)
    }

    stack.length = size
  }

  // Partial construct used with `effects.check` to peek at the next line
  // without consuming it; fills `inspectResult`.
  function tokenizeInspect(effects, ok) {
    var subcontinued = 0

    inspectResult = {}

    return inspectStart

    // Mirror `start`: try continuations for each open container.
    function inspectStart(code) {
      if (subcontinued < stack.length) {
        self.containerState = stack[subcontinued][1]
        return effects.attempt(
          stack[subcontinued][0].continuation,
          inspectContinue,
          inspectLess
        )(code)
      }

      // If we're continued but in a concrete flow, we can't have more
      // containers.
      if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
        inspectResult.flowContinue = true
        return inspectDone(code)
      }

      self.interrupt =
        childFlow.currentConstruct && childFlow.currentConstruct.interruptible
      self.containerState = {}
      return effects.attempt(
        containerConstruct,
        inspectFlowEnd,
        inspectDone
      )(code)
    }

    function inspectContinue(code) {
      subcontinued++
      // A container may request the flow to close (e.g. a blank line in a
      // list item).
      return self.containerState._closeFlow
        ? inspectFlowEnd(code)
        : inspectStart(code)
    }

    // A container did not continue: the line may still be lazy content.
    function inspectLess(code) {
      if (childFlow.currentConstruct && childFlow.currentConstruct.lazy) {
        // Maybe another container?
        self.containerState = {}
        return effects.attempt(
          containerConstruct,
          inspectFlowEnd,
          // Maybe flow, or a blank line?
          effects.attempt(
            lazyFlowConstruct,
            inspectFlowEnd,
            effects.check(blank, inspectFlowEnd, inspectLazy)
          )
        )(code)
      }

      // Otherwise we're interrupting.
      return inspectFlowEnd(code)
    }

    function inspectLazy(code) {
      // Act as if all containers are continued.
      subcontinued = stack.length
      inspectResult.lazy = true
      inspectResult.flowContinue = true
      return inspectDone(code)
    }

    // We're done with flow if we have more containers, or an interruption.
    function inspectFlowEnd(code) {
      inspectResult.flowEnd = true
      return inspectDone(code)
    }

    function inspectDone(code) {
      inspectResult.continued = subcontinued
      self.interrupt = self.containerState = undefined
      return ok(code)
    }
  }
}

// Attempt a new container after optional indentation (up to a tab size,
// unless indented code is disabled).
function tokenizeContainer(effects, ok, nok) {
  return spaceFactory(
    effects,
    effects.attempt(this.parser.constructs.document, ok, nok),
    types.linePrefix,
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : constants.tabSize
  )
}

// Attempt flow lazily (continuing the previous flow construct) after
// optional indentation.
function tokenizeLazyFlow(effects, ok, nok) {
  return spaceFactory(
    effects,
    effects.lazy(this.parser.constructs.flow, ok, nok),
    types.linePrefix,
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : constants.tabSize
  )
}

82
node_modules/micromark/lib/initialize/flow.js generated vendored Normal file
View File

@@ -0,0 +1,82 @@
'use strict'

Object.defineProperty(exports, '__esModule', {value: true})

var assert = require('assert')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var types = require('../constant/types.js')
var content = require('../tokenize/content.js')
var factorySpace = require('../tokenize/factory-space.js')
var partialBlankLine = require('../tokenize/partial-blank-line.js')

function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}

var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)

var tokenize = initializeFlow

// Initializer for the flow content type: per line, try a blank line, then
// initial flow (indented code), then regular flow constructs, finally
// falling back to `content` (paragraphs, definitions).
function initializeFlow(effects) {
  var self = this
  // Entry state for every line; each attempt either handles the line or
  // hands off to the next fallback.
  var initial = effects.attempt(
    // Try to parse a blank line.
    partialBlankLine,
    atBlankEnding,
    // Try to parse initial flow (essentially, only code).
    effects.attempt(
      this.parser.constructs.flowInitial,
      afterConstruct,
      factorySpace(
        effects,
        effects.attempt(
          this.parser.constructs.flow,
          afterConstruct,
          effects.attempt(content, afterConstruct)
        ),
        types.linePrefix
      )
    )
  )

  return initial

  // A blank line matched: emit its ending as `lineEndingBlank` and reset
  // the current construct so the next line starts fresh.
  function atBlankEnding(code) {
    assert__default['default'](
      code === codes.eof || markdownLineEnding(code),
      'expected eol or eof'
    )

    if (code === codes.eof) {
      effects.consume(code)
      return
    }

    effects.enter(types.lineEndingBlank)
    effects.consume(code)
    effects.exit(types.lineEndingBlank)
    self.currentConstruct = undefined
    return initial
  }

  // A construct finished a line: emit the line ending and loop.
  function afterConstruct(code) {
    assert__default['default'](
      code === codes.eof || markdownLineEnding(code),
      'expected eol or eof'
    )

    if (code === codes.eof) {
      effects.consume(code)
      return
    }

    effects.enter(types.lineEnding)
    effects.consume(code)
    effects.exit(types.lineEnding)
    self.currentConstruct = undefined
    return initial
  }
}

exports.tokenize = tokenize

70
node_modules/micromark/lib/initialize/flow.mjs generated vendored Normal file
View File

@@ -0,0 +1,70 @@
export var tokenize = initializeFlow

import assert from 'assert'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import types from '../constant/types.mjs'
import content from '../tokenize/content.mjs'
import spaceFactory from '../tokenize/factory-space.mjs'
import blank from '../tokenize/partial-blank-line.mjs'

// Initializer for the flow content type: per line, try a blank line, then
// initial flow (indented code), then regular flow constructs, finally
// falling back to `content` (paragraphs, definitions).
function initializeFlow(effects) {
  var self = this
  // Entry state for every line; each attempt either handles the line or
  // hands off to the next fallback.
  var initial = effects.attempt(
    // Try to parse a blank line.
    blank,
    atBlankEnding,
    // Try to parse initial flow (essentially, only code).
    effects.attempt(
      this.parser.constructs.flowInitial,
      afterConstruct,
      spaceFactory(
        effects,
        effects.attempt(
          this.parser.constructs.flow,
          afterConstruct,
          effects.attempt(content, afterConstruct)
        ),
        types.linePrefix
      )
    )
  )

  return initial

  // A blank line matched: emit its ending as `lineEndingBlank` and reset
  // the current construct so the next line starts fresh.
  function atBlankEnding(code) {
    assert(
      code === codes.eof || markdownLineEnding(code),
      'expected eol or eof'
    )

    if (code === codes.eof) {
      effects.consume(code)
      return
    }

    effects.enter(types.lineEndingBlank)
    effects.consume(code)
    effects.exit(types.lineEndingBlank)
    self.currentConstruct = undefined
    return initial
  }

  // A construct finished a line: emit the line ending and loop.
  function afterConstruct(code) {
    assert(
      code === codes.eof || markdownLineEnding(code),
      'expected eol or eof'
    )

    if (code === codes.eof) {
      effects.consume(code)
      return
    }

    effects.enter(types.lineEnding)
    effects.consume(code)
    effects.exit(types.lineEnding)
    self.currentConstruct = undefined
    return initial
  }
}

210
node_modules/micromark/lib/initialize/text.js generated vendored Normal file
View File

@@ -0,0 +1,210 @@
'use strict'
Object.defineProperty(exports, '__esModule', {value: true})
var codes = require('../character/codes.js')
var assign = require('../constant/assign.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var shallow = require('../util/shallow.js')
var text = initializeFactory('text')
var string = initializeFactory('string')
var resolver = {resolveAll: createResolver()}
function initializeFactory(field) {
return {
tokenize: initializeText,
resolveAll: createResolver(
field === 'text' ? resolveAllLineSuffixes : undefined
)
}
function initializeText(effects) {
var self = this
var constructs = this.parser.constructs[field]
var text = effects.attempt(constructs, start, notText)
return start
function start(code) {
return atBreak(code) ? text(code) : notText(code)
}
function notText(code) {
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.data)
effects.consume(code)
return data
}
function data(code) {
if (atBreak(code)) {
effects.exit(types.data)
return text(code)
}
// Data.
effects.consume(code)
return data
}
function atBreak(code) {
var list = constructs[code]
var index = -1
if (code === codes.eof) {
return true
}
if (list) {
while (++index < list.length) {
if (
!list[index].previous ||
list[index].previous.call(self, self.previous)
) {
return true
}
}
}
}
}
}
function createResolver(extraResolver) {
return resolveAllText
function resolveAllText(events, context) {
var index = -1
var enter
// A rather boring computation (to merge adjacent `data` events) which
// improves mm performance by 29%.
while (++index <= events.length) {
if (enter === undefined) {
if (events[index] && events[index][1].type === types.data) {
enter = index
index++
}
} else if (!events[index] || events[index][1].type !== types.data) {
// Dont do anything if there is one data token.
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end
events.splice(enter + 2, index - enter - 2)
index = enter + 2
}
enter = undefined
}
}
return extraResolver ? extraResolver(events, context) : events
}
}
// A rather ugly set of instructions which again looks at chunks in the input
// stream.
// The reason to do this here is that it is *much* faster to parse in reverse.
// And that we cant hook into `null` to split the line suffix before an EOF.
// To do: figure out if we can make this into a clean utility, or even in core.
// As it will be useful for GFMs literal autolink extension (and maybe even
// tables?)
function resolveAllLineSuffixes(events, context) {
var eventIndex = -1
var chunks
var data
var chunk
var index
var bufferIndex
var size
var tabs
var token
while (++eventIndex <= events.length) {
if (
(eventIndex === events.length ||
events[eventIndex][1].type === types.lineEnding) &&
events[eventIndex - 1][1].type === types.data
) {
data = events[eventIndex - 1][1]
chunks = context.sliceStream(data)
index = chunks.length
bufferIndex = -1
size = 0
tabs = undefined
while (index--) {
chunk = chunks[index]
if (typeof chunk === 'string') {
bufferIndex = chunk.length
while (chunk.charCodeAt(bufferIndex - 1) === codes.space) {
size++
bufferIndex--
}
if (bufferIndex) break
bufferIndex = -1
}
// Number
else if (chunk === codes.horizontalTab) {
tabs = true
size++
} else if (chunk === codes.virtualSpace);
else {
// Replacement character, exit.
index++
break
}
}
if (size) {
token = {
type:
eventIndex === events.length ||
tabs ||
size < constants.hardBreakPrefixSizeMin
? types.lineSuffix
: types.hardBreakTrailing,
start: {
line: data.end.line,
column: data.end.column - size,
offset: data.end.offset - size,
_index: data.start._index + index,
_bufferIndex: index
? bufferIndex
: data.start._bufferIndex + bufferIndex
},
end: shallow(data.end)
}
data.end = shallow(token.start)
if (data.start.offset === data.end.offset) {
assign(data, token)
} else {
events.splice(
eventIndex,
0,
['enter', token, context],
['exit', token, context]
)
eventIndex += 2
}
}
eventIndex++
}
}
return events
}
exports.resolver = resolver
exports.string = string
exports.text = text

203
node_modules/micromark/lib/initialize/text.mjs generated vendored Normal file
View File

@@ -0,0 +1,203 @@
export var text = initializeFactory('text')
export var string = initializeFactory('string')
export var resolver = {resolveAll: createResolver()}
import codes from '../character/codes.mjs'
import assign from '../constant/assign.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import shallow from '../util/shallow.mjs'
// Build the initializer for a given content type (`'text'` or `'string'`):
// constructs from `parser.constructs[field]` are attempted between runs of
// plain `data`.
function initializeFactory(field) {
  return {
    tokenize: initializeText,
    // Only 'text' gets the trailing-whitespace (line suffix) resolver.
    resolveAll: createResolver(
      field === 'text' ? resolveAllLineSuffixes : undefined
    )
  }
  function initializeText(effects) {
    var self = this
    // Constructs available for this content type.
    var constructs = this.parser.constructs[field]
    // On a successful construct, continue at `start`; otherwise `notText`.
    var text = effects.attempt(constructs, start, notText)
    return start
    // At a potential break: try the constructs, else treat as data.
    function start(code) {
      return atBreak(code) ? text(code) : notText(code)
    }
    // Consume one character as plain `data` (or finish at EOF).
    function notText(code) {
      if (code === codes.eof) {
        effects.consume(code)
        return
      }
      effects.enter(types.data)
      effects.consume(code)
      return data
    }
    // Inside a `data` token: keep consuming until a possible break.
    function data(code) {
      if (atBreak(code)) {
        effects.exit(types.data)
        return text(code)
      }
      // Data.
      effects.consume(code)
      return data
    }
    // Whether a construct could start at `code` (EOF always breaks).
    // Returns `undefined` (falsy) when nothing matches.
    function atBreak(code) {
      var list = constructs[code]
      var index = -1
      if (code === codes.eof) {
        return true
      }
      if (list) {
        while (++index < list.length) {
          if (
            !list[index].previous ||
            list[index].previous.call(self, self.previous)
          ) {
            return true
          }
        }
      }
    }
  }
}
// Create a `resolveAll` that merges runs of adjacent `data` tokens into a
// single token, optionally chaining into `extraResolver` afterwards.
function createResolver(extraResolver) {
  return resolveAllText
  function resolveAllText(events, context) {
    var index = -1
    // Index of the `enter` event opening the current run of data tokens,
    // or `undefined` when not inside a run.
    var enter
    // A rather boring computation (to merge adjacent `data` events) which
    // improves mm performance by 29%.
    while (++index <= events.length) {
      if (enter === undefined) {
        if (events[index] && events[index][1].type === types.data) {
          enter = index
          index++
        }
      } else if (!events[index] || events[index][1].type !== types.data) {
        // Don't do anything if there is one data token.
        if (index !== enter + 2) {
          // Stretch the first token over the whole run and drop the rest.
          events[enter][1].end = events[index - 1][1].end
          events.splice(enter + 2, index - enter - 2)
          index = enter + 2
        }
        enter = undefined
      }
    }
    return extraResolver ? extraResolver(events, context) : events
  }
}
// A rather ugly set of instructions which again looks at chunks in the input
// stream.
// The reason to do this here is that it is *much* faster to parse in reverse.
// And that we cant hook into `null` to split the line suffix before an EOF.
// To do: figure out if we can make this into a clean utility, or even in core.
// As it will be useful for GFMs literal autolink extension (and maybe even
// tables?)
// Resolve trailing whitespace before line endings (or end of stream) into
// `lineSuffix` or `hardBreakTrailing` tokens, by walking each `data`
// token's chunks in reverse (much faster than parsing forward).
function resolveAllLineSuffixes(events, context) {
  var eventIndex = -1
  var chunks
  var data
  var chunk
  var index
  var bufferIndex
  var size
  var tabs
  var token
  while (++eventIndex <= events.length) {
    // Only look at `data` tokens sitting right before a line ending, or
    // right before the end of the event stream.
    if (
      (eventIndex === events.length ||
        events[eventIndex][1].type === types.lineEnding) &&
      events[eventIndex - 1][1].type === types.data
    ) {
      data = events[eventIndex - 1][1]
      chunks = context.sliceStream(data)
      index = chunks.length
      bufferIndex = -1
      size = 0
      tabs = undefined
      // Walk the chunks backwards, counting trailing spaces and tabs.
      while (index--) {
        chunk = chunks[index]
        if (typeof chunk === 'string') {
          bufferIndex = chunk.length
          while (chunk.charCodeAt(bufferIndex - 1) === codes.space) {
            size++
            bufferIndex--
          }
          if (bufferIndex) break
          bufferIndex = -1
        }
        // Code (number) chunks.
        else if (chunk === codes.horizontalTab) {
          tabs = true
          size++
        } else if (chunk === codes.virtualSpace) {
          // Empty: virtual spaces accompany a tab; nothing extra to count.
        } else {
          // Replacement character, exit.
          index++
          break
        }
      }
      if (size) {
        token = {
          // A hard break needs at least `hardBreakPrefixSizeMin` trailing
          // spaces (no tabs) before an actual line ending; everything else
          // is a plain line suffix.
          type:
            eventIndex === events.length ||
            tabs ||
            size < constants.hardBreakPrefixSizeMin
              ? types.lineSuffix
              : types.hardBreakTrailing,
          start: {
            line: data.end.line,
            column: data.end.column - size,
            offset: data.end.offset - size,
            _index: data.start._index + index,
            _bufferIndex: index
              ? bufferIndex
              : data.start._bufferIndex + bufferIndex
          },
          end: shallow(data.end)
        }
        // Shrink the data token so it ends where the suffix starts.
        data.end = shallow(token.start)
        if (data.start.offset === data.end.offset) {
          // The whole data token was whitespace: turn it into the suffix.
          assign(data, token)
        } else {
          // Otherwise insert the suffix token right after the data token.
          events.splice(
            eventIndex,
            0,
            ['enter', token, context],
            ['exit', token, context]
          )
          eventIndex += 2
        }
      }
      eventIndex++
    }
  }
  return events
}

5
node_modules/micromark/lib/parse.d.ts generated vendored Normal file
View File

@@ -0,0 +1,5 @@
import {ParseOptions, Parser} from './shared-types'
/**
 * Create a micromark parser: tokenizer factories for each content type,
 * honoring `options.extensions`.
 */
declare function createParser(options?: ParseOptions): Parser
export default createParser

36
node_modules/micromark/lib/parse.js generated vendored Normal file
View File

@@ -0,0 +1,36 @@
'use strict'
var content = require('./initialize/content.js')
var document = require('./initialize/document.js')
var flow = require('./initialize/flow.js')
var text = require('./initialize/text.js')
var combineExtensions = require('./util/combine-extensions.js')
var createTokenizer = require('./util/create-tokenizer.js')
var miniflat = require('./util/miniflat.js')
var constructs = require('./constructs.js')
function parse(options) {
var settings = options || {}
var parser = {
defined: [],
constructs: combineExtensions(
[constructs].concat(miniflat(settings.extensions))
),
content: create(content),
document: create(document),
flow: create(flow),
string: create(text.string),
text: create(text.text)
}
return parser
function create(initializer) {
return creator
function creator(from) {
return createTokenizer(parser, initializer, from)
}
}
}
module.exports = parse

34
node_modules/micromark/lib/parse.mjs generated vendored Normal file
View File

@@ -0,0 +1,34 @@
export default parse
import * as initializeContent from './initialize/content.mjs'
import * as initializeDocument from './initialize/document.mjs'
import * as initializeFlow from './initialize/flow.mjs'
import * as initializeText from './initialize/text.mjs'
import combineExtensions from './util/combine-extensions.mjs'
import createTokenizer from './util/create-tokenizer.mjs'
import miniflat from './util/miniflat.mjs'
import * as constructs from './constructs.mjs'
function parse(options) {
var settings = options || {}
var parser = {
defined: [],
constructs: combineExtensions(
[constructs].concat(miniflat(settings.extensions))
),
content: create(initializeContent),
document: create(initializeDocument),
flow: create(initializeFlow),
string: create(initializeText.string),
text: create(initializeText.text)
}
return parser
function create(initializer) {
return creator
function creator(from) {
return createTokenizer(parser, initializer, from)
}
}
}

5
node_modules/micromark/lib/postprocess.d.ts generated vendored Normal file
View File

@@ -0,0 +1,5 @@
import {Event} from './shared-types'
/**
 * Postprocess parse events: repeatedly applies `subtokenize` until it
 * reports completion, returning the input array.
 */
declare function postprocess(events: Event[]): Event[]
export default postprocess

13
node_modules/micromark/lib/postprocess.js generated vendored Normal file
View File

@@ -0,0 +1,13 @@
'use strict'
var subtokenize = require('./util/subtokenize.js')
function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events
}
module.exports = postprocess

11
node_modules/micromark/lib/postprocess.mjs generated vendored Normal file
View File

@@ -0,0 +1,11 @@
export default postprocess
import subtokenize from './util/subtokenize.mjs'
function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events
}

11
node_modules/micromark/lib/preprocess.d.ts generated vendored Normal file
View File

@@ -0,0 +1,11 @@
import {BufferEncoding} from './shared-types'
/**
 * A streaming preprocessor: feed it string/buffer chunks and it returns
 * parse chunks; pass `end: true` with the final call to flush.
 */
// NOTE(review): at runtime the implementation also pushes numeric
// character codes into the returned array (see preprocess.js), so
// `Chunk[]` from shared-types may be the more accurate type — confirm.
type PreprocessReturn = (
  value: string,
  encoding: BufferEncoding,
  end?: boolean
) => string[]
declare function preprocess(): PreprocessReturn
export default preprocess

96
node_modules/micromark/lib/preprocess.js generated vendored Normal file
View File

@@ -0,0 +1,96 @@
'use strict'
var codes = require('./character/codes.js')
var constants = require('./constant/constants.js')
var search = /[\0\t\n\r]/g
// Streaming preprocessor: joins incoming chunks, strips a leading BOM,
// replaces NUL, expands tabs into virtual spaces, and normalizes line
// endings into micromark's numeric character codes.
function preprocess() {
  var start = true
  var column = 1
  // Tail of the previous chunk that ended without a special character.
  var buffer = ''
  // Set when a CR was seen last; held back to pair with a following LF.
  var atCarriageReturn
  return preprocessor
  // Handle one chunk; when `end` is set, flush state and append EOF.
  function preprocessor(value, encoding, end) {
    var chunks = []
    var match
    var next
    var startPosition
    var endPosition
    var code
    value = buffer + value.toString(encoding)
    startPosition = 0
    buffer = ''
    if (start) {
      // Skip a byte order mark at the very start of the stream.
      if (value.charCodeAt(0) === codes.byteOrderMarker) {
        startPosition++
      }
      start = undefined
    }
    while (startPosition < value.length) {
      // `search` matches NUL, tab, LF, and CR.
      search.lastIndex = startPosition
      match = search.exec(value)
      endPosition = match ? match.index : value.length
      code = value.charCodeAt(endPosition)
      if (!match) {
        // No special character left: keep the rest for the next call.
        buffer = value.slice(startPosition)
        break
      }
      if (
        code === codes.lf &&
        startPosition === endPosition &&
        atCarriageReturn
      ) {
        // LF directly after a held-back CR: emit a single CRLF.
        chunks.push(codes.carriageReturnLineFeed)
        atCarriageReturn = undefined
      } else {
        if (atCarriageReturn) {
          // The held-back CR was not followed by a LF: emit it alone.
          chunks.push(codes.carriageReturn)
          atCarriageReturn = undefined
        }
        if (startPosition < endPosition) {
          // Plain text run before the special character.
          chunks.push(value.slice(startPosition, endPosition))
          column += endPosition - startPosition
        }
        if (code === codes.nul) {
          chunks.push(codes.replacementCharacter)
          column++
        } else if (code === codes.ht) {
          // Expand the tab with virtual spaces up to the next tab stop.
          next = Math.ceil(column / constants.tabSize) * constants.tabSize
          chunks.push(codes.horizontalTab)
          while (column++ < next) chunks.push(codes.virtualSpace)
        } else if (code === codes.lf) {
          chunks.push(codes.lineFeed)
          column = 1
        }
        // Must be carriage return.
        else {
          atCarriageReturn = true
          column = 1
        }
      }
      startPosition = endPosition + 1
    }
    if (end) {
      // Flush: a dangling CR, any buffered text, then the EOF marker.
      if (atCarriageReturn) chunks.push(codes.carriageReturn)
      if (buffer) chunks.push(buffer)
      chunks.push(codes.eof)
    }
    return chunks
  }
}
module.exports = preprocess

94
node_modules/micromark/lib/preprocess.mjs generated vendored Normal file
View File

@@ -0,0 +1,94 @@
export default preprocess
import codes from './character/codes.mjs'
import constants from './constant/constants.mjs'
var search = /[\0\t\n\r]/g
// Streaming preprocessor (ESM build): joins incoming chunks, strips a
// leading BOM, replaces NUL, expands tabs into virtual spaces, and
// normalizes line endings into micromark's numeric character codes.
function preprocess() {
  var start = true
  var column = 1
  // Tail of the previous chunk that ended without a special character.
  var buffer = ''
  // Set when a CR was seen last; held back to pair with a following LF.
  var atCarriageReturn
  return preprocessor
  // Handle one chunk; when `end` is set, flush state and append EOF.
  function preprocessor(value, encoding, end) {
    var chunks = []
    var match
    var next
    var startPosition
    var endPosition
    var code
    value = buffer + value.toString(encoding)
    startPosition = 0
    buffer = ''
    if (start) {
      // Skip a byte order mark at the very start of the stream.
      if (value.charCodeAt(0) === codes.byteOrderMarker) {
        startPosition++
      }
      start = undefined
    }
    while (startPosition < value.length) {
      // `search` matches NUL, tab, LF, and CR.
      search.lastIndex = startPosition
      match = search.exec(value)
      endPosition = match ? match.index : value.length
      code = value.charCodeAt(endPosition)
      if (!match) {
        // No special character left: keep the rest for the next call.
        buffer = value.slice(startPosition)
        break
      }
      if (
        code === codes.lf &&
        startPosition === endPosition &&
        atCarriageReturn
      ) {
        // LF directly after a held-back CR: emit a single CRLF.
        chunks.push(codes.carriageReturnLineFeed)
        atCarriageReturn = undefined
      } else {
        if (atCarriageReturn) {
          // The held-back CR was not followed by a LF: emit it alone.
          chunks.push(codes.carriageReturn)
          atCarriageReturn = undefined
        }
        if (startPosition < endPosition) {
          // Plain text run before the special character.
          chunks.push(value.slice(startPosition, endPosition))
          column += endPosition - startPosition
        }
        if (code === codes.nul) {
          chunks.push(codes.replacementCharacter)
          column++
        } else if (code === codes.ht) {
          // Expand the tab with virtual spaces up to the next tab stop.
          next = Math.ceil(column / constants.tabSize) * constants.tabSize
          chunks.push(codes.horizontalTab)
          while (column++ < next) chunks.push(codes.virtualSpace)
        } else if (code === codes.lf) {
          chunks.push(codes.lineFeed)
          column = 1
        }
        // Must be carriage return.
        else {
          atCarriageReturn = true
          column = 1
        }
      }
      startPosition = endPosition + 1
    }
    if (end) {
      // Flush: a dangling CR, any buffered text, then the EOF marker.
      if (atCarriageReturn) chunks.push(codes.carriageReturn)
      if (buffer) chunks.push(buffer)
      chunks.push(codes.eof)
    }
    return chunks
  }
}

291
node_modules/micromark/lib/shared-types.d.ts generated vendored Normal file
View File

@@ -0,0 +1,291 @@
// Minimum TypeScript Version: 3.0
import {Code} from './character/codes'
import {Type} from './constant/types'
/**
* A location in a string or buffer
*/
export interface Point {
line: number
column: number
offset: number
_index?: number
_bufferIndex?: number
}
/**
 * A span of the source produced by the tokenizer, delimited by matching
 * `enter`/`exit` events.
 */
export interface Token {
  type: Type
  start: Point
  end: Point
  previous?: Token
  next?: Token
  /**
   * Declares a token as having content of a certain type.
   * Because markdown requires to first parse containers, flow, content completely,
   * and then later go on to phrasing and such, it needs to be declared somewhere on the tokens.
   */
  contentType?: 'flow' | 'content' | 'string' | 'text'
  /**
   * Used when dealing with linked tokens. A child tokenizer is needed to tokenize them, which is stored on those tokens
   */
  _tokenizer?: Tokenizer
  /**
   * Close and open are also used in attention:
   * depending on the characters before and after sequences (**),
   * the sequence can open, close, both, or none
   */
  _open?: boolean
  /**
   * Close and open are also used in attention:
   * depending on the characters before and after sequences (**),
   * the sequence can open, close, both, or none
   */
  _close?: boolean
}
/**
 * A tokenizer event: `['enter' | 'exit', token, context]`.
 */
export type Event = [string, Token, Tokenizer]
/**
* These these are transitions to update the CommonMark State Machine (CSMS)
*/
export interface Effects {
/**
* Enter and exit define where tokens start and end
*/
enter: (type: Type) => Token
/**
* Enter and exit define where tokens start and end
*/
exit: (type: Type) => Token
/**
* Consume deals with a character, and moves to the next
*/
consume: (code: number) => void
/**
* Attempt deals with several values, and tries to parse according to those values.
* If a value resulted in `ok`, it worked, the tokens that were made are used,
* and `returnState` is switched to.
* If the result is `nok`, the attempt failed,
* so we revert to the original state, and `bogusState` is used.
*/
attempt: (
constructInfo:
| Construct
| Construct[]
| Record<CodeAsKey, Construct | Construct[]>,
returnState: State,
bogusState?: State
) => (code: Code) => void
/**
* Interrupt is used for stuff right after a line of content.
*/
interrupt: (
constructInfo:
| Construct
| Construct[]
| Record<CodeAsKey, Construct | Construct[]>,
ok: Okay,
nok?: NotOkay
) => (code: Code) => void
check: (
constructInfo:
| Construct
| Construct[]
| Record<CodeAsKey, Construct | Construct[]>,
ok: Okay,
nok?: NotOkay
) => (code: Code) => void
/**
* Lazy is used for lines that were not properly preceded by the container.
*/
lazy: (
constructInfo:
| Construct
| Construct[]
| Record<CodeAsKey, Construct | Construct[]>,
ok: Okay,
nok?: NotOkay
) => void
}
/**
* A state function should return another function: the next state-as-a-function to go to.
*
* But there is one case where they return void: for the eof character code (at the end of a value)
* The reason being: well, there isnt any state that makes sense, so void works well. Practically
* that has also helped: if for some reason it was a mistake, then an exception is throw because
* there is no next function, meaning it surfaces early.
*/
export type State = (code: number) => State | void
/**
*
*/
export type Okay = State
/**
*
*/
export type NotOkay = State
/**
 * A tokenizer for one content type: write chunks in, get events out, and
 * slice the underlying stream back out of tokens.
 */
export interface Tokenizer {
  previous: Code
  events: Event[]
  parser: Parser
  sliceStream: (token: Token) => Chunk[]
  sliceSerialize: (token: Token) => string
  now: () => Point
  defineSkip: (value: Point) => void
  write: (slice: Chunk[]) => Event[]
}
export type Resolve = (events: Event[], context: Tokenizer) => Event[]
export type Tokenize = (context: Tokenizer, effects: Effects) => State
export interface Construct {
name?: string
tokenize: Tokenize
partial?: boolean
resolve?: Resolve
resolveTo?: Resolve
resolveAll?: Resolve
concrete?: boolean
interruptible?: boolean
lazy?: boolean
}
/**
 * The parser: the combined constructs plus a tokenizer factory per
 * content type (see `lib/parse.js`).
 */
export interface Parser {
  constructs: Record<CodeAsKey, Construct | Construct[]>
  content: (from: Point) => Tokenizer
  document: (from: Point) => Tokenizer
  flow: (from: Point) => Tokenizer
  string: (from: Point) => Tokenizer
  text: (from: Point) => Tokenizer
  defined: string[]
}
/**
 * The `this` context available inside `tokenize` functions.
 */
export interface TokenizerThis {
  events: Event[]
  interrupt?: boolean
  lazy?: boolean
  containerState?: Record<string, unknown>
}
/**
* `Compile` is the return value of `lib/compile/html.js`
*/
export type Compile = (slice: Event[]) => string
/**
* https://github.com/micromark/micromark#syntaxextension
*/
export interface SyntaxExtension {
document?: Record<CodeAsKey, Construct | Construct[]>
contentInitial?: Record<CodeAsKey, Construct | Construct[]>
flowInitial?: Record<CodeAsKey, Construct | Construct[]>
flow?: Record<CodeAsKey, Construct | Construct[]>
string?: Record<CodeAsKey, Construct | Construct[]>
text?: Record<CodeAsKey, Construct | Construct[]>
}
/**
* https://github.com/micromark/micromark#htmlextension
*/
export type HtmlExtension =
| {enter: Record<Type, () => void>}
| {exit: Record<Type, () => void>}
export type Options = ParseOptions & CompileOptions
export interface ParseOptions {
// Array of syntax extensions
//
extensions?: SyntaxExtension[]
}
export interface CompileOptions {
// Value to use for line endings not in `doc` (`string`, default: first line
// ending or `'\n'`).
//
// Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
// markdown document over to the compiled HTML.
// In some cases, such as `> a`, CommonMark requires that extra line endings are
// added: `<blockquote>\n<p>a</p>\n</blockquote>`.
//
defaultLineEnding?: '\r' | '\n' | '\r\n'
// Whether to allow embedded HTML (`boolean`, default: `false`).
//
allowDangerousHtml?: boolean
// Whether to allow potentially dangerous protocols in links and images (`boolean`,
// default: `false`).
// URLs relative to the current protocol are always allowed (such as, `image.jpg`).
// For links, the allowed protocols are `http`, `https`, `irc`, `ircs`, `mailto`,
// and `xmpp`.
// For images, the allowed protocols are `http` and `https`.
//
allowDangerousProtocol?: boolean
// Array of HTML extensions
//
htmlExtensions?: HtmlExtension[]
}
export type Chunk = NonNullable<Code> | string
// TypeScript will complain that `null` can't be the key of an object. So when a `Code` value is a key of an object, use CodeAsKey instead.
export type CodeAsKey = NonNullable<Code> | 'null'
/**
* Encodings supported by the buffer class
*
* @remarks
* This is a copy of the typing from Node, copied to prevent Node globals from being needed.
* Copied from https://github.com/DefinitelyTyped/DefinitelyTyped/blob/a2bc1d868d81733a8969236655fa600bd3651a7b/types/node/globals.d.ts#L174
*/
export type BufferEncoding =
| 'ascii'
| 'utf8'
| 'utf-8'
| 'utf16le'
| 'ucs2'
| 'ucs-2'
| 'base64'
| 'latin1'
| 'binary'
| 'hex'
/**
* This is an interface for Node's Buffer.
*/
export interface Buffer {
toString: (encoding?: BufferEncoding) => string
}
export type CodeCheck = (code: Code) => boolean

6
node_modules/micromark/lib/stream.d.ts generated vendored Normal file
View File

@@ -0,0 +1,6 @@
import {EventEmitter} from 'events'
import {Options} from './shared-types'
declare function stream(options?: Options): EventEmitter
export default stream

119
node_modules/micromark/lib/stream.js generated vendored Normal file
View File

@@ -0,0 +1,119 @@
'use strict'
var events = require('events')
var html = require('./compile/html.js')
var parse = require('./parse.js')
var postprocess = require('./postprocess.js')
var preprocess = require('./preprocess.js')
// Wire preprocess → tokenize → compile behind a minimal EventEmitter that
// mimics a writable/readable stream (write/end/pipe): markdown goes in,
// one `data` event with the compiled HTML comes out on `end`.
function stream(options) {
  var preprocess$1 = preprocess()
  var tokenize = parse(options).document().write
  var compile = html(options)
  var emitter = new events.EventEmitter()
  var ended
  emitter.writable = emitter.readable = true
  emitter.write = write
  emitter.end = end
  emitter.pipe = pipe
  return emitter
  // Write a chunk into memory.
  function write(chunk, encoding, callback) {
    if (typeof encoding === 'function') {
      callback = encoding
      encoding = undefined
    }
    if (ended) {
      throw new Error('Did not expect `write` after `end`')
    }
    tokenize(preprocess$1(chunk || '', encoding))
    if (callback) {
      callback()
    }
    // Signal successful write.
    return true
  }
  // End the writing.
  // Passes all arguments to a final `write`.
  function end(chunk, encoding, callback) {
    write(chunk, encoding, callback)
    // Flush the preprocessor, compile everything, and emit it at once.
    emitter.emit(
      'data',
      compile(postprocess(tokenize(preprocess$1('', encoding, true))))
    )
    emitter.emit('end')
    ended = true
    return true
  }
  // Pipe the processor into a writable stream.
  // Basically `Stream#pipe`, but inlined and simplified to keep the bundled
  // size down.
  // See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.
  function pipe(dest, options) {
    emitter.on('data', ondata)
    emitter.on('error', onerror)
    emitter.on('end', cleanup)
    emitter.on('close', cleanup)
    // If the `end` option is not supplied, `dest.end()` will be called when the
    // `end` or `close` events are received.
    if (!dest._isStdio && (!options || options.end !== false)) {
      emitter.on('end', onend)
    }
    dest.on('error', onerror)
    dest.on('close', cleanup)
    dest.emit('pipe', emitter)
    return dest
    // End destination.
    function onend() {
      if (dest.end) {
        dest.end()
      }
    }
    // Handle data.
    function ondata(chunk) {
      if (dest.writable) {
        dest.write(chunk)
      }
    }
    // Clean listeners.
    function cleanup() {
      emitter.removeListener('data', ondata)
      emitter.removeListener('end', onend)
      emitter.removeListener('error', onerror)
      emitter.removeListener('end', cleanup)
      emitter.removeListener('close', cleanup)
      dest.removeListener('error', onerror)
      dest.removeListener('close', cleanup)
    }
    // Close dangling pipes and handle unheard errors.
    function onerror(error) {
      cleanup()
      if (!emitter.listenerCount('error')) {
        throw error // Unhandled stream error in pipe.
      }
    }
  }
}
module.exports = stream

117
node_modules/micromark/lib/stream.mjs generated vendored Normal file
View File

@@ -0,0 +1,117 @@
export default stream
import {EventEmitter} from 'events'
import compiler from './compile/html.mjs'
import parser from './parse.mjs'
import postprocess from './postprocess.mjs'
import preprocessor from './preprocess.mjs'
// Wire preprocess → tokenize → compile behind a minimal EventEmitter that
// mimics a writable/readable stream (write/end/pipe): markdown goes in,
// one `data` event with the compiled HTML comes out on `end`.
function stream(options) {
  var preprocess = preprocessor()
  var tokenize = parser(options).document().write
  var compile = compiler(options)
  var emitter = new EventEmitter()
  var ended
  emitter.writable = emitter.readable = true
  emitter.write = write
  emitter.end = end
  emitter.pipe = pipe
  return emitter
  // Write a chunk into memory.
  function write(chunk, encoding, callback) {
    if (typeof encoding === 'function') {
      callback = encoding
      encoding = undefined
    }
    if (ended) {
      throw new Error('Did not expect `write` after `end`')
    }
    tokenize(preprocess(chunk || '', encoding))
    if (callback) {
      callback()
    }
    // Signal successful write.
    return true
  }
  // End the writing.
  // Passes all arguments to a final `write`.
  function end(chunk, encoding, callback) {
    write(chunk, encoding, callback)
    // Flush the preprocessor, compile everything, and emit it at once.
    emitter.emit(
      'data',
      compile(postprocess(tokenize(preprocess('', encoding, true))))
    )
    emitter.emit('end')
    ended = true
    return true
  }
  // Pipe the processor into a writable stream.
  // Basically `Stream#pipe`, but inlined and simplified to keep the bundled
  // size down.
  // See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.
  function pipe(dest, options) {
    emitter.on('data', ondata)
    emitter.on('error', onerror)
    emitter.on('end', cleanup)
    emitter.on('close', cleanup)
    // If the `end` option is not supplied, `dest.end()` will be called when the
    // `end` or `close` events are received.
    if (!dest._isStdio && (!options || options.end !== false)) {
      emitter.on('end', onend)
    }
    dest.on('error', onerror)
    dest.on('close', cleanup)
    dest.emit('pipe', emitter)
    return dest
    // End destination.
    function onend() {
      if (dest.end) {
        dest.end()
      }
    }
    // Handle data.
    function ondata(chunk) {
      if (dest.writable) {
        dest.write(chunk)
      }
    }
    // Clean listeners.
    function cleanup() {
      emitter.removeListener('data', ondata)
      emitter.removeListener('end', onend)
      emitter.removeListener('error', onerror)
      emitter.removeListener('end', cleanup)
      emitter.removeListener('close', cleanup)
      dest.removeListener('error', onerror)
      dest.removeListener('close', cleanup)
    }
    // Close dangling pipes and handle unheard errors.
    function onerror(error) {
      cleanup()
      if (!emitter.listenerCount('error')) {
        throw error // Unhandled stream error in pipe.
      }
    }
  }
}

216
node_modules/micromark/lib/tokenize/attention.js generated vendored Normal file
View File

@@ -0,0 +1,216 @@
'use strict'
var assert = require('assert')
var codes = require('../character/codes.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var chunkedPush = require('../util/chunked-push.js')
var chunkedSplice = require('../util/chunked-splice.js')
var classifyCharacter = require('../util/classify-character.js')
var movePoint = require('../util/move-point.js')
var resolveAll = require('../util/resolve-all.js')
var shallow = require('../util/shallow.js')
// Wrap a CommonJS export so it always exposes a `default` property.
function _interopDefaultLegacy(e) {
  var hasDefault = e && typeof e === 'object' && 'default' in e
  return hasDefault ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
var attention = {
name: 'attention',
tokenize: tokenizeAttention,
resolveAll: resolveAllAttention
}
// Take all events and resolve attention to emphasis or strong.
// Take all events and resolve attention to emphasis or strong: pair
// `attentionSequence` tokens into emphasis/strong groups, applying the
// marker-matching and multiple-of-three checks.
function resolveAllAttention(events, context) {
  var index = -1
  var open
  var group
  var text
  var openingSequence
  var closingSequence
  var use
  var nextEvents
  var offset
  // Walk through all events.
  //
  // Note: performance of this is fine on an mb of normal markdown, but it's
  // a bottleneck for malicious stuff.
  while (++index < events.length) {
    // Find a token that can close.
    if (
      events[index][0] === 'enter' &&
      events[index][1].type === 'attentionSequence' &&
      events[index][1]._close
    ) {
      open = index
      // Now walk back to find an opener.
      while (open--) {
        // Find a token that can open the closer.
        if (
          events[open][0] === 'exit' &&
          events[open][1].type === 'attentionSequence' &&
          events[open][1]._open &&
          // If the markers are the same:
          context.sliceSerialize(events[open][1]).charCodeAt(0) ===
            context.sliceSerialize(events[index][1]).charCodeAt(0)
        ) {
          // If the opening can close or the closing can open,
          // and the close size *is not* a multiple of three,
          // but the sum of the opening and closing size *is* multiple of three,
          // then don't match.
          if (
            (events[open][1]._close || events[index][1]._open) &&
            (events[index][1].end.offset - events[index][1].start.offset) % 3 &&
            !(
              (events[open][1].end.offset -
                events[open][1].start.offset +
                events[index][1].end.offset -
                events[index][1].start.offset) %
              3
            )
          ) {
            continue
          }
          // Number of markers to use from the sequence: 2 when both sides
          // have at least two (strong), otherwise 1 (emphasis).
          use =
            events[open][1].end.offset - events[open][1].start.offset > 1 &&
            events[index][1].end.offset - events[index][1].start.offset > 1
              ? 2
              : 1
          openingSequence = {
            type: use > 1 ? types.strongSequence : types.emphasisSequence,
            start: movePoint(shallow(events[open][1].end), -use),
            end: shallow(events[open][1].end)
          }
          closingSequence = {
            type: use > 1 ? types.strongSequence : types.emphasisSequence,
            start: shallow(events[index][1].start),
            end: movePoint(shallow(events[index][1].start), use)
          }
          text = {
            type: use > 1 ? types.strongText : types.emphasisText,
            start: shallow(events[open][1].end),
            end: shallow(events[index][1].start)
          }
          group = {
            type: use > 1 ? types.strong : types.emphasis,
            start: shallow(openingSequence.start),
            end: shallow(closingSequence.end)
          }
          // Shrink the original sequences by the markers now consumed.
          events[open][1].end = shallow(openingSequence.start)
          events[index][1].start = shallow(closingSequence.end)
          nextEvents = []
          // If there are more markers in the opening, add them before.
          if (events[open][1].end.offset - events[open][1].start.offset) {
            nextEvents = chunkedPush(nextEvents, [
              ['enter', events[open][1], context],
              ['exit', events[open][1], context]
            ])
          }
          // Opening.
          nextEvents = chunkedPush(nextEvents, [
            ['enter', group, context],
            ['enter', openingSequence, context],
            ['exit', openingSequence, context],
            ['enter', text, context]
          ])
          // Between.
          nextEvents = chunkedPush(
            nextEvents,
            resolveAll(
              context.parser.constructs.insideSpan.null,
              events.slice(open + 1, index),
              context
            )
          )
          // Closing.
          nextEvents = chunkedPush(nextEvents, [
            ['exit', text, context],
            ['enter', closingSequence, context],
            ['exit', closingSequence, context],
            ['exit', group, context]
          ])
          // If there are more markers in the closing, add them after.
          if (events[index][1].end.offset - events[index][1].start.offset) {
            offset = 2
            nextEvents = chunkedPush(nextEvents, [
              ['enter', events[index][1], context],
              ['exit', events[index][1], context]
            ])
          } else {
            offset = 0
          }
          // Replace the old span with the rebuilt events and continue just
          // before the remainder of the closing sequence (if any).
          chunkedSplice(events, open - 1, index - open + 3, nextEvents)
          index = open + nextEvents.length - offset - 2
          break
        }
      }
    }
  }
  // Remove remaining sequences.
  index = -1
  while (++index < events.length) {
    if (events[index][1].type === 'attentionSequence') {
      events[index][1].type = 'data'
    }
  }
  return events
}
// Tokenize a run of `*` or `_` markers and classify (via `_open`/`_close`)
// whether the sequence can open and/or close emphasis/strong, based on the
// characters before and after it.
function tokenizeAttention(effects, ok) {
  // Classification of the character before the sequence.
  var before = classifyCharacter(this.previous)
  var marker
  return start
  function start(code) {
    assert__default['default'](
      code === codes.asterisk || code === codes.underscore,
      'expected asterisk or underscore'
    )
    effects.enter('attentionSequence')
    marker = code
    return sequence(code)
  }
  // Consume the whole run of identical markers, then decide open/close.
  function sequence(code) {
    var token
    var after
    var open
    var close
    if (code === marker) {
      effects.consume(code)
      return sequence
    }
    token = effects.exit('attentionSequence')
    after = classifyCharacter(code)
    // Punctuation may open/close only when flanked by something on the
    // other side; underscore is stricter than asterisk.
    open = !after || (after === constants.characterGroupPunctuation && before)
    close = !before || (before === constants.characterGroupPunctuation && after)
    token._open = marker === codes.asterisk ? open : open && (before || !close)
    token._close = marker === codes.asterisk ? close : close && (after || !open)
    return ok(code)
  }
}
module.exports = attention

207
node_modules/micromark/lib/tokenize/attention.mjs generated vendored Normal file
View File

@@ -0,0 +1,207 @@
// Attention construct: tokenizes runs of `*` or `_` into `attentionSequence`
// tokens; `resolveAllAttention` later pairs openers/closers into
// emphasis/strong events.
var attention = {
  name: 'attention',
  tokenize: tokenizeAttention,
  resolveAll: resolveAllAttention
}
export default attention
import assert from 'assert'
import codes from '../character/codes.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import chunkedPush from '../util/chunked-push.mjs'
import chunkedSplice from '../util/chunked-splice.mjs'
import classifyCharacter from '../util/classify-character.mjs'
import movePoint from '../util/move-point.mjs'
import resolveAll from '../util/resolve-all.mjs'
import shallow from '../util/shallow.mjs'
// Take all events and resolve attention to emphasis or strong.
function resolveAllAttention(events, context) {
  var index = -1
  var open
  var group
  var text
  var openingSequence
  var closingSequence
  var use
  var nextEvents
  var offset
  // Walk through all events.
  //
  // Note: performance of this is fine on an mb of normal markdown, but it's
  // a bottleneck for malicious stuff.
  while (++index < events.length) {
    // Find a token that can close.
    if (
      events[index][0] === 'enter' &&
      events[index][1].type === 'attentionSequence' &&
      events[index][1]._close
    ) {
      open = index
      // Now walk back to find an opener.
      while (open--) {
        // Find a token that can open the closer.
        if (
          events[open][0] === 'exit' &&
          events[open][1].type === 'attentionSequence' &&
          events[open][1]._open &&
          // If the markers are the same:
          context.sliceSerialize(events[open][1]).charCodeAt(0) ===
            context.sliceSerialize(events[index][1]).charCodeAt(0)
        ) {
          // If the opening can close or the closing can open,
          // and the close size *is not* a multiple of three,
          // but the sum of the opening and closing size *is* multiple of three,
          // then don't match.
          if (
            (events[open][1]._close || events[index][1]._open) &&
            (events[index][1].end.offset - events[index][1].start.offset) % 3 &&
            !(
              (events[open][1].end.offset -
                events[open][1].start.offset +
                events[index][1].end.offset -
                events[index][1].start.offset) %
              3
            )
          ) {
            continue
          }
          // Number of markers to use from the sequence.
          // Two or more markers on both sides -> strong (2), otherwise
          // emphasis (1).
          use =
            events[open][1].end.offset - events[open][1].start.offset > 1 &&
            events[index][1].end.offset - events[index][1].start.offset > 1
              ? 2
              : 1
          // `use` markers taken from the end of the opening sequence…
          openingSequence = {
            type: use > 1 ? types.strongSequence : types.emphasisSequence,
            start: movePoint(shallow(events[open][1].end), -use),
            end: shallow(events[open][1].end)
          }
          // …and from the start of the closing sequence.
          closingSequence = {
            type: use > 1 ? types.strongSequence : types.emphasisSequence,
            start: shallow(events[index][1].start),
            end: movePoint(shallow(events[index][1].start), use)
          }
          // Everything between the two sequences.
          text = {
            type: use > 1 ? types.strongText : types.emphasisText,
            start: shallow(events[open][1].end),
            end: shallow(events[index][1].start)
          }
          // The whole emphasis/strong span.
          group = {
            type: use > 1 ? types.strong : types.emphasis,
            start: shallow(openingSequence.start),
            end: shallow(closingSequence.end)
          }
          // Shrink the original sequences by the markers consumed here.
          events[open][1].end = shallow(openingSequence.start)
          events[index][1].start = shallow(closingSequence.end)
          nextEvents = []
          // If there are more markers in the opening, add them before.
          if (events[open][1].end.offset - events[open][1].start.offset) {
            nextEvents = chunkedPush(nextEvents, [
              ['enter', events[open][1], context],
              ['exit', events[open][1], context]
            ])
          }
          // Opening.
          nextEvents = chunkedPush(nextEvents, [
            ['enter', group, context],
            ['enter', openingSequence, context],
            ['exit', openingSequence, context],
            ['enter', text, context]
          ])
          // Between: recursively resolve inner span constructs.
          nextEvents = chunkedPush(
            nextEvents,
            resolveAll(
              context.parser.constructs.insideSpan.null,
              events.slice(open + 1, index),
              context
            )
          )
          // Closing.
          nextEvents = chunkedPush(nextEvents, [
            ['exit', text, context],
            ['enter', closingSequence, context],
            ['exit', closingSequence, context],
            ['exit', group, context]
          ])
          // If there are more markers in the closing, add them after.
          if (events[index][1].end.offset - events[index][1].start.offset) {
            offset = 2
            nextEvents = chunkedPush(nextEvents, [
              ['enter', events[index][1], context],
              ['exit', events[index][1], context]
            ])
          } else {
            offset = 0
          }
          // Splice the resolved events in, then continue scanning right
          // after the spliced-in span (minus any trailing closer remainder).
          chunkedSplice(events, open - 1, index - open + 3, nextEvents)
          index = open + nextEvents.length - offset - 2
          break
        }
      }
    }
  }
  // Remove remaining sequences.
  // Anything left unmatched becomes plain data.
  index = -1
  while (++index < events.length) {
    if (events[index][1].type === 'attentionSequence') {
      events[index][1].type = 'data'
    }
  }
  return events
}
// Tokenize one run of identical `*` or `_` markers and record on the token
// whether it can open and/or close attention (`_open`/`_close` flags),
// based on the characters on either side of the run.
function tokenizeAttention(effects, ok) {
  // Classification of the character before the run.
  var before = classifyCharacter(this.previous)
  var marker
  return start
  function start(code) {
    assert(
      code === codes.asterisk || code === codes.underscore,
      'expected asterisk or underscore'
    )
    effects.enter('attentionSequence')
    marker = code
    return sequence(code)
  }
  function sequence(code) {
    var token
    var after
    var open
    var close
    if (code === marker) {
      effects.consume(code)
      return sequence
    }
    token = effects.exit('attentionSequence')
    // NOTE(review): `classifyCharacter` presumably buckets the neighbor as
    // whitespace / punctuation / other — confirm against its source.
    after = classifyCharacter(code)
    open = !after || (after === constants.characterGroupPunctuation && before)
    close = !before || (before === constants.characterGroupPunctuation && after)
    // Underscore is stricter than asterisk: the refinement below also
    // requires the other side to look like a boundary.
    token._open = marker === codes.asterisk ? open : open && (before || !close)
    token._close = marker === codes.asterisk ? close : close && (after || !open)
    return ok(code)
  }
}

147
node_modules/micromark/lib/tokenize/autolink.js generated vendored Normal file
View File

@@ -0,0 +1,147 @@
'use strict'
var assert = require('assert')
var asciiAlpha = require('../character/ascii-alpha.js')
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
var asciiAtext = require('../character/ascii-atext.js')
var asciiControl = require('../character/ascii-control.js')
var codes = require('../character/codes.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
// Interop helper so a CJS default import works for both module shapes.
function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
// Autolink construct: `<scheme:url>` or `<local@domain>` between angle
// brackets.
var autolink = {
  name: 'autolink',
  tokenize: tokenizeAutolink
}
function tokenizeAutolink(effects, ok, nok) {
  // Counts scheme length first; reset and reused for domain-label length.
  var size = 1
  return start
  // At `<`: open the autolink and its marker.
  function start(code) {
    assert__default['default'](code === codes.lessThan, 'expected `<`')
    effects.enter(types.autolink)
    effects.enter(types.autolinkMarker)
    effects.consume(code)
    effects.exit(types.autolinkMarker)
    effects.enter(types.autolinkProtocol)
    return open
  }
  // First character: a letter may start a scheme; otherwise try email atext.
  function open(code) {
    if (asciiAlpha(code)) {
      effects.consume(code)
      return schemeOrEmailAtext
    }
    return asciiAtext(code) ? emailAtext(code) : nok(code)
  }
  // Second character: still ambiguous between scheme and email local part.
  function schemeOrEmailAtext(code) {
    return code === codes.plusSign ||
      code === codes.dash ||
      code === codes.dot ||
      asciiAlphanumeric(code)
      ? schemeInsideOrEmailAtext(code)
      : emailAtext(code)
  }
  // Inside a possible scheme: `:` settles it as a URL; scheme characters
  // are accepted up to `autolinkSchemeSizeMax`; anything else falls back
  // to the email interpretation.
  function schemeInsideOrEmailAtext(code) {
    if (code === codes.colon) {
      effects.consume(code)
      return urlInside
    }
    if (
      (code === codes.plusSign ||
        code === codes.dash ||
        code === codes.dot ||
        asciiAlphanumeric(code)) &&
      size++ < constants.autolinkSchemeSizeMax
    ) {
      effects.consume(code)
      return schemeInsideOrEmailAtext
    }
    return emailAtext(code)
  }
  // URL body: runs until `>`; space, `<`, and control characters abort.
  function urlInside(code) {
    if (code === codes.greaterThan) {
      effects.exit(types.autolinkProtocol)
      return end(code)
    }
    if (code === codes.space || code === codes.lessThan || asciiControl(code)) {
      return nok(code)
    }
    effects.consume(code)
    return urlInside
  }
  // Email local part: `@` switches to the domain; atext continues the
  // local part; anything else aborts.
  function emailAtext(code) {
    if (code === codes.atSign) {
      effects.consume(code)
      size = 0
      return emailAtSignOrDot
    }
    if (asciiAtext(code)) {
      effects.consume(code)
      return emailAtext
    }
    return nok(code)
  }
  // Right after `@` or `.`: a domain label must start alphanumeric.
  function emailAtSignOrDot(code) {
    return asciiAlphanumeric(code) ? emailLabel(code) : nok(code)
  }
  // Inside a domain label; `.` starts a new label, `>` finishes the email.
  function emailLabel(code) {
    if (code === codes.dot) {
      effects.consume(code)
      size = 0
      return emailAtSignOrDot
    }
    if (code === codes.greaterThan) {
      // Exit, then change the type.
      effects.exit(types.autolinkProtocol).type = types.autolinkEmail
      return end(code)
    }
    return emailValue(code)
  }
  // Label continuation: dashes and alphanumerics, bounded by
  // `autolinkDomainSizeMax`; after a dash only more label characters may
  // follow (so a label cannot end on a dash).
  function emailValue(code) {
    if (
      (code === codes.dash || asciiAlphanumeric(code)) &&
      size++ < constants.autolinkDomainSizeMax
    ) {
      effects.consume(code)
      return code === codes.dash ? emailValue : emailLabel
    }
    return nok(code)
  }
  // Closing `>`: emit the marker and finish the autolink.
  function end(code) {
    assert__default['default'].equal(code, codes.greaterThan, 'expected `>`')
    effects.enter(types.autolinkMarker)
    effects.consume(code)
    effects.exit(types.autolinkMarker)
    effects.exit(types.autolink)
    return ok
  }
}
module.exports = autolink

138
node_modules/micromark/lib/tokenize/autolink.mjs generated vendored Normal file
View File

@@ -0,0 +1,138 @@
// Autolink construct: `<scheme:url>` or `<local@domain>` between angle
// brackets.
var autolink = {
  name: 'autolink',
  tokenize: tokenizeAutolink
}
export default autolink
import assert from 'assert'
import asciiAlpha from '../character/ascii-alpha.mjs'
import asciiAlphanumeric from '../character/ascii-alphanumeric.mjs'
import asciiAtext from '../character/ascii-atext.mjs'
import asciiControl from '../character/ascii-control.mjs'
import codes from '../character/codes.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
function tokenizeAutolink(effects, ok, nok) {
  // Counts scheme length first; reset and reused for domain-label length.
  var size = 1
  return start
  // At `<`: open the autolink and its marker.
  function start(code) {
    assert(code === codes.lessThan, 'expected `<`')
    effects.enter(types.autolink)
    effects.enter(types.autolinkMarker)
    effects.consume(code)
    effects.exit(types.autolinkMarker)
    effects.enter(types.autolinkProtocol)
    return open
  }
  // First character: a letter may start a scheme; otherwise try email atext.
  function open(code) {
    if (asciiAlpha(code)) {
      effects.consume(code)
      return schemeOrEmailAtext
    }
    return asciiAtext(code) ? emailAtext(code) : nok(code)
  }
  // Second character: still ambiguous between scheme and email local part.
  function schemeOrEmailAtext(code) {
    return code === codes.plusSign ||
      code === codes.dash ||
      code === codes.dot ||
      asciiAlphanumeric(code)
      ? schemeInsideOrEmailAtext(code)
      : emailAtext(code)
  }
  // Inside a possible scheme: `:` settles it as a URL; scheme characters
  // are accepted up to `autolinkSchemeSizeMax`; anything else falls back
  // to the email interpretation.
  function schemeInsideOrEmailAtext(code) {
    if (code === codes.colon) {
      effects.consume(code)
      return urlInside
    }
    if (
      (code === codes.plusSign ||
        code === codes.dash ||
        code === codes.dot ||
        asciiAlphanumeric(code)) &&
      size++ < constants.autolinkSchemeSizeMax
    ) {
      effects.consume(code)
      return schemeInsideOrEmailAtext
    }
    return emailAtext(code)
  }
  // URL body: runs until `>`; space, `<`, and control characters abort.
  function urlInside(code) {
    if (code === codes.greaterThan) {
      effects.exit(types.autolinkProtocol)
      return end(code)
    }
    if (code === codes.space || code === codes.lessThan || asciiControl(code)) {
      return nok(code)
    }
    effects.consume(code)
    return urlInside
  }
  // Email local part: `@` switches to the domain; atext continues the
  // local part; anything else aborts.
  function emailAtext(code) {
    if (code === codes.atSign) {
      effects.consume(code)
      size = 0
      return emailAtSignOrDot
    }
    if (asciiAtext(code)) {
      effects.consume(code)
      return emailAtext
    }
    return nok(code)
  }
  // Right after `@` or `.`: a domain label must start alphanumeric.
  function emailAtSignOrDot(code) {
    return asciiAlphanumeric(code) ? emailLabel(code) : nok(code)
  }
  // Inside a domain label; `.` starts a new label, `>` finishes the email.
  function emailLabel(code) {
    if (code === codes.dot) {
      effects.consume(code)
      size = 0
      return emailAtSignOrDot
    }
    if (code === codes.greaterThan) {
      // Exit, then change the type.
      effects.exit(types.autolinkProtocol).type = types.autolinkEmail
      return end(code)
    }
    return emailValue(code)
  }
  // Label continuation: dashes and alphanumerics, bounded by
  // `autolinkDomainSizeMax`; after a dash only more label characters may
  // follow (so a label cannot end on a dash).
  function emailValue(code) {
    if (
      (code === codes.dash || asciiAlphanumeric(code)) &&
      size++ < constants.autolinkDomainSizeMax
    ) {
      effects.consume(code)
      return code === codes.dash ? emailValue : emailLabel
    }
    return nok(code)
  }
  // Closing `>`: emit the marker and finish the autolink.
  function end(code) {
    assert.equal(code, codes.greaterThan, 'expected `>`')
    effects.enter(types.autolinkMarker)
    effects.consume(code)
    effects.exit(types.autolinkMarker)
    effects.exit(types.autolink)
    return ok
  }
}

67
node_modules/micromark/lib/tokenize/block-quote.js generated vendored Normal file
View File

@@ -0,0 +1,67 @@
'use strict'
var codes = require('../character/codes.js')
var markdownSpace = require('../character/markdown-space.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var factorySpace = require('./factory-space.js')
// Block quote container: starts at `>`, continues on lines that repeat
// the `>` prefix.
var blockQuote = {
  name: 'blockQuote',
  tokenize: tokenizeBlockQuoteStart,
  continuation: {tokenize: tokenizeBlockQuoteContinuation},
  exit: exit
}
// Start of a block quote line: the `>` marker, optionally followed by one
// space that is swallowed into the prefix.
function tokenizeBlockQuoteStart(effects, ok, nok) {
  var self = this
  return start
  function start(code) {
    if (code === codes.greaterThan) {
      // Only open the container token once per block quote.
      if (!self.containerState.open) {
        effects.enter(types.blockQuote, {_container: true})
        self.containerState.open = true
      }
      effects.enter(types.blockQuotePrefix)
      effects.enter(types.blockQuoteMarker)
      effects.consume(code)
      effects.exit(types.blockQuoteMarker)
      return after
    }
    return nok(code)
  }
  // After `>`: consume at most one space as part of the prefix.
  function after(code) {
    if (markdownSpace(code)) {
      effects.enter(types.blockQuotePrefixWhitespace)
      effects.consume(code)
      effects.exit(types.blockQuotePrefixWhitespace)
      effects.exit(types.blockQuotePrefix)
      return ok
    }
    effects.exit(types.blockQuotePrefix)
    return ok(code)
  }
}
// Continuation: allow up to a tab of indentation (unlimited when the
// `codeIndented` construct is disabled), then require another `>` start.
function tokenizeBlockQuoteContinuation(effects, ok, nok) {
  return factorySpace(
    effects,
    effects.attempt(blockQuote, ok, nok),
    types.linePrefix,
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : constants.tabSize
  )
}
// Close the container token.
function exit(effects) {
  effects.exit(types.blockQuote)
}
module.exports = blockQuote

64
node_modules/micromark/lib/tokenize/block-quote.mjs generated vendored Normal file
View File

@@ -0,0 +1,64 @@
// Block quote container: starts at `>`, continues on lines that repeat
// the `>` prefix.
var blockQuote = {
  name: 'blockQuote',
  tokenize: tokenizeBlockQuoteStart,
  continuation: {tokenize: tokenizeBlockQuoteContinuation},
  exit: exit
}
export default blockQuote
import codes from '../character/codes.mjs'
import markdownSpace from '../character/markdown-space.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import spaceFactory from './factory-space.mjs'
// Start of a block quote line: the `>` marker, optionally followed by one
// space that is swallowed into the prefix.
function tokenizeBlockQuoteStart(effects, ok, nok) {
  var self = this
  return start
  function start(code) {
    if (code === codes.greaterThan) {
      // Only open the container token once per block quote.
      if (!self.containerState.open) {
        effects.enter(types.blockQuote, {_container: true})
        self.containerState.open = true
      }
      effects.enter(types.blockQuotePrefix)
      effects.enter(types.blockQuoteMarker)
      effects.consume(code)
      effects.exit(types.blockQuoteMarker)
      return after
    }
    return nok(code)
  }
  // After `>`: consume at most one space as part of the prefix.
  function after(code) {
    if (markdownSpace(code)) {
      effects.enter(types.blockQuotePrefixWhitespace)
      effects.consume(code)
      effects.exit(types.blockQuotePrefixWhitespace)
      effects.exit(types.blockQuotePrefix)
      return ok
    }
    effects.exit(types.blockQuotePrefix)
    return ok(code)
  }
}
// Continuation: allow up to a tab of indentation (unlimited when the
// `codeIndented` construct is disabled), then require another `>` start.
function tokenizeBlockQuoteContinuation(effects, ok, nok) {
  return spaceFactory(
    effects,
    effects.attempt(blockQuote, ok, nok),
    types.linePrefix,
    this.parser.constructs.disable.null.indexOf('codeIndented') > -1
      ? undefined
      : constants.tabSize
  )
}
// Close the container token.
function exit(effects) {
  effects.exit(types.blockQuote)
}

View File

@@ -0,0 +1,44 @@
'use strict'
var assert = require('assert')
var asciiPunctuation = require('../character/ascii-punctuation.js')
var codes = require('../character/codes.js')
var types = require('../constant/types.js')
// Interop helper so a CJS default import works for both module shapes.
function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
// Character escape: a backslash followed by one ASCII punctuation character.
var characterEscape = {
  name: 'characterEscape',
  tokenize: tokenizeCharacterEscape
}
function tokenizeCharacterEscape(effects, ok, nok) {
  return start
  // At `\`: emit the escape marker.
  function start(code) {
    assert__default['default'](code === codes.backslash, 'expected `\\`')
    effects.enter(types.characterEscape)
    effects.enter(types.escapeMarker)
    effects.consume(code)
    effects.exit(types.escapeMarker)
    return open
  }
  // Only ASCII punctuation can be escaped; anything else is not an escape.
  function open(code) {
    if (asciiPunctuation(code)) {
      effects.enter(types.characterEscapeValue)
      effects.consume(code)
      effects.exit(types.characterEscapeValue)
      effects.exit(types.characterEscape)
      return ok
    }
    return nok(code)
  }
}
module.exports = characterEscape

View File

@@ -0,0 +1,35 @@
// Character escape: a backslash followed by one ASCII punctuation character.
var characterEscape = {
  name: 'characterEscape',
  tokenize: tokenizeCharacterEscape
}
export default characterEscape
import assert from 'assert'
import asciiPunctuation from '../character/ascii-punctuation.mjs'
import codes from '../character/codes.mjs'
import types from '../constant/types.mjs'
function tokenizeCharacterEscape(effects, ok, nok) {
  return start
  // At `\`: emit the escape marker.
  function start(code) {
    assert(code === codes.backslash, 'expected `\\`')
    effects.enter(types.characterEscape)
    effects.enter(types.escapeMarker)
    effects.consume(code)
    effects.exit(types.escapeMarker)
    return open
  }
  // Only ASCII punctuation can be escaped; anything else is not an escape.
  function open(code) {
    if (asciiPunctuation(code)) {
      effects.enter(types.characterEscapeValue)
      effects.consume(code)
      effects.exit(types.characterEscapeValue)
      effects.exit(types.characterEscape)
      return ok
    }
    return nok(code)
  }
}

View File

@@ -0,0 +1,101 @@
'use strict'
var assert = require('assert')
var decodeEntity = require('parse-entities/decode-entity.js')
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
var asciiDigit = require('../character/ascii-digit.js')
var asciiHexDigit = require('../character/ascii-hex-digit.js')
var codes = require('../character/codes.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
// Interop helper so a CJS default import works for both module shapes.
function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
var decodeEntity__default = /*#__PURE__*/ _interopDefaultLegacy(decodeEntity)
// Character reference: `&name;`, `&#123;` (decimal), or `&#x1f;` (hex).
var characterReference = {
  name: 'characterReference',
  tokenize: tokenizeCharacterReference
}
function tokenizeCharacterReference(effects, ok, nok) {
  var self = this
  // Number of value characters consumed so far.
  var size = 0
  // Maximum value length and per-character test for the detected form.
  var max
  var test
  return start
  // At `&`.
  function start(code) {
    assert__default['default'](code === codes.ampersand, 'expected `&`')
    effects.enter(types.characterReference)
    effects.enter(types.characterReferenceMarker)
    effects.consume(code)
    effects.exit(types.characterReferenceMarker)
    return open
  }
  // `#` switches to a numeric form; otherwise expect a named reference.
  function open(code) {
    if (code === codes.numberSign) {
      effects.enter(types.characterReferenceMarkerNumeric)
      effects.consume(code)
      effects.exit(types.characterReferenceMarkerNumeric)
      return numeric
    }
    effects.enter(types.characterReferenceValue)
    max = constants.characterReferenceNamedSizeMax
    test = asciiAlphanumeric
    return value(code)
  }
  // After `#`: `x`/`X` selects hexadecimal, otherwise decimal.
  function numeric(code) {
    if (code === codes.uppercaseX || code === codes.lowercaseX) {
      effects.enter(types.characterReferenceMarkerHexadecimal)
      effects.consume(code)
      effects.exit(types.characterReferenceMarkerHexadecimal)
      effects.enter(types.characterReferenceValue)
      max = constants.characterReferenceHexadecimalSizeMax
      test = asciiHexDigit
      return value
    }
    effects.enter(types.characterReferenceValue)
    max = constants.characterReferenceDecimalSizeMax
    test = asciiDigit
    return value(code)
  }
  // Value characters up to `max`; a non-empty value ended by `;` succeeds.
  // Named references must additionally be known to `decodeEntity`.
  function value(code) {
    var token
    if (code === codes.semicolon && size) {
      token = effects.exit(types.characterReferenceValue)
      if (
        test === asciiAlphanumeric &&
        !decodeEntity__default['default'](self.sliceSerialize(token))
      ) {
        return nok(code)
      }
      effects.enter(types.characterReferenceMarker)
      effects.consume(code)
      effects.exit(types.characterReferenceMarker)
      effects.exit(types.characterReference)
      return ok
    }
    if (test(code) && size++ < max) {
      effects.consume(code)
      return value
    }
    return nok(code)
  }
}
module.exports = characterReference

View File

@@ -0,0 +1,88 @@
// Character reference: `&name;`, `&#123;` (decimal), or `&#x1f;` (hex).
var characterReference = {
  name: 'characterReference',
  tokenize: tokenizeCharacterReference
}
export default characterReference
import assert from 'assert'
import decode from 'parse-entities/decode-entity.js'
import asciiAlphanumeric from '../character/ascii-alphanumeric.mjs'
import asciiDigit from '../character/ascii-digit.mjs'
import asciiHexDigit from '../character/ascii-hex-digit.mjs'
import codes from '../character/codes.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
function tokenizeCharacterReference(effects, ok, nok) {
  var self = this
  // Number of value characters consumed so far.
  var size = 0
  // Maximum value length and per-character test for the detected form.
  var max
  var test
  return start
  // At `&`.
  function start(code) {
    assert(code === codes.ampersand, 'expected `&`')
    effects.enter(types.characterReference)
    effects.enter(types.characterReferenceMarker)
    effects.consume(code)
    effects.exit(types.characterReferenceMarker)
    return open
  }
  // `#` switches to a numeric form; otherwise expect a named reference.
  function open(code) {
    if (code === codes.numberSign) {
      effects.enter(types.characterReferenceMarkerNumeric)
      effects.consume(code)
      effects.exit(types.characterReferenceMarkerNumeric)
      return numeric
    }
    effects.enter(types.characterReferenceValue)
    max = constants.characterReferenceNamedSizeMax
    test = asciiAlphanumeric
    return value(code)
  }
  // After `#`: `x`/`X` selects hexadecimal, otherwise decimal.
  function numeric(code) {
    if (code === codes.uppercaseX || code === codes.lowercaseX) {
      effects.enter(types.characterReferenceMarkerHexadecimal)
      effects.consume(code)
      effects.exit(types.characterReferenceMarkerHexadecimal)
      effects.enter(types.characterReferenceValue)
      max = constants.characterReferenceHexadecimalSizeMax
      test = asciiHexDigit
      return value
    }
    effects.enter(types.characterReferenceValue)
    max = constants.characterReferenceDecimalSizeMax
    test = asciiDigit
    return value(code)
  }
  // Value characters up to `max`; a non-empty value ended by `;` succeeds.
  // Named references must additionally be known to `decode`.
  function value(code) {
    var token
    if (code === codes.semicolon && size) {
      token = effects.exit(types.characterReferenceValue)
      if (test === asciiAlphanumeric && !decode(self.sliceSerialize(token))) {
        return nok(code)
      }
      effects.enter(types.characterReferenceMarker)
      effects.consume(code)
      effects.exit(types.characterReferenceMarker)
      effects.exit(types.characterReference)
      return ok
    }
    if (test(code) && size++ < max) {
      effects.consume(code)
      return value
    }
    return nok(code)
  }
}

185
node_modules/micromark/lib/tokenize/code-fenced.js generated vendored Normal file
View File

@@ -0,0 +1,185 @@
'use strict'
var assert = require('assert')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var prefixSize = require('../util/prefix-size.js')
var factorySpace = require('./factory-space.js')
// Interop helper so a CJS default import works for both module shapes.
function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
// Fenced code: ``` or ~~~ fences with an optional info/meta line.
// NOTE(review): `concrete: true` presumably means the content is not
// scanned for other constructs — confirm against micromark's docs.
var codeFenced = {
  name: 'codeFenced',
  tokenize: tokenizeCodeFenced,
  concrete: true
}
function tokenizeCodeFenced(effects, ok, nok) {
  var self = this
  var closingFenceConstruct = {tokenize: tokenizeClosingFence, partial: true}
  // Indentation of the opening fence; content lines may strip up to it.
  var initialPrefix = prefixSize(this.events, types.linePrefix)
  // Length of the opening sequence; the closer must match or exceed it.
  var sizeOpen = 0
  var marker
  return start
  // At the first fence marker.
  function start(code) {
    assert__default['default'](
      code === codes.graveAccent || code === codes.tilde,
      'expected `` ` `` or `~`'
    )
    effects.enter(types.codeFenced)
    effects.enter(types.codeFencedFence)
    effects.enter(types.codeFencedFenceSequence)
    marker = code
    return sequenceOpen(code)
  }
  // Count the opening sequence; it must reach the minimum size.
  function sequenceOpen(code) {
    if (code === marker) {
      effects.consume(code)
      sizeOpen++
      return sequenceOpen
    }
    effects.exit(types.codeFencedFenceSequence)
    return sizeOpen < constants.codeFencedSequenceSizeMin
      ? nok(code)
      : factorySpace(effects, infoOpen, types.whitespace)(code)
  }
  // Optional info string (e.g. a language tag) after the fence.
  function infoOpen(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      return openAfter(code)
    }
    effects.enter(types.codeFencedFenceInfo)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return info(code)
  }
  function info(code) {
    if (code === codes.eof || markdownLineEndingOrSpace(code)) {
      effects.exit(types.chunkString)
      effects.exit(types.codeFencedFenceInfo)
      return factorySpace(effects, infoAfter, types.whitespace)(code)
    }
    // A backtick fence's info string may not contain backticks.
    if (code === codes.graveAccent && code === marker) return nok(code)
    effects.consume(code)
    return info
  }
  // Optional meta part after the info string.
  function infoAfter(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      return openAfter(code)
    }
    effects.enter(types.codeFencedFenceMeta)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return meta(code)
  }
  function meta(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.chunkString)
      effects.exit(types.codeFencedFenceMeta)
      return openAfter(code)
    }
    // Same backtick restriction as the info string.
    if (code === codes.graveAccent && code === marker) return nok(code)
    effects.consume(code)
    return meta
  }
  // Opening fence done; when interrupting, stop after the fence.
  function openAfter(code) {
    effects.exit(types.codeFencedFence)
    return self.interrupt ? ok(code) : content(code)
  }
  // Per line: try the closing fence first, otherwise take the line as
  // content (stripping up to the opening fence's indentation).
  function content(code) {
    if (code === codes.eof) {
      return after(code)
    }
    if (markdownLineEnding(code)) {
      effects.enter(types.lineEnding)
      effects.consume(code)
      effects.exit(types.lineEnding)
      return effects.attempt(
        closingFenceConstruct,
        after,
        initialPrefix
          ? factorySpace(effects, content, types.linePrefix, initialPrefix + 1)
          : content
      )
    }
    effects.enter(types.codeFlowValue)
    return contentContinue(code)
  }
  function contentContinue(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.codeFlowValue)
      return content(code)
    }
    effects.consume(code)
    return contentContinue
  }
  function after(code) {
    effects.exit(types.codeFenced)
    return ok(code)
  }
  // Partial tokenizer for a closing fence: optional indent, then at least
  // `sizeOpen` markers, then only whitespace until the line ends.
  function tokenizeClosingFence(effects, ok, nok) {
    var size = 0
    return factorySpace(
      effects,
      closingSequenceStart,
      types.linePrefix,
      this.parser.constructs.disable.null.indexOf('codeIndented') > -1
        ? undefined
        : constants.tabSize
    )
    function closingSequenceStart(code) {
      effects.enter(types.codeFencedFence)
      effects.enter(types.codeFencedFenceSequence)
      return closingSequence(code)
    }
    function closingSequence(code) {
      if (code === marker) {
        effects.consume(code)
        size++
        return closingSequence
      }
      if (size < sizeOpen) return nok(code)
      effects.exit(types.codeFencedFenceSequence)
      return factorySpace(effects, closingSequenceEnd, types.whitespace)(code)
    }
    function closingSequenceEnd(code) {
      if (code === codes.eof || markdownLineEnding(code)) {
        effects.exit(types.codeFencedFence)
        return ok(code)
      }
      return nok(code)
    }
  }
}
module.exports = codeFenced

176
node_modules/micromark/lib/tokenize/code-fenced.mjs generated vendored Normal file
View File

@@ -0,0 +1,176 @@
// Fenced code: ``` or ~~~ fences with an optional info/meta line.
// NOTE(review): `concrete: true` presumably means the content is not
// scanned for other constructs — confirm against micromark's docs.
var codeFenced = {
  name: 'codeFenced',
  tokenize: tokenizeCodeFenced,
  concrete: true
}
export default codeFenced
import assert from 'assert'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import markdownLineEndingOrSpace from '../character/markdown-line-ending-or-space.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import prefixSize from '../util/prefix-size.mjs'
import spaceFactory from './factory-space.mjs'
function tokenizeCodeFenced(effects, ok, nok) {
  var self = this
  var closingFenceConstruct = {tokenize: tokenizeClosingFence, partial: true}
  // Indentation of the opening fence; content lines may strip up to it.
  var initialPrefix = prefixSize(this.events, types.linePrefix)
  // Length of the opening sequence; the closer must match or exceed it.
  var sizeOpen = 0
  var marker
  return start
  // At the first fence marker.
  function start(code) {
    assert(
      code === codes.graveAccent || code === codes.tilde,
      'expected `` ` `` or `~`'
    )
    effects.enter(types.codeFenced)
    effects.enter(types.codeFencedFence)
    effects.enter(types.codeFencedFenceSequence)
    marker = code
    return sequenceOpen(code)
  }
  // Count the opening sequence; it must reach the minimum size.
  function sequenceOpen(code) {
    if (code === marker) {
      effects.consume(code)
      sizeOpen++
      return sequenceOpen
    }
    effects.exit(types.codeFencedFenceSequence)
    return sizeOpen < constants.codeFencedSequenceSizeMin
      ? nok(code)
      : spaceFactory(effects, infoOpen, types.whitespace)(code)
  }
  // Optional info string (e.g. a language tag) after the fence.
  function infoOpen(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      return openAfter(code)
    }
    effects.enter(types.codeFencedFenceInfo)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return info(code)
  }
  function info(code) {
    if (code === codes.eof || markdownLineEndingOrSpace(code)) {
      effects.exit(types.chunkString)
      effects.exit(types.codeFencedFenceInfo)
      return spaceFactory(effects, infoAfter, types.whitespace)(code)
    }
    // A backtick fence's info string may not contain backticks.
    if (code === codes.graveAccent && code === marker) return nok(code)
    effects.consume(code)
    return info
  }
  // Optional meta part after the info string.
  function infoAfter(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      return openAfter(code)
    }
    effects.enter(types.codeFencedFenceMeta)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return meta(code)
  }
  function meta(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.chunkString)
      effects.exit(types.codeFencedFenceMeta)
      return openAfter(code)
    }
    // Same backtick restriction as the info string.
    if (code === codes.graveAccent && code === marker) return nok(code)
    effects.consume(code)
    return meta
  }
  // Opening fence done; when interrupting, stop after the fence.
  function openAfter(code) {
    effects.exit(types.codeFencedFence)
    return self.interrupt ? ok(code) : content(code)
  }
  // Per line: try the closing fence first, otherwise take the line as
  // content (stripping up to the opening fence's indentation).
  function content(code) {
    if (code === codes.eof) {
      return after(code)
    }
    if (markdownLineEnding(code)) {
      effects.enter(types.lineEnding)
      effects.consume(code)
      effects.exit(types.lineEnding)
      return effects.attempt(
        closingFenceConstruct,
        after,
        initialPrefix
          ? spaceFactory(effects, content, types.linePrefix, initialPrefix + 1)
          : content
      )
    }
    effects.enter(types.codeFlowValue)
    return contentContinue(code)
  }
  function contentContinue(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.codeFlowValue)
      return content(code)
    }
    effects.consume(code)
    return contentContinue
  }
  function after(code) {
    effects.exit(types.codeFenced)
    return ok(code)
  }
  // Partial tokenizer for a closing fence: optional indent, then at least
  // `sizeOpen` markers, then only whitespace until the line ends.
  function tokenizeClosingFence(effects, ok, nok) {
    var size = 0
    return spaceFactory(
      effects,
      closingSequenceStart,
      types.linePrefix,
      this.parser.constructs.disable.null.indexOf('codeIndented') > -1
        ? undefined
        : constants.tabSize
    )
    function closingSequenceStart(code) {
      effects.enter(types.codeFencedFence)
      effects.enter(types.codeFencedFenceSequence)
      return closingSequence(code)
    }
    function closingSequence(code) {
      if (code === marker) {
        effects.consume(code)
        size++
        return closingSequence
      }
      if (size < sizeOpen) return nok(code)
      effects.exit(types.codeFencedFenceSequence)
      return spaceFactory(effects, closingSequenceEnd, types.whitespace)(code)
    }
    function closingSequenceEnd(code) {
      if (code === codes.eof || markdownLineEnding(code)) {
        effects.exit(types.codeFencedFence)
        return ok(code)
      }
      return nok(code)
    }
  }
}

91
node_modules/micromark/lib/tokenize/code-indented.js generated vendored Normal file
View File

@@ -0,0 +1,91 @@
'use strict'
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var chunkedSplice = require('../util/chunked-splice.js')
var prefixSize = require('../util/prefix-size.js')
var factorySpace = require('./factory-space.js')
// Indented code: lines prefixed by at least a tab's worth of indentation.
var codeIndented = {
  name: 'codeIndented',
  tokenize: tokenizeCodeIndented,
  resolve: resolveCodeIndented
}
// Partial construct matching one sufficiently indented continuation line.
var indentedContentConstruct = {
  tokenize: tokenizeIndentedContent,
  partial: true
}
// Wrap all produced events in a single `codeIndented` token.
function resolveCodeIndented(events, context) {
  var code = {
    type: types.codeIndented,
    start: events[0][1].start,
    end: events[events.length - 1][1].end
  }
  chunkedSplice(events, 0, 0, [['enter', code, context]])
  chunkedSplice(events, events.length, 0, [['exit', code, context]])
  return events
}
// Require an indented first line, then alternate between code content and
// further indented lines until one no longer qualifies.
function tokenizeCodeIndented(effects, ok, nok) {
  return effects.attempt(indentedContentConstruct, afterPrefix, nok)
  function afterPrefix(code) {
    if (code === codes.eof) {
      return ok(code)
    }
    if (markdownLineEnding(code)) {
      // Try to continue with another indented line; otherwise finish.
      return effects.attempt(indentedContentConstruct, afterPrefix, ok)(code)
    }
    effects.enter(types.codeFlowValue)
    return content(code)
  }
  function content(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.codeFlowValue)
      return afterPrefix(code)
    }
    effects.consume(code)
    return content
  }
}
// Consume the line prefix, eating line endings in between, and succeed
// only when the prefix reaches a full tab of indentation.
function tokenizeIndentedContent(effects, ok, nok) {
  var self = this
  return factorySpace(
    effects,
    afterPrefix,
    types.linePrefix,
    constants.tabSize + 1
  )
  function afterPrefix(code) {
    if (markdownLineEnding(code)) {
      // Eat the line ending and re-check the next line's prefix.
      effects.enter(types.lineEnding)
      effects.consume(code)
      effects.exit(types.lineEnding)
      return factorySpace(
        effects,
        afterPrefix,
        types.linePrefix,
        constants.tabSize + 1
      )
    }
    return prefixSize(self.events, types.linePrefix) < constants.tabSize
      ? nok(code)
      : ok(code)
  }
}
module.exports = codeIndented

88
node_modules/micromark/lib/tokenize/code-indented.mjs generated vendored Normal file
View File

@@ -0,0 +1,88 @@
// Indented code: lines prefixed by at least a tab's worth of indentation.
var codeIndented = {
  name: 'codeIndented',
  tokenize: tokenizeCodeIndented,
  resolve: resolveCodeIndented
}
export default codeIndented
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import chunkedSplice from '../util/chunked-splice.mjs'
import prefixSize from '../util/prefix-size.mjs'
import spaceFactory from './factory-space.mjs'
// Partial construct matching one sufficiently indented continuation line.
var indentedContentConstruct = {
  tokenize: tokenizeIndentedContent,
  partial: true
}
// Wrap all produced events in a single `codeIndented` token.
function resolveCodeIndented(events, context) {
  var code = {
    type: types.codeIndented,
    start: events[0][1].start,
    end: events[events.length - 1][1].end
  }
  chunkedSplice(events, 0, 0, [['enter', code, context]])
  chunkedSplice(events, events.length, 0, [['exit', code, context]])
  return events
}
// Require an indented first line, then alternate between code content and
// further indented lines until one no longer qualifies.
function tokenizeCodeIndented(effects, ok, nok) {
  return effects.attempt(indentedContentConstruct, afterPrefix, nok)
  function afterPrefix(code) {
    if (code === codes.eof) {
      return ok(code)
    }
    if (markdownLineEnding(code)) {
      // Try to continue with another indented line; otherwise finish.
      return effects.attempt(indentedContentConstruct, afterPrefix, ok)(code)
    }
    effects.enter(types.codeFlowValue)
    return content(code)
  }
  function content(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.codeFlowValue)
      return afterPrefix(code)
    }
    effects.consume(code)
    return content
  }
}
// Consume the line prefix, eating line endings in between, and succeed
// only when the prefix reaches a full tab of indentation.
function tokenizeIndentedContent(effects, ok, nok) {
  var self = this
  return spaceFactory(
    effects,
    afterPrefix,
    types.linePrefix,
    constants.tabSize + 1
  )
  function afterPrefix(code) {
    if (markdownLineEnding(code)) {
      // Eat the line ending and re-check the next line's prefix.
      effects.enter(types.lineEnding)
      effects.consume(code)
      effects.exit(types.lineEnding)
      return spaceFactory(
        effects,
        afterPrefix,
        types.linePrefix,
        constants.tabSize + 1
      )
    }
    return prefixSize(self.events, types.linePrefix) < constants.tabSize
      ? nok(code)
      : ok(code)
  }
}

191
node_modules/micromark/lib/tokenize/code-text.js generated vendored Normal file
View File

@@ -0,0 +1,191 @@
'use strict'

var assert = require('assert')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var types = require('../constant/types.js')

function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}

var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)

// Text construct for code spans: `` `code` ``.
var codeText = {
  name: 'codeText',
  tokenize: tokenizeCodeText,
  resolve: resolveCodeText,
  previous: previous
}

// Post-process one code span: turn one leading and one trailing space/EOL
// token into padding (only when both exist and there is data between them),
// then merge each run of adjacent non-EOL tokens into a single
// `codeTextData` token.
function resolveCodeText(events) {
  // Exit event of the last token before the closing sequence
  // (the last three events are the closing sequence and the span exit).
  var tailExitIndex = events.length - 4
  // Enter event of the first token after the opening sequence.
  var headEnterIndex = 3
  var index
  var enter

  // If we start and end with an EOL or a space.
  if (
    (events[headEnterIndex][1].type === types.lineEnding ||
      events[headEnterIndex][1].type === 'space') &&
    (events[tailExitIndex][1].type === types.lineEnding ||
      events[tailExitIndex][1].type === 'space')
  ) {
    index = headEnterIndex

    // And we have data.
    while (++index < tailExitIndex) {
      if (events[index][1].type === types.codeTextData) {
        // Then we have padding.
        events[tailExitIndex][1].type = events[headEnterIndex][1].type =
          types.codeTextPadding
        // Skip past the (now) padding tokens at both ends.
        headEnterIndex += 2
        tailExitIndex -= 2
        break
      }
    }
  }

  // Merge adjacent spaces and data.
  index = headEnterIndex - 1
  tailExitIndex++

  while (++index <= tailExitIndex) {
    if (enter === undefined) {
      // Start of a mergeable run: anything but the end or an EOL.
      if (
        index !== tailExitIndex &&
        events[index][1].type !== types.lineEnding
      ) {
        enter = index
      }
    } else if (
      index === tailExitIndex ||
      events[index][1].type === types.lineEnding
    ) {
      // End of the run: retype its first token as data and, if the run
      // spanned more than one token (each token is an enter/exit pair),
      // stretch that token over the whole run and drop the events between.
      events[enter][1].type = types.codeTextData

      if (index !== enter + 2) {
        events[enter][1].end = events[index - 1][1].end
        events.splice(enter + 2, index - enter - 2)
        tailExitIndex -= index - enter - 2
        index = enter + 2
      }

      enter = undefined
    }
  }

  return events
}

// Check the character before the span: a grave accent may only precede a
// code span when that accent was escaped.
function previous(code) {
  // If there is a previous code, there will always be a tail.
  return (
    code !== codes.graveAccent ||
    this.events[this.events.length - 1][1].type === types.characterEscape
  )
}

// Tokenize a code span: an opening sequence of grave accents, a gap of
// data/spaces/EOLs, then a closing sequence of exactly the same size.
function tokenizeCodeText(effects, ok, nok) {
  var self = this
  // Size of the opening sequence.
  var sizeOpen = 0
  // Size of the closing sequence candidate.
  var size
  // Token of the closing sequence candidate, retyped to data if it does not
  // match the opening size.
  var token

  return start

  // Start of the opening sequence.
  function start(code) {
    assert__default['default'](code === codes.graveAccent, 'expected `` ` ``')
    assert__default['default'](
      previous.call(self, self.previous),
      'expected correct previous'
    )
    effects.enter(types.codeText)
    effects.enter(types.codeTextSequence)
    return openingSequence(code)
  }

  // Count and consume the opening grave accents.
  function openingSequence(code) {
    if (code === codes.graveAccent) {
      effects.consume(code)
      sizeOpen++
      return openingSequence
    }

    effects.exit(types.codeTextSequence)
    return gap(code)
  }

  // Between tokens inside the span: dispatch on the next character.
  function gap(code) {
    // EOF.
    if (code === codes.eof) {
      return nok(code)
    }

    // Closing fence?
    // Could also be data.
    if (code === codes.graveAccent) {
      token = effects.enter(types.codeTextSequence)
      size = 0
      return closingSequence(code)
    }

    // Tabs dont work, and virtual spaces dont make sense.
    if (code === codes.space) {
      effects.enter('space')
      effects.consume(code)
      effects.exit('space')
      return gap
    }

    if (markdownLineEnding(code)) {
      effects.enter(types.lineEnding)
      effects.consume(code)
      effects.exit(types.lineEnding)
      return gap
    }

    // Data.
    effects.enter(types.codeTextData)
    return data(code)
  }

  // In code.
  function data(code) {
    if (
      code === codes.eof ||
      code === codes.space ||
      code === codes.graveAccent ||
      markdownLineEnding(code)
    ) {
      effects.exit(types.codeTextData)
      return gap(code)
    }

    effects.consume(code)
    return data
  }

  // Closing fence.
  function closingSequence(code) {
    // More.
    if (code === codes.graveAccent) {
      effects.consume(code)
      size++
      return closingSequence
    }

    // Done!
    if (size === sizeOpen) {
      effects.exit(types.codeTextSequence)
      effects.exit(types.codeText)
      return ok(code)
    }

    // More or less accents: mark as data.
    token.type = types.codeTextData
    return data(code)
  }
}

module.exports = codeText

179
node_modules/micromark/lib/tokenize/code-text.mjs generated vendored Normal file
View File

@@ -0,0 +1,179 @@
// Text construct for code spans: `` `code` ``.
var codeText = {
  name: 'codeText',
  tokenize: tokenizeCodeText,
  resolve: resolveCodeText,
  previous: previous
}
export default codeText

import assert from 'assert'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import types from '../constant/types.mjs'

// Post-process one code span: turn one leading and one trailing space/EOL
// token into padding (only when both exist and there is data between them),
// then merge each run of adjacent non-EOL tokens into a single
// `codeTextData` token.
function resolveCodeText(events) {
  // Exit event of the last token before the closing sequence
  // (the last three events are the closing sequence and the span exit).
  var tailExitIndex = events.length - 4
  // Enter event of the first token after the opening sequence.
  var headEnterIndex = 3
  var index
  var enter

  // If we start and end with an EOL or a space.
  if (
    (events[headEnterIndex][1].type === types.lineEnding ||
      events[headEnterIndex][1].type === 'space') &&
    (events[tailExitIndex][1].type === types.lineEnding ||
      events[tailExitIndex][1].type === 'space')
  ) {
    index = headEnterIndex

    // And we have data.
    while (++index < tailExitIndex) {
      if (events[index][1].type === types.codeTextData) {
        // Then we have padding.
        events[tailExitIndex][1].type = events[headEnterIndex][1].type =
          types.codeTextPadding
        // Skip past the (now) padding tokens at both ends.
        headEnterIndex += 2
        tailExitIndex -= 2
        break
      }
    }
  }

  // Merge adjacent spaces and data.
  index = headEnterIndex - 1
  tailExitIndex++

  while (++index <= tailExitIndex) {
    if (enter === undefined) {
      // Start of a mergeable run: anything but the end or an EOL.
      if (
        index !== tailExitIndex &&
        events[index][1].type !== types.lineEnding
      ) {
        enter = index
      }
    } else if (
      index === tailExitIndex ||
      events[index][1].type === types.lineEnding
    ) {
      // End of the run: retype its first token as data and, if the run
      // spanned more than one token (each token is an enter/exit pair),
      // stretch that token over the whole run and drop the events between.
      events[enter][1].type = types.codeTextData

      if (index !== enter + 2) {
        events[enter][1].end = events[index - 1][1].end
        events.splice(enter + 2, index - enter - 2)
        tailExitIndex -= index - enter - 2
        index = enter + 2
      }

      enter = undefined
    }
  }

  return events
}

// Check the character before the span: a grave accent may only precede a
// code span when that accent was escaped.
function previous(code) {
  // If there is a previous code, there will always be a tail.
  return (
    code !== codes.graveAccent ||
    this.events[this.events.length - 1][1].type === types.characterEscape
  )
}

// Tokenize a code span: an opening sequence of grave accents, a gap of
// data/spaces/EOLs, then a closing sequence of exactly the same size.
function tokenizeCodeText(effects, ok, nok) {
  var self = this
  // Size of the opening sequence.
  var sizeOpen = 0
  // Size of the closing sequence candidate.
  var size
  // Token of the closing sequence candidate, retyped to data if it does not
  // match the opening size.
  var token

  return start

  // Start of the opening sequence.
  function start(code) {
    assert(code === codes.graveAccent, 'expected `` ` ``')
    assert(previous.call(self, self.previous), 'expected correct previous')
    effects.enter(types.codeText)
    effects.enter(types.codeTextSequence)
    return openingSequence(code)
  }

  // Count and consume the opening grave accents.
  function openingSequence(code) {
    if (code === codes.graveAccent) {
      effects.consume(code)
      sizeOpen++
      return openingSequence
    }

    effects.exit(types.codeTextSequence)
    return gap(code)
  }

  // Between tokens inside the span: dispatch on the next character.
  function gap(code) {
    // EOF.
    if (code === codes.eof) {
      return nok(code)
    }

    // Closing fence?
    // Could also be data.
    if (code === codes.graveAccent) {
      token = effects.enter(types.codeTextSequence)
      size = 0
      return closingSequence(code)
    }

    // Tabs dont work, and virtual spaces dont make sense.
    if (code === codes.space) {
      effects.enter('space')
      effects.consume(code)
      effects.exit('space')
      return gap
    }

    if (markdownLineEnding(code)) {
      effects.enter(types.lineEnding)
      effects.consume(code)
      effects.exit(types.lineEnding)
      return gap
    }

    // Data.
    effects.enter(types.codeTextData)
    return data(code)
  }

  // In code.
  function data(code) {
    if (
      code === codes.eof ||
      code === codes.space ||
      code === codes.graveAccent ||
      markdownLineEnding(code)
    ) {
      effects.exit(types.codeTextData)
      return gap(code)
    }

    effects.consume(code)
    return data
  }

  // Closing fence.
  function closingSequence(code) {
    // More.
    if (code === codes.graveAccent) {
      effects.consume(code)
      size++
      return closingSequence
    }

    // Done!
    if (size === sizeOpen) {
      effects.exit(types.codeTextSequence)
      effects.exit(types.codeText)
      return ok(code)
    }

    // More or less accents: mark as data.
    token.type = types.codeTextData
    return data(code)
  }
}

121
node_modules/micromark/lib/tokenize/content.js generated vendored Normal file
View File

@@ -0,0 +1,121 @@
'use strict'

var assert = require('assert')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')
var prefixSize = require('../util/prefix-size.js')
var subtokenize = require('../util/subtokenize.js')
var factorySpace = require('./factory-space.js')

function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}

var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)

// No name because it must not be turned off.
var content = {
  tokenize: tokenizeContent,
  resolve: resolveContent,
  interruptible: true,
  lazy: true
}

// Partial construct used to look past a line ending and decide whether the
// content continues on the next line.
var continuationConstruct = {tokenize: tokenizeContinuation, partial: true}

// Content is transparent: its parsed right now. That way, definitions are also
// parsed right now: before text in paragraphs (specifically, media) are parsed.
function resolveContent(events) {
  subtokenize(events)
  return events
}

// Tokenize one content block: a chain of `chunkContent` tokens (linked via
// `previous`/`next`) separated by the line endings that passed the
// continuation check. Note there is no `nok`: content always succeeds.
function tokenizeContent(effects, ok) {
  // The most recent `chunkContent` token.
  var previous

  return start

  function start(code) {
    assert__default['default'](
      code !== codes.eof && !markdownLineEnding(code),
      'expected no eof or eol'
    )

    effects.enter(types.content)
    previous = effects.enter(types.chunkContent, {
      contentType: constants.contentTypeContent
    })
    return data(code)
  }

  function data(code) {
    if (code === codes.eof) {
      return contentEnd(code)
    }

    // At an EOL: peek ahead to see whether the next line continues this
    // content.
    if (markdownLineEnding(code)) {
      return effects.check(
        continuationConstruct,
        contentContinue,
        contentEnd
      )(code)
    }

    // Data.
    effects.consume(code)
    return data
  }

  // Close the current chunk and the whole content token.
  function contentEnd(code) {
    effects.exit(types.chunkContent)
    effects.exit(types.content)
    return ok(code)
  }

  // Consume the EOL and open the next chunk, linking it to the previous one.
  function contentContinue(code) {
    assert__default['default'](markdownLineEnding(code), 'expected eol')
    effects.consume(code)
    effects.exit(types.chunkContent)
    previous = previous.next = effects.enter(types.chunkContent, {
      contentType: constants.contentTypeContent,
      previous: previous
    })
    return data
  }
}

// Lookahead past an EOL plus optional line prefix: `ok` when the next line
// continues the content, `nok` when it ends it.
function tokenizeContinuation(effects, ok, nok) {
  var self = this

  return startLookahead

  function startLookahead(code) {
    assert__default['default'](
      markdownLineEnding(code),
      'expected a line ending'
    )
    effects.enter(types.lineEnding)
    effects.consume(code)
    effects.exit(types.lineEnding)
    return factorySpace(effects, prefixed, types.linePrefix)
  }

  function prefixed(code) {
    // A blank line (or EOF) always ends the content.
    if (code === codes.eof || markdownLineEnding(code)) {
      return nok(code)
    }

    // Unless the line is indented enough to be code (and indented code is
    // enabled), let other flow constructs try to interrupt: if one can start
    // here the content ends (`nok`), otherwise it continues (`ok`).
    if (
      self.parser.constructs.disable.null.indexOf('codeIndented') > -1 ||
      prefixSize(self.events, types.linePrefix) < constants.tabSize
    ) {
      return effects.interrupt(self.parser.constructs.flow, nok, ok)(code)
    }

    return ok(code)
  }
}

module.exports = content

109
node_modules/micromark/lib/tokenize/content.mjs generated vendored Normal file
View File

@@ -0,0 +1,109 @@
// No name because it must not be turned off.
var content = {
  tokenize: tokenizeContent,
  resolve: resolveContent,
  interruptible: true,
  lazy: true
}
export default content

import assert from 'assert'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'
import prefixSize from '../util/prefix-size.mjs'
import subtokenize from '../util/subtokenize.mjs'
import spaceFactory from './factory-space.mjs'

// Partial construct used to look past a line ending and decide whether the
// content continues on the next line.
var continuationConstruct = {tokenize: tokenizeContinuation, partial: true}

// Content is transparent: its parsed right now. That way, definitions are also
// parsed right now: before text in paragraphs (specifically, media) are parsed.
function resolveContent(events) {
  subtokenize(events)
  return events
}

// Tokenize one content block: a chain of `chunkContent` tokens (linked via
// `previous`/`next`) separated by the line endings that passed the
// continuation check. Note there is no `nok`: content always succeeds.
function tokenizeContent(effects, ok) {
  // The most recent `chunkContent` token.
  var previous

  return start

  function start(code) {
    assert(
      code !== codes.eof && !markdownLineEnding(code),
      'expected no eof or eol'
    )

    effects.enter(types.content)
    previous = effects.enter(types.chunkContent, {
      contentType: constants.contentTypeContent
    })
    return data(code)
  }

  function data(code) {
    if (code === codes.eof) {
      return contentEnd(code)
    }

    // At an EOL: peek ahead to see whether the next line continues this
    // content.
    if (markdownLineEnding(code)) {
      return effects.check(
        continuationConstruct,
        contentContinue,
        contentEnd
      )(code)
    }

    // Data.
    effects.consume(code)
    return data
  }

  // Close the current chunk and the whole content token.
  function contentEnd(code) {
    effects.exit(types.chunkContent)
    effects.exit(types.content)
    return ok(code)
  }

  // Consume the EOL and open the next chunk, linking it to the previous one.
  function contentContinue(code) {
    assert(markdownLineEnding(code), 'expected eol')
    effects.consume(code)
    effects.exit(types.chunkContent)
    previous = previous.next = effects.enter(types.chunkContent, {
      contentType: constants.contentTypeContent,
      previous: previous
    })
    return data
  }
}

// Lookahead past an EOL plus optional line prefix: `ok` when the next line
// continues the content, `nok` when it ends it.
function tokenizeContinuation(effects, ok, nok) {
  var self = this

  return startLookahead

  function startLookahead(code) {
    assert(markdownLineEnding(code), 'expected a line ending')
    effects.enter(types.lineEnding)
    effects.consume(code)
    effects.exit(types.lineEnding)
    return spaceFactory(effects, prefixed, types.linePrefix)
  }

  function prefixed(code) {
    // A blank line (or EOF) always ends the content.
    if (code === codes.eof || markdownLineEnding(code)) {
      return nok(code)
    }

    // Unless the line is indented enough to be code (and indented code is
    // enabled), let other flow constructs try to interrupt: if one can start
    // here the content ends (`nok`), otherwise it continues (`ok`).
    if (
      self.parser.constructs.disable.null.indexOf('codeIndented') > -1 ||
      prefixSize(self.events, types.linePrefix) < constants.tabSize
    ) {
      return effects.interrupt(self.parser.constructs.flow, nok, ok)(code)
    }

    return ok(code)
  }
}

129
node_modules/micromark/lib/tokenize/definition.js generated vendored Normal file
View File

@@ -0,0 +1,129 @@
'use strict'

var assert = require('assert')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
var types = require('../constant/types.js')
var normalizeIdentifier = require('../util/normalize-identifier.js')
var factoryDestination = require('./factory-destination.js')
var factoryLabel = require('./factory-label.js')
var factorySpace = require('./factory-space.js')
var factoryWhitespace = require('./factory-whitespace.js')
var factoryTitle = require('./factory-title.js')

function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}

var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)

// Flow construct for definitions: `[label]: destination "optional title"`.
var definition = {
  name: 'definition',
  tokenize: tokenizeDefinition
}

// Partial construct for the optional title part.
var titleConstruct = {tokenize: tokenizeTitle, partial: true}

function tokenizeDefinition(effects, ok, nok) {
  var self = this
  // Normalized label, registered on `parser.defined` when the definition
  // succeeds.
  var identifier

  return start

  // At the `[` starting the label.
  function start(code) {
    assert__default['default'](code === codes.leftSquareBracket, 'expected `[`')
    effects.enter(types.definition)
    return factoryLabel.call(
      self,
      effects,
      labelAfter,
      nok,
      types.definitionLabel,
      types.definitionLabelMarker,
      types.definitionLabelString
    )(code)
  }

  // After the label: expect `:`, then whitespace, a destination, and an
  // optional title (whether the title attempt passes or fails, trailing
  // whitespace is consumed before `after`).
  function labelAfter(code) {
    // The label token (including brackets) was just exited; slice `[`/`]`
    // off its serialized text.
    identifier = normalizeIdentifier(
      self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1)
    )

    if (code === codes.colon) {
      effects.enter(types.definitionMarker)
      effects.consume(code)
      effects.exit(types.definitionMarker)

      // Note: blank lines cant exist in content.
      return factoryWhitespace(
        effects,
        factoryDestination(
          effects,
          effects.attempt(
            titleConstruct,
            factorySpace(effects, after, types.whitespace),
            factorySpace(effects, after, types.whitespace)
          ),
          nok,
          types.definitionDestination,
          types.definitionDestinationLiteral,
          types.definitionDestinationLiteralMarker,
          types.definitionDestinationRaw,
          types.definitionDestinationString
        )
      )
    }

    return nok(code)
  }

  // After everything: the line must end here or this is not a definition.
  function after(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.definition)

      // Register the identifier, once.
      if (self.parser.defined.indexOf(identifier) < 0) {
        self.parser.defined.push(identifier)
      }

      return ok(code)
    }

    return nok(code)
  }
}

// Tokenize the optional title: whitespace, then a title opening with `"`,
// `'`, or `(`, then optional trailing whitespace.
function tokenizeTitle(effects, ok, nok) {
  return start

  function start(code) {
    return markdownLineEndingOrSpace(code)
      ? factoryWhitespace(effects, before)(code)
      : nok(code)
  }

  function before(code) {
    if (
      code === codes.quotationMark ||
      code === codes.apostrophe ||
      code === codes.leftParenthesis
    ) {
      return factoryTitle(
        effects,
        factorySpace(effects, after, types.whitespace),
        nok,
        types.definitionTitle,
        types.definitionTitleMarker,
        types.definitionTitleString
      )(code)
    }

    return nok(code)
  }

  // After the title: the line must end here.
  function after(code) {
    return code === codes.eof || markdownLineEnding(code) ? ok(code) : nok(code)
  }
}

module.exports = definition

120
node_modules/micromark/lib/tokenize/definition.mjs generated vendored Normal file
View File

@@ -0,0 +1,120 @@
// Flow construct for definitions: `[label]: destination "optional title"`.
var definition = {
  name: 'definition',
  tokenize: tokenizeDefinition
}
export default definition

import assert from 'assert'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import markdownLineEndingOrSpace from '../character/markdown-line-ending-or-space.mjs'
import types from '../constant/types.mjs'
import normalizeIdentifier from '../util/normalize-identifier.mjs'
import destinationFactory from './factory-destination.mjs'
import labelFactory from './factory-label.mjs'
import spaceFactory from './factory-space.mjs'
import whitespaceFactory from './factory-whitespace.mjs'
import titleFactory from './factory-title.mjs'

// Partial construct for the optional title part.
var titleConstruct = {tokenize: tokenizeTitle, partial: true}

function tokenizeDefinition(effects, ok, nok) {
  var self = this
  // Normalized label, registered on `parser.defined` when the definition
  // succeeds.
  var identifier

  return start

  // At the `[` starting the label.
  function start(code) {
    assert(code === codes.leftSquareBracket, 'expected `[`')
    effects.enter(types.definition)
    return labelFactory.call(
      self,
      effects,
      labelAfter,
      nok,
      types.definitionLabel,
      types.definitionLabelMarker,
      types.definitionLabelString
    )(code)
  }

  // After the label: expect `:`, then whitespace, a destination, and an
  // optional title (whether the title attempt passes or fails, trailing
  // whitespace is consumed before `after`).
  function labelAfter(code) {
    // The label token (including brackets) was just exited; slice `[`/`]`
    // off its serialized text.
    identifier = normalizeIdentifier(
      self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1)
    )

    if (code === codes.colon) {
      effects.enter(types.definitionMarker)
      effects.consume(code)
      effects.exit(types.definitionMarker)

      // Note: blank lines cant exist in content.
      return whitespaceFactory(
        effects,
        destinationFactory(
          effects,
          effects.attempt(
            titleConstruct,
            spaceFactory(effects, after, types.whitespace),
            spaceFactory(effects, after, types.whitespace)
          ),
          nok,
          types.definitionDestination,
          types.definitionDestinationLiteral,
          types.definitionDestinationLiteralMarker,
          types.definitionDestinationRaw,
          types.definitionDestinationString
        )
      )
    }

    return nok(code)
  }

  // After everything: the line must end here or this is not a definition.
  function after(code) {
    if (code === codes.eof || markdownLineEnding(code)) {
      effects.exit(types.definition)

      // Register the identifier, once.
      if (self.parser.defined.indexOf(identifier) < 0) {
        self.parser.defined.push(identifier)
      }

      return ok(code)
    }

    return nok(code)
  }
}

// Tokenize the optional title: whitespace, then a title opening with `"`,
// `'`, or `(`, then optional trailing whitespace.
function tokenizeTitle(effects, ok, nok) {
  return start

  function start(code) {
    return markdownLineEndingOrSpace(code)
      ? whitespaceFactory(effects, before)(code)
      : nok(code)
  }

  function before(code) {
    if (
      code === codes.quotationMark ||
      code === codes.apostrophe ||
      code === codes.leftParenthesis
    ) {
      return titleFactory(
        effects,
        spaceFactory(effects, after, types.whitespace),
        nok,
        types.definitionTitle,
        types.definitionTitleMarker,
        types.definitionTitleString
      )(code)
    }

    return nok(code)
  }

  // After the title: the line must end here.
  function after(code) {
    return code === codes.eof || markdownLineEnding(code) ? ok(code) : nok(code)
  }
}
View File

@@ -0,0 +1,145 @@
'use strict'

var asciiControl = require('../character/ascii-control.js')
var codes = require('../character/codes.js')
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var constants = require('../constant/constants.js')
var types = require('../constant/types.js')

// Parse a destination, either enclosed (`<...>`) or raw (no whitespace or
// control characters, parentheses balanced up to `max` deep).
//
// `type` is the token type for the whole destination, `literalType` and
// `literalMarkerType` are used for the enclosed form and its `<`/`>`
// markers, `rawType` for the raw form, and `stringType` for the inner
// string; `max` limits raw parenthesis nesting (default: unlimited).
// eslint-disable-next-line max-params
function destinationFactory(
  effects,
  ok,
  nok,
  type,
  literalType,
  literalMarkerType,
  rawType,
  stringType,
  max
) {
  var limit = max || Infinity
  // Current depth of unbalanced `(` in a raw destination.
  var balance = 0

  return start

  function start(code) {
    // `<` opens an enclosed destination.
    if (code === codes.lessThan) {
      effects.enter(type)
      effects.enter(literalType)
      effects.enter(literalMarkerType)
      effects.consume(code)
      effects.exit(literalMarkerType)
      return destinationEnclosedBefore
    }

    // A raw destination cannot start with a control character or `)`.
    if (asciiControl(code) || code === codes.rightParenthesis) {
      return nok(code)
    }

    effects.enter(type)
    effects.enter(rawType)
    effects.enter(stringType)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return destinationRaw(code)
  }

  // Directly after `<`, or after the string: `>` closes the destination
  // (immediately after `<` that means an empty one), anything else is string
  // content.
  function destinationEnclosedBefore(code) {
    if (code === codes.greaterThan) {
      effects.enter(literalMarkerType)
      effects.consume(code)
      effects.exit(literalMarkerType)
      effects.exit(literalType)
      effects.exit(type)
      return ok
    }

    effects.enter(stringType)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return destinationEnclosed(code)
  }

  // Inside `<...>`: anything except EOF, `<`, or an EOL, until `>`.
  function destinationEnclosed(code) {
    if (code === codes.greaterThan) {
      effects.exit(types.chunkString)
      effects.exit(stringType)
      return destinationEnclosedBefore(code)
    }

    if (
      code === codes.eof ||
      code === codes.lessThan ||
      markdownLineEnding(code)
    ) {
      return nok(code)
    }

    effects.consume(code)
    return code === codes.backslash
      ? destinationEnclosedEscape
      : destinationEnclosed
  }

  // After `\` in an enclosed destination: `<`, `>`, and `\` are escapable.
  function destinationEnclosedEscape(code) {
    if (
      code === codes.lessThan ||
      code === codes.greaterThan ||
      code === codes.backslash
    ) {
      effects.consume(code)
      return destinationEnclosed
    }

    return destinationEnclosed(code)
  }

  // In a raw destination.
  function destinationRaw(code) {
    if (code === codes.leftParenthesis) {
      // Refuse to nest parentheses deeper than `limit`.
      if (++balance > limit) return nok(code)
      effects.consume(code)
      return destinationRaw
    }

    if (code === codes.rightParenthesis) {
      // An unbalanced `)` ends the destination (it is left for the caller).
      if (!balance--) {
        effects.exit(types.chunkString)
        effects.exit(stringType)
        effects.exit(rawType)
        effects.exit(type)
        return ok(code)
      }

      effects.consume(code)
      return destinationRaw
    }

    // Whitespace or EOF ends the destination — unless parens are unbalanced.
    if (code === codes.eof || markdownLineEndingOrSpace(code)) {
      if (balance) return nok(code)
      effects.exit(types.chunkString)
      effects.exit(stringType)
      effects.exit(rawType)
      effects.exit(type)
      return ok(code)
    }

    if (asciiControl(code)) return nok(code)

    effects.consume(code)
    return code === codes.backslash ? destinationRawEscape : destinationRaw
  }

  // After `\` in a raw destination: `(`, `)`, and `\` are escapable.
  function destinationRawEscape(code) {
    if (
      code === codes.leftParenthesis ||
      code === codes.rightParenthesis ||
      code === codes.backslash
    ) {
      effects.consume(code)
      return destinationRaw
    }

    return destinationRaw(code)
  }
}

module.exports = destinationFactory

View File

@@ -0,0 +1,143 @@
export default destinationFactory

import asciiControl from '../character/ascii-control.mjs'
import codes from '../character/codes.mjs'
import markdownLineEndingOrSpace from '../character/markdown-line-ending-or-space.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import constants from '../constant/constants.mjs'
import types from '../constant/types.mjs'

// Parse a destination, either enclosed (`<...>`) or raw (no whitespace or
// control characters, parentheses balanced up to `max` deep).
//
// `type` is the token type for the whole destination, `literalType` and
// `literalMarkerType` are used for the enclosed form and its `<`/`>`
// markers, `rawType` for the raw form, and `stringType` for the inner
// string; `max` limits raw parenthesis nesting (default: unlimited).
// eslint-disable-next-line max-params
function destinationFactory(
  effects,
  ok,
  nok,
  type,
  literalType,
  literalMarkerType,
  rawType,
  stringType,
  max
) {
  var limit = max || Infinity
  // Current depth of unbalanced `(` in a raw destination.
  var balance = 0

  return start

  function start(code) {
    // `<` opens an enclosed destination.
    if (code === codes.lessThan) {
      effects.enter(type)
      effects.enter(literalType)
      effects.enter(literalMarkerType)
      effects.consume(code)
      effects.exit(literalMarkerType)
      return destinationEnclosedBefore
    }

    // A raw destination cannot start with a control character or `)`.
    if (asciiControl(code) || code === codes.rightParenthesis) {
      return nok(code)
    }

    effects.enter(type)
    effects.enter(rawType)
    effects.enter(stringType)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return destinationRaw(code)
  }

  // Directly after `<`, or after the string: `>` closes the destination
  // (immediately after `<` that means an empty one), anything else is string
  // content.
  function destinationEnclosedBefore(code) {
    if (code === codes.greaterThan) {
      effects.enter(literalMarkerType)
      effects.consume(code)
      effects.exit(literalMarkerType)
      effects.exit(literalType)
      effects.exit(type)
      return ok
    }

    effects.enter(stringType)
    effects.enter(types.chunkString, {contentType: constants.contentTypeString})
    return destinationEnclosed(code)
  }

  // Inside `<...>`: anything except EOF, `<`, or an EOL, until `>`.
  function destinationEnclosed(code) {
    if (code === codes.greaterThan) {
      effects.exit(types.chunkString)
      effects.exit(stringType)
      return destinationEnclosedBefore(code)
    }

    if (
      code === codes.eof ||
      code === codes.lessThan ||
      markdownLineEnding(code)
    ) {
      return nok(code)
    }

    effects.consume(code)
    return code === codes.backslash
      ? destinationEnclosedEscape
      : destinationEnclosed
  }

  // After `\` in an enclosed destination: `<`, `>`, and `\` are escapable.
  function destinationEnclosedEscape(code) {
    if (
      code === codes.lessThan ||
      code === codes.greaterThan ||
      code === codes.backslash
    ) {
      effects.consume(code)
      return destinationEnclosed
    }

    return destinationEnclosed(code)
  }

  // In a raw destination.
  function destinationRaw(code) {
    if (code === codes.leftParenthesis) {
      // Refuse to nest parentheses deeper than `limit`.
      if (++balance > limit) return nok(code)
      effects.consume(code)
      return destinationRaw
    }

    if (code === codes.rightParenthesis) {
      // An unbalanced `)` ends the destination (it is left for the caller).
      if (!balance--) {
        effects.exit(types.chunkString)
        effects.exit(stringType)
        effects.exit(rawType)
        effects.exit(type)
        return ok(code)
      }

      effects.consume(code)
      return destinationRaw
    }

    // Whitespace or EOF ends the destination — unless parens are unbalanced.
    if (code === codes.eof || markdownLineEndingOrSpace(code)) {
      if (balance) return nok(code)
      effects.exit(types.chunkString)
      effects.exit(stringType)
      effects.exit(rawType)
      effects.exit(type)
      return ok(code)
    }

    if (asciiControl(code)) return nok(code)

    effects.consume(code)
    return code === codes.backslash ? destinationRawEscape : destinationRaw
  }

  // After `\` in a raw destination: `(`, `)`, and `\` are escapable.
  function destinationRawEscape(code) {
    if (
      code === codes.leftParenthesis ||
      code === codes.rightParenthesis ||
      code === codes.backslash
    ) {
      effects.consume(code)
      return destinationRaw
    }

    return destinationRaw(code)
  }
}

Some files were not shown because too many files have changed in this diff Show More