planning
All checks were successful
Publish To Prod / deploy_and_publish (push) Successful in 35s

This commit is contained in:
2024-10-14 09:15:30 +02:00
parent bcba00a730
commit 6e64e138e2
21059 changed files with 2317811 additions and 1 deletion

17
node_modules/remark-parse/index.js generated vendored Normal file
View File

@@ -0,0 +1,17 @@
'use strict'
var unherit = require('unherit')
var xtend = require('xtend')
var Parser = require('./lib/parser.js')
module.exports = parse
parse.Parser = Parser
// Plugin entry point: attach a parser to the processor.
// Creates a processor-local subclass of `Parser` (via `unherit`) whose
// options merge the prototype defaults, processor settings, and `options`,
// then exposes it as `this.Parser`.
function parse(options) {
  var settings = this.data('settings')
  var BoundParser = unherit(Parser)

  BoundParser.prototype.options = xtend(
    BoundParser.prototype.options,
    settings,
    options
  )

  this.Parser = BoundParser
}

70
node_modules/remark-parse/lib/block-elements.js generated vendored Normal file
View File

@@ -0,0 +1,70 @@
'use strict'
module.exports = [
'address',
'article',
'aside',
'base',
'basefont',
'blockquote',
'body',
'caption',
'center',
'col',
'colgroup',
'dd',
'details',
'dialog',
'dir',
'div',
'dl',
'dt',
'fieldset',
'figcaption',
'figure',
'footer',
'form',
'frame',
'frameset',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'head',
'header',
'hgroup',
'hr',
'html',
'iframe',
'legend',
'li',
'link',
'main',
'menu',
'menuitem',
'meta',
'nav',
'noframes',
'ol',
'optgroup',
'option',
'p',
'param',
'pre',
'section',
'source',
'title',
'summary',
'table',
'tbody',
'td',
'tfoot',
'th',
'thead',
'title',
'tr',
'track',
'ul'
]

58
node_modules/remark-parse/lib/decode.js generated vendored Normal file
View File

@@ -0,0 +1,58 @@
'use strict'
var xtend = require('xtend')
var entities = require('parse-entities')
module.exports = factory
// Factory to create an entity decoder.
// `ctx` is the parser instance: its `offset` table supplies extra
// indentation per line, and its `file` receives warning messages raised
// by `parse-entities`.
function factory(ctx) {
  decoder.raw = decodeRaw
  return decoder
  // Normalize `position` to add an `indent`.
  // Walks the lines after `position.line` and collects the recorded
  // offset (+1) for each, stopping at the first line with no entry.
  function normalize(position) {
    var offsets = ctx.offset
    var line = position.line
    var result = []
    while (++line) {
      if (!(line in offsets)) {
        break
      }
      result.push((offsets[line] || 0) + 1)
    }
    return {start: position, indent: result}
  }
  // Decode `value` (at `position`) into text-nodes.
  // `handler` is invoked for both literal text and entity references.
  function decoder(value, position, handler) {
    entities(value, {
      position: normalize(position),
      warning: handleWarning,
      text: handler,
      reference: handler,
      textContext: ctx,
      referenceContext: ctx
    })
  }
  // Decode `value` (at `position`) into a string.
  function decodeRaw(value, position, options) {
    return entities(
      value,
      xtend(options, {position: normalize(position), warning: handleWarning})
    )
  }
  // Handle a warning.
  // See <https://github.com/wooorm/parse-entities> for the warnings.
  // Code 3 is deliberately ignored; everything else becomes a file message.
  function handleWarning(reason, position, code) {
    if (code !== 3) {
      ctx.file.message(reason, position)
    }
  }
}

10
node_modules/remark-parse/lib/defaults.js generated vendored Normal file
View File

@@ -0,0 +1,10 @@
'use strict'
// Default parser options; `set-options.js` validates user overrides
// against the keys and value types found here.
module.exports = {
  // Attach positional information to nodes.
  position: true,
  // GFM extensions: gates fenced code, strikethrough, and URL autolinks.
  gfm: true,
  // Stricter CommonMark behaviour (e.g. in definitions and interrupts).
  commonmark: false,
  // Enable `[^label]:` footnote definitions.
  footnotes: false,
  // Legacy markdown.pl-compatible behaviour (e.g. emphasis handling).
  pedantic: false,
  // Tag names recognised as HTML block elements.
  blocks: require('./block-elements')
}

17
node_modules/remark-parse/lib/locate/break.js generated vendored Normal file
View File

@@ -0,0 +1,17 @@
'use strict'
module.exports = locate
// Find where a hard break could start: the earliest line feed, walked
// back over the run of spaces immediately preceding it (but never back
// past `fromIndex`).
function locate(value, fromIndex) {
  var position = value.indexOf('\n', fromIndex)

  while (position > fromIndex && value.charAt(position - 1) === ' ') {
    position--
  }

  return position
}

7
node_modules/remark-parse/lib/locate/code-inline.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'
module.exports = locate
// Find the next grave accent, the marker that may open inline code.
function locate(value, fromIndex) {
  var marker = '`'
  return value.indexOf(marker, fromIndex)
}

7
node_modules/remark-parse/lib/locate/delete.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'
module.exports = locate
// Find the next `~~`, the marker that may open strikethrough.
function locate(value, fromIndex) {
  var marker = '~~'
  return value.indexOf(marker, fromIndex)
}

18
node_modules/remark-parse/lib/locate/emphasis.js generated vendored Normal file
View File

@@ -0,0 +1,18 @@
'use strict'
module.exports = locate
// Find the earliest emphasis marker (`*` or `_`) at or after `fromIndex`.
// Returns -1 only when neither marker occurs.
function locate(value, fromIndex) {
  var star = value.indexOf('*', fromIndex)
  var low = value.indexOf('_', fromIndex)

  if (star === -1) {
    return low
  }

  if (low === -1) {
    return star
  }

  return low < star ? low : star
}

7
node_modules/remark-parse/lib/locate/escape.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'
module.exports = locate
// Find the next backslash, the marker that may start a character escape.
function locate(value, fromIndex) {
  var backslash = '\\'
  return value.indexOf(backslash, fromIndex)
}

16
node_modules/remark-parse/lib/locate/link.js generated vendored Normal file
View File

@@ -0,0 +1,16 @@
'use strict'
module.exports = locate
// Find where a link or image could start: the earliest `[` or `![`.
function locate(value, fromIndex) {
  var image = value.indexOf('![', fromIndex)
  var link = value.indexOf('[', fromIndex)

  if (image === -1) {
    return link
  }

  // An image match implies a `[` at `image + 1`, so `link` can never be
  // `-1` here; the earlier of the two candidates wins.
  return Math.min(link, image)
}

18
node_modules/remark-parse/lib/locate/strong.js generated vendored Normal file
View File

@@ -0,0 +1,18 @@
'use strict'
module.exports = locate
// Find the earliest strong marker (`**` or `__`) at or after `fromIndex`.
// Returns -1 only when neither marker occurs.
function locate(value, fromIndex) {
  var star = value.indexOf('**', fromIndex)
  var low = value.indexOf('__', fromIndex)

  if (star === -1) {
    return low
  }

  if (low === -1) {
    return star
  }

  return low < star ? low : star
}

7
node_modules/remark-parse/lib/locate/tag.js generated vendored Normal file
View File

@@ -0,0 +1,7 @@
'use strict'
module.exports = locate
// Find the next `<`, the marker that may open an HTML tag or autolink.
function locate(value, fromIndex) {
  var lessThan = '<'
  return value.indexOf(lessThan, fromIndex)
}

26
node_modules/remark-parse/lib/locate/url.js generated vendored Normal file
View File

@@ -0,0 +1,26 @@
'use strict'
module.exports = locate
var protocols = ['https://', 'http://', 'mailto:']

// Find the earliest occurrence of any supported protocol at or after
// `fromIndex`. Disabled (always -1) unless GFM is on.
function locate(value, fromIndex) {
  var best = -1
  var candidate
  var offset

  if (!this.options.gfm) {
    return -1
  }

  for (offset = 0; offset < protocols.length; offset++) {
    candidate = value.indexOf(protocols[offset], fromIndex)

    if (candidate !== -1 && (best === -1 || candidate < best)) {
      best = candidate
    }
  }

  return best
}

42
node_modules/remark-parse/lib/parse.js generated vendored Normal file
View File

@@ -0,0 +1,42 @@
'use strict'
var xtend = require('xtend')
var removePosition = require('unist-util-remove-position')
module.exports = parse
var lineFeed = '\n'
var lineBreaksExpression = /\r\n|\r/g
// Parse the bound file.
// Returns the mdast `root` node for `self.file`; the actual work happens
// in `tokenizeBlock` (see tokenizer.js).
function parse() {
  var self = this
  var value = String(self.file)
  var start = {line: 1, column: 1, offset: 0}
  var content = xtend(start)
  var node
  // Clean non-unix newlines: `\r\n` and `\r` are all changed to `\n`.
  // This should not affect positional information.
  value = value.replace(lineBreaksExpression, lineFeed)
  // BOM.
  // The byte-order mark is dropped from the value but still counts
  // towards the starting column and offset of the content.
  if (value.charCodeAt(0) === 0xfeff) {
    value = value.slice(1)
    content.column++
    content.offset++
  }
  // NOTE(review): `self.eof` is presumably set as a side effect of
  // `tokenizeBlock` (defined elsewhere) — the object literal below relies
  // on `children` being evaluated before `position.end` reads `self.eof`.
  node = {
    type: 'root',
    children: self.tokenizeBlock(value, content),
    position: {start: start, end: self.eof || xtend(start)}
  }
  if (!self.options.position) {
    removePosition(node, true)
  }
  return node
}

149
node_modules/remark-parse/lib/parser.js generated vendored Normal file
View File

@@ -0,0 +1,149 @@
'use strict'
var xtend = require('xtend')
var toggle = require('state-toggle')
var vfileLocation = require('vfile-location')
var unescape = require('./unescape')
var decode = require('./decode')
var tokenizer = require('./tokenizer')
module.exports = Parser
// A markdown parser bound to one document (`doc` is unused here) and its
// vfile (`file`, used for positions and warning messages).
function Parser(doc, file) {
  this.file = file
  // Map of line number -> extra indentation eaten by block tokenizers.
  this.offset = {}
  // Instance-local copy of the prototype defaults, then validated.
  this.options = xtend(this.options)
  this.setOptions({})
  // Parser state flags, toggled through the `enter*`/`exitStart` helpers
  // defined on the prototype below.
  this.inList = false
  this.inBlock = false
  this.inLink = false
  this.atStart = true
  this.toOffset = vfileLocation(file).toOffset
  this.unescape = unescape(this, 'escape')
  this.decode = decode(this)
}
var proto = Parser.prototype
// Expose core.
proto.setOptions = require('./set-options')
proto.parse = require('./parse')
// Expose `defaults`.
proto.options = require('./defaults')
// Enter and exit helpers.
proto.exitStart = toggle('atStart', true)
proto.enterList = toggle('inList', false)
proto.enterLink = toggle('inLink', false)
proto.enterBlock = toggle('inBlock', false)
// Nodes that can interrupt a paragraph:
//
// ```markdown
// A paragraph, followed by a thematic break.
// ___
// ```
//
// In the above example, the thematic break “interrupts” the paragraph.
proto.interruptParagraph = [
  ['thematicBreak'],
  ['atxHeading'],
  ['fencedCode'],
  ['blockquote'],
  ['html'],
  ['setextHeading', {commonmark: false}],
  ['definition', {commonmark: false}],
  ['footnote', {commonmark: false}]
]
// Nodes that can interrupt a list:
//
// ```markdown
// - One
// ___
// ```
//
// In the above example, the thematic break “interrupts” the list.
proto.interruptList = [
  ['atxHeading', {pedantic: false}],
  ['fencedCode', {pedantic: false}],
  ['thematicBreak', {pedantic: false}],
  ['definition', {commonmark: false}],
  ['footnote', {commonmark: false}]
]
// Nodes that can interrupt a blockquote:
//
// ```markdown
// > A paragraph.
// ___
// ```
//
// In the above example, the thematic break “interrupts” the blockquote.
proto.interruptBlockquote = [
  ['indentedCode', {commonmark: true}],
  ['fencedCode', {commonmark: true}],
  ['atxHeading', {commonmark: true}],
  ['setextHeading', {commonmark: true}],
  ['thematicBreak', {commonmark: true}],
  ['html', {commonmark: true}],
  ['list', {commonmark: true}],
  ['definition', {commonmark: false}],
  ['footnote', {commonmark: false}]
]
// Handlers.
// Block tokenizers, tried in the order of these keys (see `blockMethods`).
proto.blockTokenizers = {
  newline: require('./tokenize/newline'),
  indentedCode: require('./tokenize/code-indented'),
  fencedCode: require('./tokenize/code-fenced'),
  blockquote: require('./tokenize/blockquote'),
  atxHeading: require('./tokenize/heading-atx'),
  thematicBreak: require('./tokenize/thematic-break'),
  list: require('./tokenize/list'),
  setextHeading: require('./tokenize/heading-setext'),
  html: require('./tokenize/html-block'),
  footnote: require('./tokenize/footnote-definition'),
  definition: require('./tokenize/definition'),
  table: require('./tokenize/table'),
  paragraph: require('./tokenize/paragraph')
}
// Inline tokenizers, tried in the order of these keys (see `inlineMethods`).
proto.inlineTokenizers = {
  escape: require('./tokenize/escape'),
  autoLink: require('./tokenize/auto-link'),
  url: require('./tokenize/url'),
  html: require('./tokenize/html-inline'),
  link: require('./tokenize/link'),
  reference: require('./tokenize/reference'),
  strong: require('./tokenize/strong'),
  emphasis: require('./tokenize/emphasis'),
  deletion: require('./tokenize/delete'),
  code: require('./tokenize/code-inline'),
  break: require('./tokenize/break'),
  text: require('./tokenize/text')
}
// Expose precedence.
proto.blockMethods = keys(proto.blockTokenizers)
proto.inlineMethods = keys(proto.inlineTokenizers)
// Tokenizers.
proto.tokenizeBlock = tokenizer('block')
proto.tokenizeInline = tokenizer('inline')
proto.tokenizeFactory = tokenizer
// Get all keys in `value`.
// Get all enumerable keys in `value`, in iteration order.
// Uses `for…in` (rather than `Object.keys`) to mirror how the tokenizer
// maps above are walked.
function keys(value) {
  var found = []
  var name

  for (name in value) {
    found.push(name)
  }

  return found
}

46
node_modules/remark-parse/lib/set-options.js generated vendored Normal file
View File

@@ -0,0 +1,46 @@
'use strict'
var xtend = require('xtend')
var escapes = require('markdown-escapes')
var defaults = require('./defaults')
module.exports = setOptions
// Apply user `options` on the parser.
// Every key known to `defaults` is validated; keys that are not given
// inherit the currently configured value. Also refreshes the list of
// escapable characters. Returns the parser for chaining.
// Throws when `options` is neither nullish nor an object, or when a
// setting has the wrong type.
function setOptions(options) {
  var self = this
  var previous = self.options
  var key
  var value
  var expected

  if (options == null) {
    options = {}
  } else if (typeof options === 'object') {
    options = xtend(options)
  } else {
    throw new Error('Invalid value `' + options + '` for setting `options`')
  }

  for (key in defaults) {
    value = options[key]

    // Missing settings fall back to the current configuration.
    if (value == null) {
      value = previous[key]
    }

    // `blocks` is a list of tag names; every other setting is a boolean.
    expected = key === 'blocks' ? 'object' : 'boolean'

    if (typeof value !== expected) {
      throw new Error(
        'Invalid value `' + value + '` for setting `options.' + key + '`'
      )
    }

    options[key] = value
  }

  self.options = options
  self.escape = escapes(options)

  return self
}

133
node_modules/remark-parse/lib/tokenize/auto-link.js generated vendored Normal file
View File

@@ -0,0 +1,133 @@
'use strict'
var whitespace = require('is-whitespace-character')
var decode = require('parse-entities')
var locate = require('../locate/tag')
module.exports = autoLink
autoLink.locator = locate
autoLink.notInLink = true
var lessThan = '<'
var greaterThan = '>'
var atSign = '@'
var slash = '/'
var mailto = 'mailto:'
var mailtoLength = mailto.length
// Tokenise an autolink: `<scheme://host>` or `<user@host>`.
// Everything between the angle brackets is re-tokenised with only the
// `text` tokenizer active, and emitted as a `link` node.
function autoLink(eat, value, silent) {
  var self = this
  var subvalue = ''
  var length = value.length
  var index = 0
  var queue = ''
  var hasAtCharacter = false
  var link = ''
  var character
  var now
  var content
  var tokenizers
  var exit
  if (value.charAt(0) !== lessThan) {
    return
  }
  index++
  subvalue = lessThan
  // Eat the scheme (or local part): up to `>`, `@`, `://`, or whitespace.
  while (index < length) {
    character = value.charAt(index)
    if (
      whitespace(character) ||
      character === greaterThan ||
      character === atSign ||
      (character === ':' && value.charAt(index + 1) === slash)
    ) {
      break
    }
    queue += character
    index++
  }
  if (!queue) {
    return
  }
  link += queue
  queue = ''
  character = value.charAt(index)
  link += character
  index++
  // Decide between an email (`@`) and a URL (`://`).
  if (character === atSign) {
    hasAtCharacter = true
  } else {
    if (character !== ':' || value.charAt(index + 1) !== slash) {
      return
    }
    link += slash
    index++
  }
  // Eat the rest of the link, up to whitespace or the closing `>`.
  while (index < length) {
    character = value.charAt(index)
    if (whitespace(character) || character === greaterThan) {
      break
    }
    queue += character
    index++
  }
  character = value.charAt(index)
  if (!queue || character !== greaterThan) {
    return
  }
  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }
  link += queue
  content = link
  subvalue += link + character
  now = eat.now()
  now.column++
  now.offset++
  // For emails, strip a leading `mailto:` from the displayed content, or
  // prepend it to the target when absent.
  if (hasAtCharacter) {
    if (link.slice(0, mailtoLength).toLowerCase() === mailto) {
      content = content.substr(mailtoLength)
      now.column += mailtoLength
      now.offset += mailtoLength
    } else {
      link = mailto + link
    }
  }
  // Temporarily remove all tokenizers except text in autolinks.
  tokenizers = self.inlineTokenizers
  self.inlineTokenizers = {text: tokenizers.text}
  exit = self.enterLink()
  content = self.tokenizeInline(content, now)
  self.inlineTokenizers = tokenizers
  exit()
  return eat(subvalue)({
    type: 'link',
    title: null,
    url: decode(link, {nonTerminated: false}),
    children: content
  })
}

124
node_modules/remark-parse/lib/tokenize/blockquote.js generated vendored Normal file
View File

@@ -0,0 +1,124 @@
'use strict'
var trim = require('trim')
var interrupt = require('../util/interrupt')
module.exports = blockquote
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var greaterThan = '>'
// Tokenise a blockquote: consecutive lines prefixed with `>` (plus lazy
// continuation lines), whose stripped contents are re-tokenised as blocks.
function blockquote(eat, value, silent) {
  var self = this
  var offsets = self.offset
  var tokenizers = self.blockTokenizers
  var interruptors = self.interruptBlockquote
  var now = eat.now()
  var currentLine = now.line
  var length = value.length
  var values = []
  var contents = []
  var indents = []
  var add
  var index = 0
  var character
  var rest
  var nextIndex
  var content
  var line
  var startIndex
  var prefixed
  var exit
  // The first line must start with `>` after optional indentation.
  while (index < length) {
    character = value.charAt(index)
    if (character !== space && character !== tab) {
      break
    }
    index++
  }
  if (value.charAt(index) !== greaterThan) {
    return
  }
  if (silent) {
    return true
  }
  index = 0
  // Walk line by line, recording the raw line (`values`), the stripped
  // content (`contents`), and how much prefix was eaten (`indents`).
  while (index < length) {
    nextIndex = value.indexOf(lineFeed, index)
    startIndex = index
    prefixed = false
    if (nextIndex === -1) {
      nextIndex = length
    }
    while (index < length) {
      character = value.charAt(index)
      if (character !== space && character !== tab) {
        break
      }
      index++
    }
    if (value.charAt(index) === greaterThan) {
      index++
      prefixed = true
      if (value.charAt(index) === space) {
        index++
      }
    } else {
      index = startIndex
    }
    content = value.slice(index, nextIndex)
    // A blank line without a `>` prefix ends the blockquote.
    if (!prefixed && !trim(content)) {
      index = startIndex
      break
    }
    if (!prefixed) {
      rest = value.slice(index)
      // Check if the following code contains a possible block.
      if (interrupt(interruptors, tokenizers, self, [eat, rest, true])) {
        break
      }
    }
    line = startIndex === index ? content : value.slice(startIndex, nextIndex)
    indents.push(index - startIndex)
    values.push(line)
    contents.push(content)
    index = nextIndex + 1
  }
  index = -1
  length = indents.length
  add = eat(values.join(lineFeed))
  // Record the eaten prefix widths so positions inside the quote line up.
  while (++index < length) {
    offsets[currentLine] = (offsets[currentLine] || 0) + indents[index]
    currentLine++
  }
  exit = self.enterBlock()
  contents = self.tokenizeBlock(contents.join(lineFeed), now)
  exit()
  return add({type: 'blockquote', children: contents})
}

42
node_modules/remark-parse/lib/tokenize/break.js generated vendored Normal file
View File

@@ -0,0 +1,42 @@
'use strict'
var locate = require('../locate/break')
module.exports = hardBreak
hardBreak.locator = locate
var space = ' '
var lineFeed = '\n'
var minBreakLength = 2

// Tokenise a hard break: at least two spaces immediately followed by a
// line feed. Consumes the spaces and the newline; emits a `break` node.
function hardBreak(eat, value, silent) {
  var size = value.length
  var position = 0
  var consumed = ''
  var current

  while (position < size) {
    current = value.charAt(position)

    if (current === lineFeed) {
      // Need at least `minBreakLength` spaces before the newline.
      if (position < minBreakLength) {
        return
      }

      /* istanbul ignore if - never used (yet) */
      if (silent) {
        return true
      }

      consumed += current
      return eat(consumed)({type: 'break'})
    }

    // Anything other than a space before the newline disqualifies it.
    if (current !== space) {
      return
    }

    consumed += current
    position++
  }
}

255
node_modules/remark-parse/lib/tokenize/code-fenced.js generated vendored Normal file
View File

@@ -0,0 +1,255 @@
'use strict'
var trim = require('trim-trailing-lines')
module.exports = fencedCode
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var tilde = '~'
var graveAccent = '`'
var minFenceCount = 3
var tabSize = 4
// Tokenise a fenced code block (GFM only): three or more backticks or
// tildes, an optional info string (`lang` + `meta`), content, and an
// optional closing fence of at least the opening length.
function fencedCode(eat, value, silent) {
  var self = this
  var gfm = self.options.gfm
  // `+ 1` lets `charAt` run one past the end, returning `''` so the
  // final-character checks below treat EOF like a line end.
  var length = value.length + 1
  var index = 0
  var subvalue = ''
  var fenceCount
  var marker
  var character
  var flag
  var lang
  var meta
  var queue
  var content
  var exdentedContent
  var closing
  var exdentedClosing
  var indent
  var now
  if (!gfm) {
    return
  }
  // Eat initial spacing.
  while (index < length) {
    character = value.charAt(index)
    if (character !== space && character !== tab) {
      break
    }
    subvalue += character
    index++
  }
  indent = index
  // Eat the fence.
  character = value.charAt(index)
  if (character !== tilde && character !== graveAccent) {
    return
  }
  index++
  marker = character
  fenceCount = 1
  subvalue += character
  while (index < length) {
    character = value.charAt(index)
    if (character !== marker) {
      break
    }
    subvalue += character
    fenceCount++
    index++
  }
  if (fenceCount < minFenceCount) {
    return
  }
  // Eat spacing before flag.
  while (index < length) {
    character = value.charAt(index)
    if (character !== space && character !== tab) {
      break
    }
    subvalue += character
    index++
  }
  // Eat flag.
  flag = ''
  queue = ''
  while (index < length) {
    character = value.charAt(index)
    if (
      character === lineFeed ||
      character === tilde ||
      character === graveAccent
    ) {
      break
    }
    if (character === space || character === tab) {
      queue += character
    } else {
      flag += queue + character
      queue = ''
    }
    index++
  }
  character = value.charAt(index)
  if (character && character !== lineFeed) {
    return
  }
  if (silent) {
    return true
  }
  now = eat.now()
  now.column += subvalue.length
  now.offset += subvalue.length
  subvalue += flag
  flag = self.decode.raw(self.unescape(flag), now)
  if (queue) {
    subvalue += queue
  }
  queue = ''
  closing = ''
  exdentedClosing = ''
  content = ''
  exdentedContent = ''
  // Eat content.
  // `closing` buffers text that might turn out to be the closing fence;
  // the `exdented*` variants have the opening indentation stripped.
  while (index < length) {
    character = value.charAt(index)
    content += closing
    exdentedContent += exdentedClosing
    closing = ''
    exdentedClosing = ''
    if (character !== lineFeed) {
      content += character
      exdentedClosing += character
      index++
      continue
    }
    // Add the newline to `subvalue` if it's the first character. Otherwise,
    // add it to the `closing` queue.
    if (content) {
      closing += character
      exdentedClosing += character
    } else {
      subvalue += character
    }
    queue = ''
    index++
    // Leading spaces on the next line; strip up to `indent` of them from
    // the exdented copy.
    while (index < length) {
      character = value.charAt(index)
      if (character !== space) {
        break
      }
      queue += character
      index++
    }
    closing += queue
    exdentedClosing += queue.slice(indent)
    // Indented four or more: cannot be a closing fence.
    if (queue.length >= tabSize) {
      continue
    }
    queue = ''
    while (index < length) {
      character = value.charAt(index)
      if (character !== marker) {
        break
      }
      queue += character
      index++
    }
    closing += queue
    exdentedClosing += queue
    // A closing fence must be at least as long as the opening one.
    if (queue.length < fenceCount) {
      continue
    }
    queue = ''
    while (index < length) {
      character = value.charAt(index)
      if (character !== space && character !== tab) {
        break
      }
      closing += character
      exdentedClosing += character
      index++
    }
    // Only trailing whitespace may follow the closing fence.
    if (!character || character === lineFeed) {
      break
    }
  }
  subvalue += content + closing
  // Get lang and meta from the flag.
  index = -1
  length = flag.length
  while (++index < length) {
    character = flag.charAt(index)
    if (character === space || character === tab) {
      if (!lang) {
        lang = flag.slice(0, index)
      }
    } else if (lang) {
      meta = flag.slice(index)
      break
    }
  }
  return eat(subvalue)({
    type: 'code',
    lang: lang || flag || null,
    meta: meta || null,
    value: trim(exdentedContent)
  })
}

View File

@@ -0,0 +1,98 @@
'use strict'
var repeat = require('repeat-string')
var trim = require('trim-trailing-lines')
module.exports = indentedCode
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var tabSize = 4
var codeIndent = repeat(space, tabSize)
// Tokenise an indented code block: lines indented by four spaces or a
// tab. `subvalue` tracks everything eaten (indentation included) while
// `content` holds the de-indented code value.
function indentedCode(eat, value, silent) {
  var index = -1
  var length = value.length
  var subvalue = ''
  var content = ''
  var subvalueQueue = ''
  var contentQueue = ''
  var character
  var blankQueue
  var indent
  while (++index < length) {
    character = value.charAt(index)
    if (indent) {
      // The current line was properly indented: flush the queued text and
      // copy the rest of the line verbatim.
      indent = false
      subvalue += subvalueQueue
      content += contentQueue
      subvalueQueue = ''
      contentQueue = ''
      if (character === lineFeed) {
        subvalueQueue = character
        contentQueue = character
      } else {
        subvalue += character
        content += character
        while (++index < length) {
          character = value.charAt(index)
          if (!character || character === lineFeed) {
            contentQueue = character
            subvalueQueue = character
            break
          }
          subvalue += character
          content += character
        }
      }
    } else if (
      character === space &&
      value.charAt(index + 1) === character &&
      value.charAt(index + 2) === character &&
      value.charAt(index + 3) === character
    ) {
      // Four spaces: a code-indented line follows.
      subvalueQueue += codeIndent
      index += 3
      indent = true
    } else if (character === tab) {
      subvalueQueue += character
      indent = true
    } else {
      // Not indented: only whitespace-only (blank) lines may continue the
      // block; anything else ends it.
      blankQueue = ''
      while (character === tab || character === space) {
        blankQueue += character
        character = value.charAt(++index)
      }
      if (character !== lineFeed) {
        break
      }
      subvalueQueue += blankQueue + character
      contentQueue += character
    }
  }
  if (content) {
    if (silent) {
      return true
    }
    return eat(subvalue)({
      type: 'code',
      lang: null,
      meta: null,
      value: trim(content)
    })
  }
}

108
node_modules/remark-parse/lib/tokenize/code-inline.js generated vendored Normal file
View File

@@ -0,0 +1,108 @@
'use strict'
var whitespace = require('is-whitespace-character')
var locate = require('../locate/code-inline')
module.exports = inlineCode
inlineCode.locator = locate
var graveAccent = '`'
// Tokenise inline code: a run of backticks, content, and a closing run
// of exactly the same length. Interior whitespace is collapsed per the
// original algorithm below (leading/trailing runs are dropped).
function inlineCode(eat, value, silent) {
  var length = value.length
  var index = 0
  var queue = ''
  var tickQueue = ''
  var contentQueue
  var subqueue
  var count
  var openingCount
  var subvalue
  var character
  var found
  var next
  // Eat the opening run of backticks.
  while (index < length) {
    if (value.charAt(index) !== graveAccent) {
      break
    }
    queue += graveAccent
    index++
  }
  if (!queue) {
    return
  }
  subvalue = queue
  openingCount = index
  queue = ''
  next = value.charAt(index)
  count = 0
  // Scan for a closing run whose length equals `openingCount`.
  while (index < length) {
    character = next
    next = value.charAt(index + 1)
    if (character === graveAccent) {
      count++
      tickQueue += character
    } else {
      count = 0
      queue += character
    }
    if (count && next !== graveAccent) {
      if (count === openingCount) {
        subvalue += queue + tickQueue
        found = true
        break
      }
      // A shorter run of backticks is plain content.
      queue += tickQueue
      tickQueue = ''
    }
    index++
  }
  if (!found) {
    if (openingCount % 2 !== 0) {
      return
    }
    queue = ''
  }
  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }
  // Collapse whitespace: interior runs are kept only between non-space
  // content, so leading and trailing whitespace disappears.
  contentQueue = ''
  subqueue = ''
  length = queue.length
  index = -1
  while (++index < length) {
    character = queue.charAt(index)
    if (whitespace(character)) {
      subqueue += character
      continue
    }
    if (subqueue) {
      if (contentQueue) {
        contentQueue += subqueue
      }
      subqueue = ''
    }
    contentQueue += character
  }
  return eat(subvalue)({type: 'inlineCode', value: contentQueue})
}

275
node_modules/remark-parse/lib/tokenize/definition.js generated vendored Normal file
View File

@@ -0,0 +1,275 @@
'use strict'
var whitespace = require('is-whitespace-character')
var normalize = require('../util/normalize')
module.exports = definition
definition.notInList = true
definition.notInBlock = true
var quotationMark = '"'
var apostrophe = "'"
var backslash = '\\'
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var leftSquareBracket = '['
var rightSquareBracket = ']'
var leftParenthesis = '('
var rightParenthesis = ')'
var colon = ':'
var lessThan = '<'
var greaterThan = '>'
// Tokenise a link reference definition: `[label]: url "optional title"`.
// The URL may be wrapped in angle brackets; the title may be quoted with
// `"`, `'`, or parentheses.
function definition(eat, value, silent) {
  var self = this
  var commonmark = self.options.commonmark
  var index = 0
  var length = value.length
  var subvalue = ''
  var beforeURL
  var beforeTitle
  var queue
  var character
  var test
  var identifier
  var url
  var title
  // Eat leading indentation.
  while (index < length) {
    character = value.charAt(index)
    if (character !== space && character !== tab) {
      break
    }
    subvalue += character
    index++
  }
  character = value.charAt(index)
  if (character !== leftSquareBracket) {
    return
  }
  index++
  subvalue += character
  queue = ''
  // Eat the label, honouring backslash escapes.
  while (index < length) {
    character = value.charAt(index)
    if (character === rightSquareBracket) {
      break
    } else if (character === backslash) {
      queue += character
      index++
      character = value.charAt(index)
    }
    queue += character
    index++
  }
  if (
    !queue ||
    value.charAt(index) !== rightSquareBracket ||
    value.charAt(index + 1) !== colon
  ) {
    return
  }
  identifier = queue
  subvalue += queue + rightSquareBracket + colon
  index = subvalue.length
  queue = ''
  // Eat whitespace (including newlines) before the URL.
  while (index < length) {
    character = value.charAt(index)
    if (character !== tab && character !== space && character !== lineFeed) {
      break
    }
    subvalue += character
    index++
  }
  character = value.charAt(index)
  queue = ''
  beforeURL = subvalue
  // `<enclosed>` URL; a missing `>` is tolerated outside commonmark by
  // rewinding and re-reading the URL as unenclosed.
  if (character === lessThan) {
    index++
    while (index < length) {
      character = value.charAt(index)
      if (!isEnclosedURLCharacter(character)) {
        break
      }
      queue += character
      index++
    }
    character = value.charAt(index)
    if (character === isEnclosedURLCharacter.delimiter) {
      subvalue += lessThan + queue + character
      index++
    } else {
      if (commonmark) {
        return
      }
      index -= queue.length + 1
      queue = ''
    }
  }
  // Unenclosed URL: up to whitespace or square brackets.
  if (!queue) {
    while (index < length) {
      character = value.charAt(index)
      if (!isUnclosedURLCharacter(character)) {
        break
      }
      queue += character
      index++
    }
    subvalue += queue
  }
  if (!queue) {
    return
  }
  url = queue
  queue = ''
  // Eat whitespace before a possible title.
  while (index < length) {
    character = value.charAt(index)
    if (character !== tab && character !== space && character !== lineFeed) {
      break
    }
    queue += character
    index++
  }
  character = value.charAt(index)
  test = null
  // `test` is the character that will close the title.
  if (character === quotationMark) {
    test = quotationMark
  } else if (character === apostrophe) {
    test = apostrophe
  } else if (character === leftParenthesis) {
    test = rightParenthesis
  }
  if (!test) {
    // No title: rewind to just after the URL.
    queue = ''
    index = subvalue.length
  } else if (queue) {
    subvalue += queue + character
    index = subvalue.length
    queue = ''
    while (index < length) {
      character = value.charAt(index)
      if (character === test) {
        break
      }
      // A blank line (or closer right after a newline) aborts the title.
      if (character === lineFeed) {
        index++
        character = value.charAt(index)
        if (character === lineFeed || character === test) {
          return
        }
        queue += lineFeed
      }
      queue += character
      index++
    }
    character = value.charAt(index)
    if (character !== test) {
      return
    }
    beforeTitle = subvalue
    subvalue += queue + character
    index++
    title = queue
    queue = ''
  } else {
    return
  }
  // Only trailing spaces/tabs may follow on the line.
  while (index < length) {
    character = value.charAt(index)
    if (character !== tab && character !== space) {
      break
    }
    subvalue += character
    index++
  }
  character = value.charAt(index)
  if (!character || character === lineFeed) {
    if (silent) {
      return true
    }
    beforeURL = eat(beforeURL).test().end
    url = self.decode.raw(self.unescape(url), beforeURL, {nonTerminated: false})
    if (title) {
      beforeTitle = eat(beforeTitle).test().end
      title = self.decode.raw(self.unescape(title), beforeTitle)
    }
    return eat(subvalue)({
      type: 'definition',
      identifier: normalize(identifier),
      label: identifier,
      title: title || null,
      url: url
    })
  }
}
// Check if `character` can be inside an enclosed URI.
function isEnclosedURLCharacter(character) {
  return (
    character !== greaterThan &&
    character !== leftSquareBracket &&
    character !== rightSquareBracket
  )
}
isEnclosedURLCharacter.delimiter = greaterThan
// Check if `character` can be inside an unclosed URI.
function isUnclosedURLCharacter(character) {
  return (
    character !== leftSquareBracket &&
    character !== rightSquareBracket &&
    !whitespace(character)
  )
}

60
node_modules/remark-parse/lib/tokenize/delete.js generated vendored Normal file
View File

@@ -0,0 +1,60 @@
'use strict'
var whitespace = require('is-whitespace-character')
var locate = require('../locate/delete')
module.exports = strikethrough
strikethrough.locator = locate
var tilde = '~'
var fence = '~~'
// Tokenise strikethrough (GFM only): `~~content~~`, where the content may
// not start or end with whitespace.
function strikethrough(eat, value, silent) {
  var self = this
  var character = ''
  var previous = ''
  var preceding = ''
  var subvalue = ''
  var index
  var length
  var now
  if (
    !self.options.gfm ||
    value.charAt(0) !== tilde ||
    value.charAt(1) !== tilde ||
    whitespace(value.charAt(2))
  ) {
    return
  }
  index = 1
  length = value.length
  now = eat.now()
  now.column += 2
  now.offset += 2
  // `previous`/`preceding` lag one and two characters behind `character`,
  // so a closing `~~` is detected only when not preceded by whitespace.
  while (++index < length) {
    character = value.charAt(index)
    if (
      character === tilde &&
      previous === tilde &&
      (!preceding || !whitespace(preceding))
    ) {
      /* istanbul ignore if - never used (yet) */
      if (silent) {
        return true
      }
      return eat(fence + subvalue + fence)({
        type: 'delete',
        children: self.tokenizeInline(subvalue, now)
      })
    }
    subvalue += previous
    preceding = previous
    previous = character
  }
}

86
node_modules/remark-parse/lib/tokenize/emphasis.js generated vendored Normal file
View File

@@ -0,0 +1,86 @@
'use strict'
var trim = require('trim')
var word = require('is-word-character')
var whitespace = require('is-whitespace-character')
var locate = require('../locate/emphasis')
module.exports = emphasis
emphasis.locator = locate
var asterisk = '*'
var underscore = '_'
var backslash = '\\'
// Tokenise emphasis: `*content*` or `_content_`. In pedantic mode the
// content may not touch whitespace; outside pedantic mode an `_` closer
// followed by a word character is treated as intraword and skipped.
function emphasis(eat, value, silent) {
  var self = this
  var index = 0
  var character = value.charAt(index)
  var now
  var pedantic
  var marker
  var queue
  var subvalue
  var length
  var prev
  if (character !== asterisk && character !== underscore) {
    return
  }
  pedantic = self.options.pedantic
  subvalue = character
  marker = character
  length = value.length
  index++
  queue = ''
  character = ''
  if (pedantic && whitespace(value.charAt(index))) {
    return
  }
  while (index < length) {
    prev = character
    character = value.charAt(index)
    if (character === marker && (!pedantic || !whitespace(prev))) {
      character = value.charAt(++index)
      // A lone marker closes the emphasis; a doubled one stays content.
      if (character !== marker) {
        // Reject empty/whitespace-only content and `**`-style runs.
        if (!trim(queue) || prev === marker) {
          return
        }
        // `_` inside a word does not close emphasis (non-pedantic).
        if (!pedantic && marker === underscore && word(character)) {
          queue += marker
          continue
        }
        /* istanbul ignore if - never used (yet) */
        if (silent) {
          return true
        }
        now = eat.now()
        now.column++
        now.offset++
        return eat(subvalue + queue + marker)({
          type: 'emphasis',
          children: self.tokenizeInline(queue, now)
        })
      }
      queue += marker
    }
    // Keep backslash escapes intact so the escaped character is not
    // mistaken for a closing marker.
    if (!pedantic && character === backslash) {
      queue += character
      character = value.charAt(++index)
    }
    queue += character
    index++
  }
}

34
node_modules/remark-parse/lib/tokenize/escape.js generated vendored Normal file
View File

@@ -0,0 +1,34 @@
'use strict'
var locate = require('../locate/escape')
module.exports = escape
escape.locator = locate
var lineFeed = '\n'
var backslash = '\\'

// Tokenise a character escape (`\X`).
// Only characters present in the parser's escape list qualify. An
// escaped line feed becomes a hard break; anything else becomes a
// literal text node.
function escape(eat, value, silent) {
  var self = this
  var escaped
  var node

  if (value.charAt(0) !== backslash) {
    return
  }

  escaped = value.charAt(1)

  if (self.escape.indexOf(escaped) === -1) {
    return
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  node = escaped === lineFeed ? {type: 'break'} : {type: 'text', value: escaped}

  return eat(backslash + escaped)(node)
}

View File

@@ -0,0 +1,186 @@
'use strict'
var whitespace = require('is-whitespace-character')
var normalize = require('../util/normalize')
module.exports = footnoteDefinition
footnoteDefinition.notInList = true
footnoteDefinition.notInBlock = true
var backslash = '\\'
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var leftSquareBracket = '['
var rightSquareBracket = ']'
var caret = '^'
var colon = ':'

// Optional indentation (up to four spaces or one tab) stripped from
// every continuation line of a definition body.
var EXPRESSION_INITIAL_TAB = /^( {4}|\t)?/gm

// Tokenize a footnote definition (`[^identifier]: content`), including
// indented continuation lines.  Inactive unless the non-standard
// `footnotes` option is enabled.  Produces a `footnoteDefinition` node
// whose children are the block-tokenized content.
function footnoteDefinition(eat, value, silent) {
  var self = this
  var offsets = self.offset
  var index
  var length
  var subvalue
  var now
  var currentLine
  var content
  var queue
  var subqueue
  var character
  var identifier
  var add
  var exit

  if (!self.options.footnotes) {
    return
  }

  index = 0
  length = value.length
  subvalue = ''
  now = eat.now()
  currentLine = now.line

  // Skip initial white-space.
  while (index < length) {
    character = value.charAt(index)

    if (!whitespace(character)) {
      break
    }

    subvalue += character
    index++
  }

  // The label must open with `[^`.
  if (
    value.charAt(index) !== leftSquareBracket ||
    value.charAt(index + 1) !== caret
  ) {
    return
  }

  subvalue += leftSquareBracket + caret
  index = subvalue.length
  queue = ''

  // Gather the identifier, honoring backslash escapes.
  while (index < length) {
    character = value.charAt(index)

    if (character === rightSquareBracket) {
      break
    } else if (character === backslash) {
      queue += character
      index++
      character = value.charAt(index)
    }

    queue += character
    index++
  }

  // The label must be non-empty and closed by `]:`.
  if (
    !queue ||
    value.charAt(index) !== rightSquareBracket ||
    value.charAt(index + 1) !== colon
  ) {
    return
  }

  if (silent) {
    return true
  }

  identifier = queue
  subvalue += queue + rightSquareBracket + colon
  index = subvalue.length

  // Skip spacing between the colon and the content.
  while (index < length) {
    character = value.charAt(index)

    if (character !== tab && character !== space) {
      break
    }

    subvalue += character
    index++
  }

  now.column += subvalue.length
  now.offset += subvalue.length
  queue = ''
  content = ''
  subqueue = ''

  // Collect the content: the rest of this line plus every following
  // line that is indented with at least one space.  Blank lines are
  // kept only when followed by such an indented line.
  while (index < length) {
    character = value.charAt(index)

    if (character === lineFeed) {
      subqueue = character
      index++

      while (index < length) {
        character = value.charAt(index)

        if (character !== lineFeed) {
          break
        }

        subqueue += character
        index++
      }

      queue += subqueue
      subqueue = ''

      while (index < length) {
        character = value.charAt(index)

        if (character !== space) {
          break
        }

        subqueue += character
        index++
      }

      // An unindented next line ends the definition.
      if (subqueue.length === 0) {
        break
      }

      queue += subqueue
    }

    if (queue) {
      content += queue
      queue = ''
    }

    content += character
    index++
  }

  subvalue += content

  // Strip per-line indentation, recording how much was removed in
  // `offsets` so positional information stays correct.
  content = content.replace(EXPRESSION_INITIAL_TAB, function(line) {
    offsets[currentLine] = (offsets[currentLine] || 0) + line.length
    currentLine++

    return ''
  })

  add = eat(subvalue)

  exit = self.enterBlock()
  content = self.tokenizeBlock(content, now)
  exit()

  return add({
    type: 'footnoteDefinition',
    identifier: normalize(identifier),
    label: identifier,
    children: content
  })
}

135
node_modules/remark-parse/lib/tokenize/heading-atx.js generated vendored Normal file
View File

@@ -0,0 +1,135 @@
'use strict'
module.exports = atxHeading
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var numberSign = '#'

// Headings cannot be deeper than six (`######`).
var maxFenceCount = 6

// Tokenize an ATX heading: optional initial spacing, one to six number
// signs, spacing, content, and an optional closing run of number signs
// (which is not part of the content outside pedantic mode).
function atxHeading(eat, value, silent) {
  var self = this
  var pedantic = self.options.pedantic
  var length = value.length + 1
  var index = -1
  var now = eat.now()
  var subvalue = ''
  var content = ''
  var character
  var queue
  var depth

  // Eat initial spacing.
  while (++index < length) {
    character = value.charAt(index)

    if (character !== space && character !== tab) {
      index--
      break
    }

    subvalue += character
  }

  // Eat hashes.
  depth = 0

  while (++index <= length) {
    character = value.charAt(index)

    if (character !== numberSign) {
      index--
      break
    }

    subvalue += character
    depth++
  }

  if (depth > maxFenceCount) {
    return
  }

  // Outside pedantic mode, an extra `#` directly after the opening run
  // (no spacing) means this is not a heading.
  if (!depth || (!pedantic && value.charAt(index + 1) === numberSign)) {
    return
  }

  length = value.length + 1

  // Eat intermediate white-space.
  queue = ''

  while (++index < length) {
    character = value.charAt(index)

    if (character !== space && character !== tab) {
      index--
      break
    }

    queue += character
  }

  // Exit when not in pedantic mode without spacing.
  if (!pedantic && queue.length === 0 && character && character !== lineFeed) {
    return
  }

  if (silent) {
    return true
  }

  // Eat content.
  subvalue += queue
  queue = ''
  content = ''

  while (++index < length) {
    character = value.charAt(index)

    if (!character || character === lineFeed) {
      break
    }

    if (character !== space && character !== tab && character !== numberSign) {
      content += queue + character
      queue = ''
      continue
    }

    // Buffer trailing spacing/hashes in `queue`; they only become
    // content if more non-space content follows.
    while (character === space || character === tab) {
      queue += character
      character = value.charAt(++index)
    }

    // `#` without a queue is part of the content.
    if (!pedantic && content && !queue && character === numberSign) {
      content += character
      continue
    }

    while (character === numberSign) {
      queue += character
      character = value.charAt(++index)
    }

    while (character === space || character === tab) {
      queue += character
      character = value.charAt(++index)
    }

    index--
  }

  now.column += subvalue.length
  now.offset += subvalue.length
  subvalue += content + queue

  return eat(subvalue)({
    type: 'heading',
    depth: depth,
    children: self.tokenizeInline(content, now)
  })
}

View File

@@ -0,0 +1,102 @@
'use strict'
module.exports = setextHeading
var lineFeed = '\n'
var tab = '\t'
var space = ' '
var equalsTo = '='
var dash = '-'

// At most three spaces of indentation are allowed before the content.
var maxIndent = 3

// `=` underlines produce a depth-1 heading, `-` underlines depth 2.
var equalsToDepth = 1
var dashDepth = 2

// Tokenize a setext heading: one line of content followed by an
// underline made entirely of `=` or `-` characters.
function setextHeading(eat, value, silent) {
  var self = this
  var now = eat.now()
  var length = value.length
  var index = -1
  var subvalue = ''
  var content
  var queue
  var character
  var marker
  var depth

  // Eat initial indentation.
  while (++index < length) {
    character = value.charAt(index)

    if (character !== space || index >= maxIndent) {
      index--
      break
    }

    subvalue += character
  }

  // Eat content.  Trailing white-space is buffered in `queue` so it is
  // kept in `subvalue` but excluded from `content`.
  content = ''
  queue = ''

  while (++index < length) {
    character = value.charAt(index)

    if (character === lineFeed) {
      index--
      break
    }

    if (character === space || character === tab) {
      queue += character
    } else {
      content += queue + character
      queue = ''
    }
  }

  now.column += subvalue.length
  now.offset += subvalue.length
  subvalue += content + queue

  // Ensure the content is followed by a newline and a valid marker.
  character = value.charAt(++index)
  marker = value.charAt(++index)

  if (character !== lineFeed || (marker !== equalsTo && marker !== dash)) {
    return
  }

  subvalue += character

  // Eat Setext-line.  Any character other than the marker or a line
  // feed invalidates the underline.
  queue = marker
  depth = marker === equalsTo ? equalsToDepth : dashDepth

  while (++index < length) {
    character = value.charAt(index)

    if (character !== marker) {
      if (character !== lineFeed) {
        return
      }

      index--
      break
    }

    queue += character
  }

  if (silent) {
    return true
  }

  return eat(subvalue + queue)({
    type: 'heading',
    depth: depth,
    children: self.tokenizeInline(content, now)
  })
}

111
node_modules/remark-parse/lib/tokenize/html-block.js generated vendored Normal file
View File

@@ -0,0 +1,111 @@
'use strict'
var openCloseTag = require('../util/html').openCloseTag
module.exports = blockHtml
var tab = '\t'
var space = ' '
var lineFeed = '\n'
var lessThan = '<'

// Opening/closing tests for the different kinds of HTML blocks: raw
// tags (`script`/`pre`/`style`), comments, processing instructions,
// declarations, CDATA, known block-level elements, and any other
// complete tag on a line of its own.
var rawOpenExpression = /^<(script|pre|style)(?=(\s|>|$))/i
var rawCloseExpression = /<\/(script|pre|style)>/i
var commentOpenExpression = /^<!--/
var commentCloseExpression = /-->/
var instructionOpenExpression = /^<\?/
var instructionCloseExpression = /\?>/
var directiveOpenExpression = /^<![A-Za-z]/
var directiveCloseExpression = />/
var cdataOpenExpression = /^<!\[CDATA\[/
var cdataCloseExpression = /\]\]>/
var elementCloseExpression = /^$/
var otherElementOpenExpression = new RegExp(openCloseTag.source + '\\s*$')

// Tokenize an HTML block: everything from an opening construct through
// the line on which its closing condition matches (or the end of the
// value), emitted as a single raw `html` node.
function blockHtml(eat, value, silent) {
  var self = this
  var blocks = self.options.blocks.join('|')
  var elementOpenExpression = new RegExp(
    '^</?(' + blocks + ')(?=(\\s|/?>|$))',
    'i'
  )
  var length = value.length
  var index = 0
  var next
  var line
  var offset
  var character
  var count
  var sequence
  var subvalue

  // [opening test, closing test, result returned in silent mode]
  // triples, checked in order.
  var sequences = [
    [rawOpenExpression, rawCloseExpression, true],
    [commentOpenExpression, commentCloseExpression, true],
    [instructionOpenExpression, instructionCloseExpression, true],
    [directiveOpenExpression, directiveCloseExpression, true],
    [cdataOpenExpression, cdataCloseExpression, true],
    [elementOpenExpression, elementCloseExpression, true],
    [otherElementOpenExpression, elementCloseExpression, false]
  ]

  // Eat initial spacing.
  while (index < length) {
    character = value.charAt(index)

    if (character !== tab && character !== space) {
      break
    }

    index++
  }

  if (value.charAt(index) !== lessThan) {
    return
  }

  // Inspect the first line to find the kind of HTML block it opens.
  next = value.indexOf(lineFeed, index + 1)
  next = next === -1 ? length : next
  line = value.slice(index, next)
  offset = -1
  count = sequences.length

  while (++offset < count) {
    if (sequences[offset][0].test(line)) {
      sequence = sequences[offset]
      break
    }
  }

  if (!sequence) {
    return
  }

  if (silent) {
    return sequence[2]
  }

  index = next

  // Unless the first line already closes the block, eat lines until
  // one matches the closing condition.
  if (!sequence[1].test(line)) {
    while (index < length) {
      next = value.indexOf(lineFeed, index + 1)
      next = next === -1 ? length : next
      line = value.slice(index + 1, next)

      if (sequence[1].test(line)) {
        if (line) {
          index = next
        }

        break
      }

      index = next
    }
  }

  subvalue = value.slice(0, index)

  return eat(subvalue)({type: 'html', value: subvalue})
}

59
node_modules/remark-parse/lib/tokenize/html-inline.js generated vendored Normal file
View File

@@ -0,0 +1,59 @@
'use strict'
var alphabetical = require('is-alphabetical')
var locate = require('../locate/tag')
var tag = require('../util/html').tag
module.exports = inlineHTML
inlineHTML.locator = locate
var lessThan = '<'
var questionMark = '?'
var exclamationMark = '!'
var slash = '/'

var htmlLinkOpenExpression = /^<a /i
var htmlLinkCloseExpression = /^<\/a>/i

// Tokenize inline HTML: one complete tag (element, comment,
// instruction, declaration, or CDATA) matched by the shared `tag`
// expression.  Raw `<a>`/`</a>` tags toggle `inLink` so the link
// tokenizer can reject nested links.
function inlineHTML(eat, value, silent) {
  var self = this
  var second
  var match

  // Fast path: must start with `<` and have room for at least `<x>`.
  if (value.charAt(0) !== lessThan || value.length < 3) {
    return
  }

  second = value.charAt(1)

  if (
    second !== questionMark &&
    second !== exclamationMark &&
    second !== slash &&
    !alphabetical(second)
  ) {
    return
  }

  match = value.match(tag)

  if (!match) {
    return
  }

  /* istanbul ignore if - not used yet. */
  if (silent) {
    return true
  }

  match = match[0]

  // Track whether we are inside a raw HTML link.
  if (self.inLink) {
    if (htmlLinkCloseExpression.test(match)) {
      self.inLink = false
    }
  } else if (htmlLinkOpenExpression.test(match)) {
    self.inLink = true
  }

  return eat(match)({type: 'html', value: match})
}

381
node_modules/remark-parse/lib/tokenize/link.js generated vendored Normal file
View File

@@ -0,0 +1,381 @@
'use strict'
var whitespace = require('is-whitespace-character')
var locate = require('../locate/link')
module.exports = link
link.locator = locate
var lineFeed = '\n'
var exclamationMark = '!'
var quotationMark = '"'
var apostrophe = "'"
var leftParenthesis = '('
var rightParenthesis = ')'
var lessThan = '<'
var greaterThan = '>'
var leftSquareBracket = '['
var backslash = '\\'
var rightSquareBracket = ']'
var graveAccent = '`'

// Tokenize an inline link (`[text](url "title")`) or image
// (`![alt](url "title")`).  Handles inline code and balanced brackets
// inside the text, `<...>`-enclosed URLs, and quoted or (in
// commonmark) parenthesized titles.
function link(eat, value, silent) {
  var self = this
  var subvalue = ''
  var index = 0
  var character = value.charAt(0)
  var pedantic = self.options.pedantic
  var commonmark = self.options.commonmark
  var gfm = self.options.gfm
  var closed
  var count
  var opening
  var beforeURL
  var beforeTitle
  var subqueue
  var hasMarker
  var isImage
  var content
  var marker
  var length
  var title
  var depth
  var queue
  var url
  var now
  var exit
  var node

  // Detect whether this is an image.
  if (character === exclamationMark) {
    isImage = true
    subvalue = character
    character = value.charAt(++index)
  }

  // Eat the opening.
  if (character !== leftSquareBracket) {
    return
  }

  // Exit when this is a link and we are already inside a link.
  if (!isImage && self.inLink) {
    return
  }

  subvalue += character
  queue = ''
  index++

  // Eat the content.
  length = value.length
  now = eat.now()
  depth = 0

  now.column += index
  now.offset += index

  while (index < length) {
    character = value.charAt(index)
    subqueue = character

    if (character === graveAccent) {
      // Inline-code in link content.  `opening` tracks the size of an
      // unclosed opening fence of grave accents.
      count = 1

      while (value.charAt(index + 1) === graveAccent) {
        subqueue += character
        index++
        count++
      }

      if (!opening) {
        opening = count
      } else if (count >= opening) {
        opening = 0
      }
    } else if (character === backslash) {
      // Allow brackets to be escaped.
      index++
      subqueue += value.charAt(index)
    } else if ((!opening || gfm) && character === leftSquareBracket) {
      // In GFM mode, brackets in code still count. In all other modes,
      // they do not.
      depth++
    } else if ((!opening || gfm) && character === rightSquareBracket) {
      if (depth) {
        depth--
      } else {
        // Allow white-space between content and url in GFM mode.
        if (!pedantic) {
          while (index < length) {
            character = value.charAt(index + 1)

            if (!whitespace(character)) {
              break
            }

            subqueue += character
            index++
          }
        }

        if (value.charAt(index + 1) !== leftParenthesis) {
          return
        }

        subqueue += leftParenthesis
        closed = true
        index++

        break
      }
    }

    queue += subqueue
    subqueue = ''
    index++
  }

  // Eat the content closing.
  if (!closed) {
    return
  }

  content = queue
  subvalue += queue + subqueue
  index++

  // Eat white-space.
  while (index < length) {
    character = value.charAt(index)

    if (!whitespace(character)) {
      break
    }

    subvalue += character
    index++
  }

  // Eat the URL.
  character = value.charAt(index)
  queue = ''
  beforeURL = subvalue

  if (character === lessThan) {
    // Enclosed URL: everything up to the closing `>` (line feeds are
    // forbidden in commonmark mode).
    index++
    beforeURL += lessThan

    while (index < length) {
      character = value.charAt(index)

      if (character === greaterThan) {
        break
      }

      if (commonmark && character === lineFeed) {
        return
      }

      queue += character
      index++
    }

    if (value.charAt(index) !== greaterThan) {
      return
    }

    subvalue += lessThan + queue + greaterThan
    url = queue
    index++
  } else {
    // Plain URL: balanced parentheses are allowed; stops before a
    // title marker or (outside pedantic mode) white-space.
    character = null
    subqueue = ''

    while (index < length) {
      character = value.charAt(index)

      if (
        subqueue &&
        (character === quotationMark ||
          character === apostrophe ||
          (commonmark && character === leftParenthesis))
      ) {
        break
      }

      if (whitespace(character)) {
        if (!pedantic) {
          break
        }

        subqueue += character
      } else {
        if (character === leftParenthesis) {
          depth++
        } else if (character === rightParenthesis) {
          if (depth === 0) {
            break
          }

          depth--
        }

        queue += subqueue
        subqueue = ''

        if (character === backslash) {
          queue += backslash
          character = value.charAt(++index)
        }

        queue += character
      }

      index++
    }

    subvalue += queue
    url = queue
    index = subvalue.length
  }

  // Eat white-space.
  queue = ''

  while (index < length) {
    character = value.charAt(index)

    if (!whitespace(character)) {
      break
    }

    queue += character
    index++
  }

  character = value.charAt(index)
  subvalue += queue

  // Eat the title.
  if (
    queue &&
    (character === quotationMark ||
      character === apostrophe ||
      (commonmark && character === leftParenthesis))
  ) {
    index++
    subvalue += character
    queue = ''
    marker = character === leftParenthesis ? rightParenthesis : character
    beforeTitle = subvalue

    // In commonmark-mode, things are pretty easy: the marker cannot occur
    // inside the title. Non-commonmark does, however, support nested
    // delimiters.
    if (commonmark) {
      while (index < length) {
        character = value.charAt(index)

        if (character === marker) {
          break
        }

        if (character === backslash) {
          queue += backslash
          character = value.charAt(++index)
        }

        index++
        queue += character
      }

      character = value.charAt(index)

      if (character !== marker) {
        return
      }

      title = queue
      subvalue += queue + character
      index++

      while (index < length) {
        character = value.charAt(index)

        if (!whitespace(character)) {
          break
        }

        subvalue += character
        index++
      }
    } else {
      // Non-commonmark: track whether the last marker seen might be
      // the closing one (`hasMarker`), reinstating it into the title
      // when more content follows.
      subqueue = ''

      while (index < length) {
        character = value.charAt(index)

        if (character === marker) {
          if (hasMarker) {
            queue += marker + subqueue
            subqueue = ''
          }

          hasMarker = true
        } else if (!hasMarker) {
          queue += character
        } else if (character === rightParenthesis) {
          subvalue += queue + marker + subqueue
          title = queue
          break
        } else if (whitespace(character)) {
          subqueue += character
        } else {
          queue += marker + subqueue + character
          subqueue = ''
          hasMarker = false
        }

        index++
      }
    }
  }

  if (value.charAt(index) !== rightParenthesis) {
    return
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  subvalue += rightParenthesis

  // Decode character references in URL and title, positioned at the
  // point in the document where each begins.
  url = self.decode.raw(self.unescape(url), eat(beforeURL).test().end, {
    nonTerminated: false
  })

  if (title) {
    beforeTitle = eat(beforeTitle).test().end
    title = self.decode.raw(self.unescape(title), beforeTitle)
  }

  node = {
    type: isImage ? 'image' : 'link',
    title: title || null,
    url: url
  }

  if (isImage) {
    node.alt = self.decode.raw(self.unescape(content), now) || null
  } else {
    exit = self.enterLink()
    node.children = self.tokenizeInline(content, now)
    exit()
  }

  return eat(subvalue)(node)
}

452
node_modules/remark-parse/lib/tokenize/list.js generated vendored Normal file
View File

@@ -0,0 +1,452 @@
'use strict'
/* eslint-disable max-params */
var trim = require('trim')
var repeat = require('repeat-string')
var decimal = require('is-decimal')
var getIndent = require('../util/get-indentation')
var removeIndent = require('../util/remove-indentation')
var interrupt = require('../util/interrupt')
module.exports = list
var asterisk = '*'
var underscore = '_'
var plusSign = '+'
var dash = '-'
var dot = '.'
var space = ' '
var lineFeed = '\n'
var tab = '\t'
var rightParenthesis = ')'
var lowercaseX = 'x'

// A tab advances the column to the next multiple of this size.
var tabSize = 4

// A blank line inside an item's value marks the list as loose (spread).
var looseListItemExpression = /\n\n(?!\s*$)/

// GFM task-list marker: `[ ]`, `[x]`, or `[X]` plus trailing spacing.
var taskItemExpression = /^\[([ \t]|x|X)][ \t]/

// Bullet (or ordinal plus delimiter) with indentation, spacing, and
// the rest of the line, in capture groups $1-$4.
var bulletExpression = /^([ \t]*)([*+-]|\d+[.)])( {1,4}(?! )| |\t|$|(?=\n))([^\n]*)/
var pedanticBulletExpression = /^([ \t]*)([*+-]|\d+[.)])([ \t]+)/
var initialIndentExpression = /^( {1,4}|\t)?/gm

// Tokenize a list: gather all lines belonging to the list, split them
// into items, then eat each item and block-tokenize it via `listItem`.
function list(eat, value, silent) {
  var self = this
  var commonmark = self.options.commonmark
  var pedantic = self.options.pedantic
  var tokenizers = self.blockTokenizers
  var interuptors = self.interruptList
  var index = 0
  var length = value.length
  var start = null
  var size = 0
  var queue
  var ordered
  var character
  var marker
  var nextIndex
  var startIndex
  var prefixed
  var currentMarker
  var content
  var line
  var prevEmpty
  var empty
  var items
  var allLines
  var emptyLines
  var item
  var enterTop
  var exitBlockquote
  var spread = false
  var node
  var now
  var end
  var indented

  // Measure the indentation of the first line; a full tab-stop or more
  // means this is not a list.
  while (index < length) {
    character = value.charAt(index)

    if (character === tab) {
      size += tabSize - (size % tabSize)
    } else if (character === space) {
      size++
    } else {
      break
    }

    index++
  }

  if (size >= tabSize) {
    return
  }

  // Detect the bullet: `*`, `+`, `-`, or an ordinal followed by `.`
  // (or `)` in commonmark mode).
  character = value.charAt(index)

  if (character === asterisk || character === plusSign || character === dash) {
    marker = character
    ordered = false
  } else {
    ordered = true
    queue = ''

    while (index < length) {
      character = value.charAt(index)

      if (!decimal(character)) {
        break
      }

      queue += character
      index++
    }

    character = value.charAt(index)

    if (
      !queue ||
      !(character === dot || (commonmark && character === rightParenthesis))
    ) {
      return
    }

    start = parseInt(queue, 10)
    marker = character
  }

  character = value.charAt(++index)

  // The bullet must be followed by spacing (or, outside pedantic mode,
  // a line feed or the end of the value).
  if (
    character !== space &&
    character !== tab &&
    (pedantic || (character !== lineFeed && character !== ''))
  ) {
    return
  }

  if (silent) {
    return true
  }

  index = 0
  items = []
  allLines = []
  emptyLines = []

  // Walk the value line by line, deciding for each line whether it
  // starts a new item, continues the current one, or ends the list.
  while (index < length) {
    nextIndex = value.indexOf(lineFeed, index)
    startIndex = index
    prefixed = false
    indented = false

    if (nextIndex === -1) {
      nextIndex = length
    }

    end = index + tabSize
    size = 0

    // Measure this line's indentation.
    while (index < length) {
      character = value.charAt(index)

      if (character === tab) {
        size += tabSize - (size % tabSize)
      } else if (character === space) {
        size++
      } else {
        break
      }

      index++
    }

    if (size >= tabSize) {
      indented = true
    }

    if (item && size >= item.indent) {
      indented = true
    }

    character = value.charAt(index)
    currentMarker = null

    // An unindented line may start a new item: look for a bullet.
    if (!indented) {
      if (
        character === asterisk ||
        character === plusSign ||
        character === dash
      ) {
        currentMarker = character
        index++
        size++
      } else {
        queue = ''

        while (index < length) {
          character = value.charAt(index)

          if (!decimal(character)) {
            break
          }

          queue += character
          index++
        }

        character = value.charAt(index)
        index++

        if (
          queue &&
          (character === dot || (commonmark && character === rightParenthesis))
        ) {
          currentMarker = character
          size += queue.length + 1
        }
      }

      if (currentMarker) {
        character = value.charAt(index)

        if (character === tab) {
          size += tabSize - (size % tabSize)
          index++
        } else if (character === space) {
          end = index + tabSize

          while (index < end) {
            if (value.charAt(index) !== space) {
              break
            }

            index++
            size++
          }

          // A full tab-stop of spaces followed by more spacing: rewind
          // so only one space counts — presumably the rest is indented
          // code inside the item (TODO confirm against CommonMark).
          if (index === end && value.charAt(index) === space) {
            index -= tabSize - 1
            size -= tabSize - 1
          }
        } else if (character !== lineFeed && character !== '') {
          currentMarker = null
        }
      }
    }

    if (currentMarker) {
      // A different bullet style ends the list (outside pedantic mode).
      if (!pedantic && marker !== currentMarker) {
        break
      }

      prefixed = true
    } else {
      if (!commonmark && !indented && value.charAt(startIndex) === space) {
        indented = true
      } else if (commonmark && item) {
        indented = size >= item.indent || size > tabSize
      }

      prefixed = false
      index = startIndex
    }

    line = value.slice(startIndex, nextIndex)
    content = startIndex === index ? line : value.slice(index, nextIndex)

    // A line that is actually a thematic break ends the list.
    if (
      currentMarker === asterisk ||
      currentMarker === underscore ||
      currentMarker === dash
    ) {
      if (tokenizers.thematicBreak.call(self, eat, line, true)) {
        break
      }
    }

    prevEmpty = empty
    empty = !prefixed && !trim(content).length

    if (indented && item) {
      // Indented continuation of the current item.
      item.value = item.value.concat(emptyLines, line)
      allLines = allLines.concat(emptyLines, line)
      emptyLines = []
    } else if (prefixed) {
      // A new item; pending blank lines make the list loose.
      if (emptyLines.length !== 0) {
        spread = true
        item.value.push('')
        item.trail = emptyLines.concat()
      }

      item = {
        value: [line],
        indent: size,
        trail: []
      }

      items.push(item)
      allLines = allLines.concat(emptyLines, line)
      emptyLines = []
    } else if (empty) {
      // Two consecutive blank lines end the list outside commonmark.
      if (prevEmpty && !commonmark) {
        break
      }

      emptyLines.push(line)
    } else {
      if (prevEmpty) {
        break
      }

      // Another block construct interrupting the list ends it.
      if (interrupt(interuptors, tokenizers, self, [eat, line, true])) {
        break
      }

      item.value = item.value.concat(emptyLines, line)
      allLines = allLines.concat(emptyLines, line)
      emptyLines = []
    }

    index = nextIndex + 1
  }

  node = eat(allLines.join(lineFeed)).reset({
    type: 'list',
    ordered: ordered,
    start: start,
    spread: spread,
    children: []
  })

  enterTop = self.enterList()
  exitBlockquote = self.enterBlock()
  index = -1
  length = items.length

  // Eat and tokenize each item, then eat the blank lines trailing it.
  while (++index < length) {
    item = items[index].value.join(lineFeed)
    now = eat.now()

    eat(item)(listItem(self, item, now), node)

    item = items[index].trail.join(lineFeed)

    if (index !== length - 1) {
      item += lineFeed
    }

    eat(item)
  }

  enterTop()
  exitBlockquote()

  return node
}
// Create a `listItem` node from an item's raw value: strip the bullet
// and indentation (pedantic or normal mechanics), detect a GFM
// task-list checkbox, and block-tokenize the remainder.
function listItem(ctx, value, position) {
  var offsets = ctx.offset
  var fn = ctx.options.pedantic ? pedanticListItem : normalListItem
  var checked = null
  var task
  var indent

  value = fn.apply(null, arguments)

  // In GFM mode, a leading `[ ]`/`[x]` marks a task-list item.
  if (ctx.options.gfm) {
    task = value.match(taskItemExpression)

    if (task) {
      indent = task[0].length
      checked = task[1].toLowerCase() === lowercaseX
      // Account for the removed checkbox in the offset of its line.
      offsets[position.line] += indent
      value = value.slice(indent)
    }
  }

  return {
    type: 'listItem',
    spread: looseListItemExpression.test(value),
    checked: checked,
    children: ctx.tokenizeBlock(value, position)
  }
}
// Create a list-item using overly simple mechanics: strip the bullet
// from the first line and up to one tab-stop of indentation from
// every line, recording removed lengths in `ctx.offset`.
function pedanticListItem(ctx, value, position) {
  var offsets = ctx.offset
  var line = position.line

  // Remove the list-item's bullet.
  value = value.replace(pedanticBulletExpression, replacer)

  // The initial line was also matched by the below, so we reset the `line`.
  line = position.line

  return value.replace(initialIndentExpression, replacer)

  // A simple replacer which removes all matches, and adds their length
  // to `offset`.
  function replacer($0) {
    offsets[line] = (offsets[line] || 0) + $0.length
    line++

    return ''
  }
}
// Create a list-item using sane mechanics: replace the bullet with
// equivalent spacing, remove the common indentation from all lines,
// and record the removed length per line in `ctx.offset`.
function normalListItem(ctx, value, position) {
  var offsets = ctx.offset
  var line = position.line
  var max
  var bullet
  var rest
  var lines
  var trimmedLines
  var index
  var length

  // Remove the list-item's bullet.
  value = value.replace(bulletExpression, replacer)

  lines = value.split(lineFeed)

  trimmedLines = removeIndent(value, getIndent(max).indent).split(lineFeed)

  // We replaced the initial bullet with something else above, which was used
  // to trick `removeIndentation` into removing some more characters when
  // possible. However, that could result in the initial line to be stripped
  // more than it should be.
  trimmedLines[0] = rest

  offsets[line] = (offsets[line] || 0) + bullet.length
  line++

  // Record, per subsequent line, how much indentation was stripped.
  index = 0
  length = lines.length

  while (++index < length) {
    offsets[line] =
      (offsets[line] || 0) + lines[index].length - trimmedLines[index].length
    line++
  }

  return trimmedLines.join(lineFeed)

  // Replace the bullet with equivalent spacing and remember its parts.
  function replacer($0, $1, $2, $3, $4) {
    bullet = $1 + $2 + $3
    rest = $4

    // Make sure that the first nine numbered list items can indent with an
    // extra space. That is, when the bullet did not receive an extra final
    // space.
    if (Number($2) < 10 && bullet.length % 2 === 1) {
      $2 = space + $2
    }

    max = $1 + repeat(space, $2.length) + $3

    return max + rest
  }
}

48
node_modules/remark-parse/lib/tokenize/newline.js generated vendored Normal file
View File

@@ -0,0 +1,48 @@
'use strict'
var whitespace = require('is-whitespace-character')
module.exports = newline
var lineFeed = '\n'

// Tokenize a run of white-space that starts with a line feed.  Commits
// white-space up to and including each line feed; trailing non-newline
// white-space is left for the next tokenizer.  Produces no node.
function newline(eat, value, silent) {
  var head = value.charAt(0)
  var eaten
  var pending
  var position
  var current

  if (head !== lineFeed) {
    return
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  eaten = head
  pending = ''
  position = 1

  while (position < value.length) {
    current = value.charAt(position)

    if (!whitespace(current)) {
      break
    }

    pending += current

    if (current === lineFeed) {
      eaten += pending
      pending = ''
    }

    position++
  }

  eat(eaten)
}

117
node_modules/remark-parse/lib/tokenize/paragraph.js generated vendored Normal file
View File

@@ -0,0 +1,117 @@
'use strict'
var trim = require('trim')
var decimal = require('is-decimal')
var trimTrailingLines = require('trim-trailing-lines')
var interrupt = require('../util/interrupt')
module.exports = paragraph
var tab = '\t'
var lineFeed = '\n'
var space = ' '
var tabSize = 4

// Tokenise paragraph.
// Eats lines until a blank line, an interrupting construct, or the end
// of the value, then inline-tokenizes the collected text.
function paragraph(eat, value, silent) {
  var self = this
  var settings = self.options
  var commonmark = settings.commonmark
  var gfm = settings.gfm
  var tokenizers = self.blockTokenizers
  var interruptors = self.interruptParagraph
  var index = value.indexOf(lineFeed)
  var length = value.length
  var position
  var subvalue
  var character
  var size
  var now

  while (index < length) {
    // Eat everything if there's no following newline.
    if (index === -1) {
      index = length
      break
    }

    // Stop if the next character is NEWLINE.
    if (value.charAt(index + 1) === lineFeed) {
      break
    }

    // In commonmark-mode, following indented lines are part of the paragraph.
    if (commonmark) {
      size = 0
      position = index + 1

      while (position < length) {
        character = value.charAt(position)

        if (character === tab) {
          size = tabSize
          break
        } else if (character === space) {
          size++
        } else {
          break
        }

        position++
      }

      if (size >= tabSize && character !== lineFeed) {
        index = value.indexOf(lineFeed, index + 1)
        continue
      }
    }

    subvalue = value.slice(index + 1)

    // Check if the following code contains a possible block.
    if (interrupt(interruptors, tokenizers, self, [eat, subvalue, true])) {
      break
    }

    // Break if the following line starts a list, when already in a list, or
    // when in commonmark, or when in gfm mode and the bullet is *not* numeric.
    if (
      tokenizers.list.call(self, eat, subvalue, true) &&
      (self.inList ||
        commonmark ||
        (gfm && !decimal(trim.left(subvalue).charAt(0))))
    ) {
      break
    }

    position = index
    index = value.indexOf(lineFeed, index + 1)

    // A line of only white-space ends the paragraph before it.
    if (index !== -1 && trim(value.slice(position, index)) === '') {
      index = position
      break
    }
  }

  subvalue = value.slice(0, index)

  // An all-white-space paragraph is eaten but produces no node.
  if (trim(subvalue) === '') {
    eat(subvalue)

    return null
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  now = eat.now()
  subvalue = trimTrailingLines(subvalue)

  return eat(subvalue)({
    type: 'paragraph',
    children: self.tokenizeInline(subvalue, now)
  })
}

215
node_modules/remark-parse/lib/tokenize/reference.js generated vendored Normal file
View File

@@ -0,0 +1,215 @@
'use strict'
var whitespace = require('is-whitespace-character')
var locate = require('../locate/link')
var normalize = require('../util/normalize')
module.exports = reference
reference.locator = locate
var link = 'link'
var image = 'image'
var footnote = 'footnote'
var shortcut = 'shortcut'
var collapsed = 'collapsed'
var full = 'full'
var space = ' '
var exclamationMark = '!'
var leftSquareBracket = '['
var backslash = '\\'
var rightSquareBracket = ']'
var caret = '^'

// Tokenize a reference link (`[text][id]`, `[text][]`, `[text]`),
// image reference (`![alt][id]`), or footnote reference (`[^id]`,
// when the `footnotes` option is on).
function reference(eat, value, silent) {
  var self = this
  var commonmark = self.options.commonmark
  var character = value.charAt(0)
  var index = 0
  var length = value.length
  var subvalue = ''
  var intro = ''
  var type = link
  var referenceType = shortcut
  var content
  var identifier
  var now
  var node
  var exit
  var queue
  var bracketed
  var depth

  // Check whether we're eating an image.
  if (character === exclamationMark) {
    type = image
    intro = character
    character = value.charAt(++index)
  }

  if (character !== leftSquareBracket) {
    return
  }

  index++
  intro += character
  queue = ''

  // Check whether we're eating a footnote.
  if (self.options.footnotes && value.charAt(index) === caret) {
    // Exit if `![^` is found, so the `!` will be seen as text after this,
    // and we'll enter this function again when `[^` is found.
    if (type === image) {
      return
    }

    intro += caret
    index++
    type = footnote
  }

  // Eat the text, honoring escapes and balanced inner brackets.
  depth = 0

  while (index < length) {
    character = value.charAt(index)

    if (character === leftSquareBracket) {
      bracketed = true
      depth++
    } else if (character === rightSquareBracket) {
      if (!depth) {
        break
      }

      depth--
    }

    if (character === backslash) {
      queue += backslash
      character = value.charAt(++index)
    }

    queue += character
    index++
  }

  subvalue = queue
  content = queue
  character = value.charAt(index)

  if (character !== rightSquareBracket) {
    return
  }

  index++
  subvalue += character
  queue = ''

  if (!commonmark) {
    // The original markdown syntax definition explicitly allows for whitespace
    // between the link text and link label; commonmark departs from this, in
    // part to improve support for shortcut reference links
    while (index < length) {
      character = value.charAt(index)

      if (!whitespace(character)) {
        break
      }

      queue += character
      index++
    }
  }

  character = value.charAt(index)

  // Inline footnotes cannot have an identifier.
  if (type !== footnote && character === leftSquareBracket) {
    identifier = ''
    queue += character
    index++

    while (index < length) {
      character = value.charAt(index)

      if (character === leftSquareBracket || character === rightSquareBracket) {
        break
      }

      if (character === backslash) {
        identifier += backslash
        character = value.charAt(++index)
      }

      identifier += character
      index++
    }

    character = value.charAt(index)

    if (character === rightSquareBracket) {
      // `[text][]` is collapsed, `[text][id]` is full.
      referenceType = identifier ? full : collapsed
      queue += identifier + character
      index++
    } else {
      identifier = ''
    }

    subvalue += queue
    queue = ''
  } else {
    if (!content) {
      return
    }

    identifier = content
  }

  // Brackets cannot be inside the identifier.
  if (referenceType !== full && bracketed) {
    return
  }

  subvalue = intro + subvalue

  if (type === link && self.inLink) {
    return null
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  // A footnote whose label contains a space is an inline footnote: its
  // label is tokenized as content rather than used as an identifier.
  if (type === footnote && content.indexOf(space) !== -1) {
    return eat(subvalue)({
      type: footnote,
      children: this.tokenizeInline(content, eat.now())
    })
  }

  now = eat.now()
  now.column += intro.length
  now.offset += intro.length
  identifier = referenceType === full ? identifier : content

  node = {
    type: type + 'Reference',
    identifier: normalize(identifier),
    label: identifier
  }

  if (type === link || type === image) {
    node.referenceType = referenceType
  }

  if (type === link) {
    exit = self.enterLink()
    node.children = self.tokenizeInline(content, now)
    exit()
  } else if (type === image) {
    node.alt = self.decode.raw(self.unescape(content), now) || null
  }

  return eat(subvalue)(node)
}

85
node_modules/remark-parse/lib/tokenize/strong.js generated vendored Normal file
View File

@@ -0,0 +1,85 @@
'use strict'
var trim = require('trim')
var whitespace = require('is-whitespace-character')
var locate = require('../locate/strong')
module.exports = strong
strong.locator = locate
var backslash = '\\'
var asterisk = '*'
var underscore = '_'

// Tokenize strong emphasis: text wrapped in a doubled marker (`**` or
// `__`).
function strong(eat, value, silent) {
  var self = this
  var index = 0
  var character = value.charAt(index)
  var now
  var pedantic
  var marker
  var queue
  var subvalue
  var length
  var prev

  // The value must open with a doubled marker.
  if (
    (character !== asterisk && character !== underscore) ||
    value.charAt(++index) !== character
  ) {
    return
  }

  pedantic = self.options.pedantic
  marker = character
  subvalue = marker + marker
  length = value.length
  index++
  queue = ''
  character = ''

  // In pedantic mode the opening run cannot be followed by white-space.
  if (pedantic && whitespace(value.charAt(index))) {
    return
  }

  while (index < length) {
    prev = character
    character = value.charAt(index)

    // A doubled marker not followed by a third marker character closes
    // the node (in pedantic mode it must not follow white-space).
    if (
      character === marker &&
      value.charAt(index + 1) === marker &&
      (!pedantic || !whitespace(prev))
    ) {
      character = value.charAt(index + 2)

      if (character !== marker) {
        // An all-white-space body is not strong emphasis.
        if (!trim(queue)) {
          return
        }

        /* istanbul ignore if - never used (yet) */
        if (silent) {
          return true
        }

        now = eat.now()
        now.column += 2
        now.offset += 2

        return eat(subvalue + queue + subvalue)({
          type: 'strong',
          children: self.tokenizeInline(queue, now)
        })
      }
    }

    // NOTE(review): when a third marker follows a doubled one, the
    // reassigned `character` (the char at `index + 2`) is appended below
    // while `index` advances by only one — intricate but intentional-
    // looking; confirm before restructuring.
    if (!pedantic && character === backslash) {
      queue += character
      character = value.charAt(++index)
    }

    queue += character
    index++
  }
}

259
node_modules/remark-parse/lib/tokenize/table.js generated vendored Normal file
View File

@@ -0,0 +1,259 @@
'use strict'
var whitespace = require('is-whitespace-character')
module.exports = table
var tab = '\t'
var lineFeed = '\n'
var space = ' '
var dash = '-'
var colon = ':'
var backslash = '\\'
var graveAccent = '`'
var verticalBar = '|'

var minColumns = 1
var minRows = 2

var left = 'left'
var center = 'center'
var right = 'right'

// Tokenise a GFM table: a header row, an alignment row (dashes and colons),
// and any number of body rows, with cells separated by `|`.
function table(eat, value, silent) {
  var self = this
  var index
  var alignments
  var alignment
  var subvalue
  var row
  var length
  var lines
  var queue
  var character
  var hasDash
  var align
  var cell
  var preamble
  var count
  var opening
  var now
  var position
  var lineCount
  var line
  var rows
  var table
  var lineIndex
  var pipeIndex
  var first

  // Exit when not in gfm-mode.
  if (!self.options.gfm) {
    return
  }

  // Get the rows.
  // Detecting tables soon is hard, so there are some checks for performance
  // here, such as the minimum number of rows, and allowed characters in the
  // alignment row.
  index = 0
  lineCount = 0
  length = value.length + 1
  lines = []

  while (index < length) {
    lineIndex = value.indexOf(lineFeed, index)
    pipeIndex = value.indexOf(verticalBar, index + 1)

    if (lineIndex === -1) {
      lineIndex = value.length
    }

    // A line without a pipe ends the table; bail out unless we collected at
    // least a header row and an alignment row.
    if (pipeIndex === -1 || pipeIndex > lineIndex) {
      if (lineCount < minRows) {
        return
      }

      break
    }

    lines.push(value.slice(index, lineIndex))
    lineCount++
    index = lineIndex + 1
  }

  // Parse the alignment row.
  subvalue = lines.join(lineFeed)
  alignments = lines.splice(1, 1)[0] || []
  index = 0
  length = alignments.length
  lineCount--
  alignment = false
  align = []

  while (index < length) {
    character = alignments.charAt(index)

    if (character === verticalBar) {
      hasDash = null

      if (alignment === false) {
        // Two pipes with nothing between them is invalid, except at the very
        // start of the row.
        if (first === false) {
          return
        }
      } else {
        align.push(alignment)
        alignment = false
      }

      first = false
    } else if (character === dash) {
      hasDash = true
      alignment = alignment || null
    } else if (character === colon) {
      // A leading colon means `left`, a colon after dashes means `right`,
      // and colons on both sides mean `center`.
      if (alignment === left) {
        alignment = center
      } else if (hasDash && alignment === null) {
        alignment = right
      } else {
        alignment = left
      }
    } else if (!whitespace(character)) {
      return
    }

    index++
  }

  if (alignment !== false) {
    align.push(alignment)
  }

  // Exit when without enough columns.
  if (align.length < minColumns) {
    return
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  // Parse the rows.
  position = -1
  rows = []

  table = eat(subvalue).reset({type: 'table', align: align, children: rows})

  while (++position < lineCount) {
    line = lines[position]
    row = {type: 'tableRow', children: []}

    // Eat a newline character when this is not the first row.
    if (position) {
      eat(lineFeed)
    }

    // Eat the row.
    eat(line).reset(row, table)

    length = line.length + 1
    index = 0
    queue = ''
    cell = ''
    preamble = true
    count = null
    opening = null

    while (index < length) {
      character = line.charAt(index)

      if (character === tab || character === space) {
        if (cell) {
          queue += character
        } else {
          eat(character)
        }

        index++
        continue
      }

      if (character === '' || character === verticalBar) {
        if (preamble) {
          eat(character)
        } else {
          // Inside an open inline-code span a pipe is literal content.
          if (character && opening) {
            queue += character
            index++
            continue
          }

          if ((cell || character) && !preamble) {
            subvalue = cell

            if (queue.length > 1) {
              if (character) {
                subvalue += queue.slice(0, queue.length - 1)
                queue = queue.charAt(queue.length - 1)
              } else {
                subvalue += queue
                queue = ''
              }
            }

            now = eat.now()

            eat(subvalue)(
              {type: 'tableCell', children: self.tokenizeInline(cell, now)},
              row
            )
          }

          eat(queue + character)

          queue = ''
          cell = ''
        }
      } else {
        if (queue) {
          cell += queue
          queue = ''
        }

        cell += character

        // A backslash escapes the next character (except at the line end).
        if (character === backslash && index !== length - 2) {
          cell += line.charAt(index + 1)
          index++
        }

        // Track backtick runs so pipes inside inline code stay literal.
        if (character === graveAccent) {
          count = 1

          while (line.charAt(index + 1) === character) {
            cell += character
            index++
            count++
          }

          if (!opening) {
            opening = count
          } else if (count >= opening) {
            opening = 0
          }
        }
      }

      preamble = false
      index++
    }

    // Eat the alignment row.
    if (!position) {
      eat(lineFeed + alignments)
    }
  }

  return table
}

57
node_modules/remark-parse/lib/tokenize/text.js generated vendored Normal file
View File

@@ -0,0 +1,57 @@
'use strict'
module.exports = text
// Tokenise plain text: everything up to the nearest position where another
// inline tokenizer could match, with character references decoded.
function text(eat, value, silent) {
  var self = this
  var methods
  var tokenizers
  var index
  var length
  var subvalue
  var position
  var tokenizer
  var name
  var min
  var now

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  methods = self.inlineMethods
  length = methods.length
  tokenizers = self.inlineTokenizers
  index = -1
  min = value.length

  // Ask each other inline tokenizer's locator where it could first match;
  // the text node runs until the earliest such position.
  while (++index < length) {
    name = methods[index]

    if (name === 'text' || !tokenizers[name]) {
      continue
    }

    tokenizer = tokenizers[name].locator

    if (!tokenizer) {
      eat.file.fail('Missing locator: `' + name + '`')
    }

    position = tokenizer.call(self, value, 1)

    if (position !== -1 && position < min) {
      min = position
    }
  }

  subvalue = value.slice(0, min)
  now = eat.now()

  // Decode character references; the raw `source` is eaten while the decoded
  // `content` becomes the node value.
  self.decode(subvalue, now, handler)

  function handler(content, position, source) {
    eat(source || content)({type: 'text', value: content})
  }
}

View File

@@ -0,0 +1,70 @@
'use strict'
module.exports = thematicBreak
var tab = '\t'
var lineFeed = '\n'
var space = ' '
var asterisk = '*'
var dash = '-'
var underscore = '_'

var maxCount = 3

// Tokenise a thematic break (horizontal rule): at least three `*`, `-`, or
// `_` markers of the same kind on one line, optionally separated by spaces
// and preceded by tabs/spaces. When `silent` is set, only report a match.
function thematicBreak(eat, value, silent) {
  var subvalue = ''
  var index = -1
  var length = value.length + 1
  var character
  var marker
  var markerCount
  var queue

  // Consume initial indentation (tabs and spaces).
  while (++index < length) {
    character = value.charAt(index)

    if (character !== tab && character !== space) {
      break
    }

    subvalue += character
  }

  // The first non-indent character decides the marker kind.
  if (
    character !== asterisk &&
    character !== dash &&
    character !== underscore
  ) {
    return
  }

  marker = character
  subvalue += marker
  markerCount = 1
  queue = ''

  while (++index < length) {
    character = value.charAt(index)

    if (character === marker) {
      markerCount++
      subvalue += queue + marker
      queue = ''
      continue
    }

    // Spaces between markers are allowed but only kept if more markers follow.
    if (character === space) {
      queue += character
      continue
    }

    // At the end of the line (or input), three or more markers make a break.
    if (markerCount >= maxCount && (!character || character === lineFeed)) {
      subvalue += queue

      if (silent) {
        return true
      }

      return eat(subvalue)({type: 'thematicBreak'})
    }

    // Any other character invalidates the rule.
    return
  }
}

153
node_modules/remark-parse/lib/tokenize/url.js generated vendored Normal file
View File

@@ -0,0 +1,153 @@
'use strict'
var decode = require('parse-entities')
var whitespace = require('is-whitespace-character')
var locate = require('../locate/url')
module.exports = url
url.locator = locate
url.notInLink = true
var quotationMark = '"'
var apostrophe = "'"
var leftParenthesis = '('
var rightParenthesis = ')'
var comma = ','
var dot = '.'
var colon = ':'
var semicolon = ';'
var lessThan = '<'
var atSign = '@'
var leftSquareBracket = '['
var rightSquareBracket = ']'

var http = 'http://'
var https = 'https://'
var mailto = 'mailto:'

var protocols = [http, https, mailto]

var protocolsLength = protocols.length

// Tokenise a literal (GFM) URL: `http://`, `https://`, or `mailto:` followed
// by anything up to white space, `<`, or trailing sentence punctuation.
function url(eat, value, silent) {
  var self = this
  var subvalue
  var content
  var character
  var index
  var position
  var protocol
  var match
  var length
  var queue
  var parenCount
  var nextCharacter
  var tokenizers
  var exit

  // Literal URLs are a GFM extension.
  if (!self.options.gfm) {
    return
  }

  subvalue = ''
  index = -1

  // Case-insensitively match one of the known protocols at the start.
  while (++index < protocolsLength) {
    protocol = protocols[index]
    match = value.slice(0, protocol.length)

    if (match.toLowerCase() === protocol) {
      subvalue = match
      break
    }
  }

  if (!subvalue) {
    return
  }

  index = subvalue.length
  length = value.length
  queue = ''
  parenCount = 0

  // Gather the rest of the URL.
  while (index < length) {
    character = value.charAt(index)

    if (whitespace(character) || character === lessThan) {
      break
    }

    // Punctuation directly before white space (or the end) does not belong
    // to the URL.
    if (
      character === dot ||
      character === comma ||
      character === colon ||
      character === semicolon ||
      character === quotationMark ||
      character === apostrophe ||
      character === rightParenthesis ||
      character === rightSquareBracket
    ) {
      nextCharacter = value.charAt(index + 1)

      if (!nextCharacter || whitespace(nextCharacter)) {
        break
      }
    }

    // Track bracket balance: an unmatched closer ends the URL.
    if (character === leftParenthesis || character === leftSquareBracket) {
      parenCount++
    }

    if (character === rightParenthesis || character === rightSquareBracket) {
      parenCount--

      if (parenCount < 0) {
        break
      }
    }

    queue += character
    index++
  }

  if (!queue) {
    return
  }

  subvalue += queue
  content = subvalue

  // A mailto link must contain an `@` and only shows the address part.
  if (protocol === mailto) {
    position = queue.indexOf(atSign)

    if (position === -1 || position === length - 1) {
      return
    }

    content = content.substr(mailto.length)
  }

  /* istanbul ignore if - never used (yet) */
  if (silent) {
    return true
  }

  exit = self.enterLink()

  // Temporarily remove all tokenizers except text in url.
  tokenizers = self.inlineTokenizers
  self.inlineTokenizers = {text: tokenizers.text}

  content = self.tokenizeInline(content, eat.now())

  self.inlineTokenizers = tokenizers
  exit()

  return eat(subvalue)({
    type: 'link',
    title: null,
    url: decode(subvalue, {nonTerminated: false}),
    children: content
  })
}

314
node_modules/remark-parse/lib/tokenizer.js generated vendored Normal file
View File

@@ -0,0 +1,314 @@
'use strict'
module.exports = factory
// Construct a tokenizer. This creates both `tokenizeInline` and `tokenizeBlock`.
function factory(type) {
  return tokenize

  // Tokenizer for a bound `type`: runs every registered tokenizer of that
  // type over `value` (starting at `location`) until all input is consumed,
  // and returns the resulting nodes.
  function tokenize(value, location) {
    var self = this
    var offset = self.offset
    var tokens = []
    var methods = self[type + 'Methods']
    var tokenizers = self[type + 'Tokenizers']
    var line = location.line
    var column = location.column
    var index
    var length
    var method
    var name
    var matched
    var valueLength

    // Trim white space only lines.
    if (!value) {
      return tokens
    }

    // Expose on `eat`.
    eat.now = now
    eat.file = self.file

    // Sync initial offset.
    updatePosition('')

    // Iterate over `value`, and iterate over all tokenizers. When one eats
    // something, re-iterate with the remaining value. If no tokenizer eats,
    // something failed (should not happen) and an exception is thrown.
    while (value) {
      index = -1
      length = methods.length
      matched = false

      while (++index < length) {
        name = methods[index]
        method = tokenizers[name]

        // Tokenizers can opt out of certain contexts (lists, blocks, links,
        // or anywhere but the start of the document).
        if (
          method &&
          /* istanbul ignore next */ (!method.onlyAtStart || self.atStart) &&
          (!method.notInList || !self.inList) &&
          (!method.notInBlock || !self.inBlock) &&
          (!method.notInLink || !self.inLink)
        ) {
          valueLength = value.length
          method.apply(self, [eat, value])

          // A tokenizer matched iff it consumed part of `value`.
          matched = valueLength !== value.length

          if (matched) {
            break
          }
        }
      }

      /* istanbul ignore if */
      if (!matched) {
        self.file.fail(new Error('Infinite loop'), eat.now())
      }
    }

    self.eof = now()

    return tokens

    // Update line, column, and offset based on `value`.
    function updatePosition(subvalue) {
      var lastIndex = -1
      var index = subvalue.indexOf('\n')

      while (index !== -1) {
        line++
        lastIndex = index
        index = subvalue.indexOf('\n', index + 1)
      }

      if (lastIndex === -1) {
        column += subvalue.length
      } else {
        column = subvalue.length - lastIndex
      }

      if (line in offset) {
        if (lastIndex !== -1) {
          column += offset[line]
        } else if (column <= offset[line]) {
          column = offset[line] + 1
        }
      }
    }

    // Get offset. Called before the first character is eaten to retrieve the
    // ranges offsets.
    function getOffset() {
      var indentation = []
      var pos = line + 1

      // Done. Called when the last character is eaten to retrieve the ranges
      // offsets.
      return function() {
        var last = line + 1

        while (pos < last) {
          indentation.push((offset[pos] || 0) + 1)

          pos++
        }

        return indentation
      }
    }

    // Get the current position.
    function now() {
      var pos = {line: line, column: column}

      pos.offset = self.toOffset(pos)

      return pos
    }

    // Store position information for a node.
    function Position(start) {
      this.start = start
      this.end = now()
    }

    // Throw when a value is incorrectly eaten. This shouldnt happen but will
    // throw on new, incorrect rules.
    function validateEat(subvalue) {
      /* istanbul ignore if */
      if (value.substring(0, subvalue.length) !== subvalue) {
        // Capture stack-trace.
        self.file.fail(
          new Error(
            'Incorrectly eaten value: please report this warning on https://git.io/vg5Ft'
          ),
          now()
        )
      }
    }

    // Mark position and patch `node.position`.
    function position() {
      var before = now()

      return update

      // Add the position to a node.
      function update(node, indent) {
        var prev = node.position
        var start = prev ? prev.start : before
        var combined = []
        var n = prev && prev.end.line
        var l = before.line

        node.position = new Position(start)

        // If there was already a `position`, this node was merged. Fixing
        // `start` wasnt hard, but the indent is different. Especially
        // because some information, the indent between `n` and `l` wasnt
        // tracked. Luckily, that space is (should be?) empty, so we can
        // safely check for it now.
        if (prev && indent && prev.indent) {
          combined = prev.indent

          if (n < l) {
            while (++n < l) {
              combined.push((offset[n] || 0) + 1)
            }

            combined.push(before.column)
          }

          indent = combined.concat(indent)
        }

        node.position.indent = indent || []

        return node
      }
    }

    // Add `node` to `parent`s children or to `tokens`. Performs merges where
    // possible: adjacent text nodes, and adjacent blockquotes, collapse into
    // the previous sibling.
    function add(node, parent) {
      var children = parent ? parent.children : tokens
      var prev = children[children.length - 1]
      var fn

      if (
        prev &&
        node.type === prev.type &&
        (node.type === 'text' || node.type === 'blockquote') &&
        mergeable(prev) &&
        mergeable(node)
      ) {
        fn = node.type === 'text' ? mergeText : mergeBlockquote
        node = fn.call(self, prev, node)
      }

      if (node !== prev) {
        children.push(node)
      }

      if (self.atStart && tokens.length !== 0) {
        self.exitStart()
      }

      return node
    }

    // Remove `subvalue` from `value`. `subvalue` must be at the start of
    // `value`.
    function eat(subvalue) {
      var indent = getOffset()
      var pos = position()
      var current = now()

      validateEat(subvalue)

      apply.reset = reset
      reset.test = test
      apply.test = test

      value = value.substring(subvalue.length)

      updatePosition(subvalue)

      indent = indent()

      return apply

      // Add the given arguments, add `position` to the returned node, and
      // return the node.
      function apply(node, parent) {
        return pos(add(pos(node), parent), indent)
      }

      // Functions just like apply, but resets the content: the line and
      // column are reversed, and the eaten value is re-added. This is
      // useful for nodes with a single type of content, such as lists and
      // tables. See `apply` above for what parameters are expected.
      function reset() {
        var node = apply.apply(null, arguments)

        line = current.line
        column = current.column
        value = subvalue + value

        return node
      }

      // Test the position, after eating, and reverse to a not-eaten state.
      function test() {
        var result = pos({})

        line = current.line
        column = current.column
        value = subvalue + value

        return result.position
      }
    }
  }
}
// Check whether a node is mergeable with adjacent nodes.
// Positioned text nodes only merge when their span is exactly as wide as
// their `value`; everything else merges freely.
function mergeable(node) {
  var span

  if (node.type !== 'text' || !node.position) {
    return true
  }

  span = node.position

  // Multi-line text always qualifies.
  if (span.start.line !== span.end.line) {
    return true
  }

  return span.end.column - span.start.column === node.value.length
}
// Merge two text nodes: append `node`s value onto `prev` and keep `prev`.
function mergeText(prev, node) {
  prev.value = prev.value + node.value

  return prev
}
// Merge two blockquotes: `node` into `prev`, unless in CommonMark mode
// (CommonMark keeps adjacent blockquotes separate).
function mergeBlockquote(prev, node) {
  if (this.options.commonmark) {
    return node
  }

  prev.children = [].concat(prev.children, node.children)

  return prev
}

36
node_modules/remark-parse/lib/unescape.js generated vendored Normal file
View File

@@ -0,0 +1,36 @@
'use strict'
module.exports = factory
var backslash = '\\'

// Factory to de-escape a value, based on a list at `key` in `ctx`.
function factory(ctx, key) {
  return unescape

  // De-escape `value`: drop each backslash that precedes a character listed
  // in `ctx[key]`; keep backslashes that precede anything else (including a
  // trailing backslash).
  function unescape(value) {
    var escapable = ctx[key]
    var parts = []
    var start = 0
    var position = value.indexOf(backslash)
    var next

    while (position !== -1) {
      parts.push(value.slice(start, position))
      start = position + 1
      next = value.charAt(start)

      // Keep the backslash when it does not escape a known character.
      if (!next || escapable.indexOf(next) === -1) {
        parts.push(backslash)
      }

      position = value.indexOf(backslash, start + 1)
    }

    parts.push(value.slice(start))

    return parts.join('')
  }
}

33
node_modules/remark-parse/lib/util/get-indentation.js generated vendored Normal file
View File

@@ -0,0 +1,33 @@
'use strict'
module.exports = indentation
var tab = '\t'
var space = ' '

var spaceSize = 1
var tabSize = 4

// Gets indentation information for a line: the total indent size, plus a map
// (`stops`) from each indent size reached to the index of the character that
// reached it.
function indentation(value) {
  var stops = {}
  var indent = 0
  var position = 0
  var character = value.charAt(position)
  var size

  while (character === tab || character === space) {
    size = character === tab ? tabSize : spaceSize
    indent += size

    // A tab aligns the indent to the next multiple of its size.
    if (size > 1) {
      indent = Math.floor(indent / size) * size
    }

    stops[indent] = position
    character = value.charAt(++position)
  }

  return {indent: indent, stops: stops}
}

34
node_modules/remark-parse/lib/util/html.js generated vendored Normal file
View File

@@ -0,0 +1,34 @@
'use strict'
// Source strings for regular expressions matching pieces of HTML syntax in
// markdown input.
var attributeName = '[a-zA-Z_:][a-zA-Z0-9:._-]*'
var unquoted = '[^"\'=<>`\\u0000-\\u0020]+'
var singleQuoted = "'[^']*'"
var doubleQuoted = '"[^"]*"'
var attributeValue =
  '(?:' + unquoted + '|' + singleQuoted + '|' + doubleQuoted + ')'
var attribute =
  '(?:\\s+' + attributeName + '(?:\\s*=\\s*' + attributeValue + ')?)'
var openTag = '<[A-Za-z][A-Za-z0-9\\-]*' + attribute + '*\\s*\\/?>'
var closeTag = '<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>'
var comment = '<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->'
var processing = '<[?].*?[?]>'
var declaration = '<![A-Za-z]+\\s+[^>]*>'
var cdata = '<!\\[CDATA\\[[\\s\\S]*?\\]\\]>'

// Matches an opening or closing tag at the start of a value.
exports.openCloseTag = new RegExp('^(?:' + openTag + '|' + closeTag + ')')

// Matches any complete HTML construct (tag, comment, processing instruction,
// declaration, or CDATA section) at the start of a value.
exports.tag = new RegExp(
  '^(?:' +
    openTag +
    '|' +
    closeTag +
    '|' +
    comment +
    '|' +
    processing +
    '|' +
    declaration +
    '|' +
    cdata +
    ')'
)

35
node_modules/remark-parse/lib/util/interrupt.js generated vendored Normal file
View File

@@ -0,0 +1,35 @@
'use strict'
module.exports = interrupt
// Check whether any applicable interruptor matches at the current position.
//
// `interruptors` is a list of `[name, config?]` pairs; a `config` may pin
// `pedantic` or `commonmark`, in which case that interruptor only applies
// when the corresponding parser option matches. Returns `true` as soon as
// one of the named tokenizers matches.
function interrupt(interruptors, tokenizers, ctx, params) {
  var position = -1
  var entry
  var settings

  while (++position < interruptors.length) {
    entry = interruptors[position]
    settings = entry[1] || {}

    if (
      settings.pedantic !== undefined &&
      settings.pedantic !== ctx.options.pedantic
    ) {
      continue
    }

    if (
      settings.commonmark !== undefined &&
      settings.commonmark !== ctx.options.commonmark
    ) {
      continue
    }

    if (tokenizers[entry[0]].apply(ctx, params)) {
      return true
    }
  }

  return false
}

11
node_modules/remark-parse/lib/util/normalize.js generated vendored Normal file
View File

@@ -0,0 +1,11 @@
'use strict'
var collapseWhiteSpace = require('collapse-white-space')
module.exports = normalize
// Normalize an identifier (e.g. a link/definition label). Collapses multiple
// white space characters into a single space and removes casing, so labels
// like `Foo   Bar` and `foo bar` compare equal.
function normalize(value) {
  return collapseWhiteSpace(value).toLowerCase()
}

View File

@@ -0,0 +1,77 @@
'use strict'
var trim = require('trim')
var repeat = require('repeat-string')
var getIndent = require('./get-indentation')
module.exports = indentation
var tab = '\t'
var lineFeed = '\n'
var space = ' '
var exclamationMark = '!'

// Remove the minimum indent from every line in `value`. Supports both tab,
// spaced, and mixed indentation (as well as possible).
function indentation(value, maximum) {
  var values = value.split(lineFeed)
  var position = values.length + 1
  var minIndent = Infinity
  var matrix = []
  var index
  var indentation
  var stops
  var padding

  // Prepend a sentinel line indented by `maximum` so the minimum indent never
  // exceeds it; the sentinel is removed again before returning.
  values.unshift(repeat(space, maximum) + exclamationMark)

  // First pass: record each line's indentation stops and find the minimum
  // indent over all non-blank lines.
  while (position--) {
    indentation = getIndent(values[position])

    matrix[position] = indentation.stops

    if (trim(values[position]).length === 0) {
      continue
    }

    if (indentation.indent) {
      if (indentation.indent > 0 && indentation.indent < minIndent) {
        minIndent = indentation.indent
      }
    } else {
      // A non-blank line without any indent: nothing can be removed at all.
      minIndent = Infinity

      break
    }
  }

  // Second pass: strip `minIndent` columns from every line, padding with a
  // tab when a line's stops do not land exactly on `minIndent`.
  if (minIndent !== Infinity) {
    position = values.length

    while (position--) {
      stops = matrix[position]
      index = minIndent

      while (index && !(index in stops)) {
        index--
      }

      if (
        trim(values[position]).length !== 0 &&
        minIndent &&
        index !== minIndent
      ) {
        padding = tab
      } else {
        padding = ''
      }

      values[position] =
        padding + values[position].slice(index in stops ? stops[index] + 1 : 0)
    }
  }

  // Drop the sentinel again.
  values.shift()

  return values.join(lineFeed)
}

View File

@@ -0,0 +1,30 @@
'use strict'
/* eslint-env browser */

// Shared element used to decode entities through the DOM; created lazily on
// first use.
var el

var semicolon = 59 // ';'

module.exports = decodeEntity

// Decode a named character reference (given without `&` and `;`) by letting
// the browser parse it. Returns the decoded string, or `false` when
// `characters` is not a valid, complete reference.
function decodeEntity(characters) {
  var entity = '&' + characters + ';'
  var char

  el = el || document.createElement('i')
  el.innerHTML = entity
  char = el.textContent

  // Some entities do not require the closing semicolon (`&not` - for instance),
  // which leads to situations where parsing the assumed entity of &notit; will
  // result in the string `¬it;`. When we encounter a trailing semicolon after
  // parsing and the entity to decode was not a semicolon (`&semi;`), we can
  // assume that the matching was incomplete
  if (char.charCodeAt(char.length - 1) === semicolon && characters !== 'semi') {
    return false
  }

  // If the decoded string is equal to the input, the entity was not valid
  return char === entity ? false : char
}

View File

@@ -0,0 +1,13 @@
'use strict'
var characterEntities = require('character-entities')
module.exports = decodeEntity
var own = {}.hasOwnProperty

// Decode a named character reference (given without `&` and `;`) by looking
// it up in the character-entities map; `false` when unknown.
function decodeEntity(characters) {
  if (own.call(characterEntities, characters)) {
    return characterEntities[characters]
  }

  return false
}

View File

@@ -0,0 +1,450 @@
'use strict'
var legacy = require('character-entities-legacy')
var invalid = require('character-reference-invalid')
var decimal = require('is-decimal')
var hexadecimal = require('is-hexadecimal')
var alphanumerical = require('is-alphanumerical')
var decodeEntity = require('./decode-entity')
module.exports = parseEntities
var own = {}.hasOwnProperty
var fromCharCode = String.fromCharCode
var noop = Function.prototype

// Default settings.
var defaults = {
  warning: null,
  reference: null,
  text: null,
  warningContext: null,
  referenceContext: null,
  textContext: null,
  position: {},
  additional: null,
  attribute: false,
  nonTerminated: true
}

// Characters (as char codes).
var tab = 9 // '\t'
var lineFeed = 10 // '\n'
var formFeed = 12 // '\f'
var space = 32 // ' '
var ampersand = 38 // '&'
var semicolon = 59 // ';'
var lessThan = 60 // '<'
var equalsTo = 61 // '='
var numberSign = 35 // '#'
var uppercaseX = 88 // 'X'
var lowercaseX = 120 // 'x'
var replacementCharacter = 65533 // '\uFFFD' (replacement character)

// Reference types.
var name = 'named'
var hexa = 'hexadecimal'
var deci = 'decimal'

// Map of bases.
var bases = {}

bases[hexa] = 16
bases[deci] = 10

// Map of types to tests.
// Each type of character reference accepts different characters.
// This test is used to detect whether a reference has ended (as the semicolon
// is not strictly needed).
var tests = {}

tests[name] = alphanumerical
tests[deci] = decimal
tests[hexa] = hexadecimal

// Warning types.
var namedNotTerminated = 1
var numericNotTerminated = 2
var namedEmpty = 3
var numericEmpty = 4
var namedUnknown = 5
var numericDisallowed = 6
var numericProhibited = 7

// Warning messages, indexed by the warning type codes above.
var messages = {}

messages[namedNotTerminated] =
  'Named character references must be terminated by a semicolon'
messages[numericNotTerminated] =
  'Numeric character references must be terminated by a semicolon'
messages[namedEmpty] = 'Named character references cannot be empty'
messages[numericEmpty] = 'Numeric character references cannot be empty'
messages[namedUnknown] = 'Named character references must be known'
messages[numericDisallowed] =
  'Numeric character references cannot be disallowed'
messages[numericProhibited] =
  'Numeric character references cannot be outside the permissible Unicode range'
// Wrap to ensure clean parameters are given to `parse`.
function parseEntities(value, options) {
  var settings = {}
  var option
  var key

  if (!options) {
    options = {}
  }

  // Fill in defaults for missing (or nullish) options.
  for (key in defaults) {
    option = options[key]
    settings[key] =
      option === null || option === undefined ? defaults[key] : option
  }

  // Accept a position node (`{start, indent}`) as well as a plain point.
  if (settings.position.indent || settings.position.start) {
    settings.indent = settings.position.indent || []
    settings.position = settings.position.start
  }

  return parse(value, settings)
}
// Parse entities.
// eslint-disable-next-line complexity
function parse(value, settings) {
  var additional = settings.additional
  var nonTerminated = settings.nonTerminated
  var handleText = settings.text
  var handleReference = settings.reference
  var handleWarning = settings.warning
  var textContext = settings.textContext
  var referenceContext = settings.referenceContext
  var warningContext = settings.warningContext
  var pos = settings.position
  var indent = settings.indent || []
  var length = value.length
  var index = 0
  var lines = -1
  var column = pos.column || 1
  var line = pos.line || 1
  var queue = ''
  var result = []
  var entityCharacters
  var namedEntity
  var terminated
  var characters
  var character
  var reference
  var following
  var warning
  var reason
  var output
  var entity
  var begin
  var start
  var type
  var test
  var prev
  var next
  var diff
  var end

  if (typeof additional === 'string') {
    additional = additional.charCodeAt(0)
  }

  // Cache the current point.
  prev = now()

  // Wrap `handleWarning`.
  warning = handleWarning ? parseError : noop

  // Ensure the algorithm walks over the first character and the end (inclusive).
  index--
  length++

  while (++index < length) {
    // If the previous character was a newline.
    if (character === lineFeed) {
      column = indent[lines] || 1
    }

    character = value.charCodeAt(index)

    if (character === ampersand) {
      following = value.charCodeAt(index + 1)

      // The behaviour depends on the identity of the next character.
      if (
        following === tab ||
        following === lineFeed ||
        following === formFeed ||
        following === space ||
        following === ampersand ||
        following === lessThan ||
        following !== following ||
        (additional && following === additional)
      ) {
        // Not a character reference.
        // No characters are consumed, and nothing is returned.
        // This is not an error, either.
        queue += fromCharCode(character)
        column++

        continue
      }

      start = index + 1
      begin = start
      end = start

      if (following === numberSign) {
        // Numerical entity.
        end = ++begin

        // The behaviour further depends on the next character.
        following = value.charCodeAt(end)

        if (following === uppercaseX || following === lowercaseX) {
          // ASCII hex digits.
          type = hexa
          end = ++begin
        } else {
          // ASCII digits.
          type = deci
        }
      } else {
        // Named entity.
        type = name
      }

      entityCharacters = ''
      entity = ''
      characters = ''
      test = tests[type]
      end--

      // Gather all characters allowed for this reference type.
      while (++end < length) {
        following = value.charCodeAt(end)

        if (!test(following)) {
          break
        }

        characters += fromCharCode(following)

        // Check if we can match a legacy named reference.
        // If so, we cache that as the last viable named reference.
        // This ensures we do not need to walk backwards later.
        if (type === name && own.call(legacy, characters)) {
          entityCharacters = characters
          entity = legacy[characters]
        }
      }

      terminated = value.charCodeAt(end) === semicolon

      if (terminated) {
        end++

        namedEntity = type === name ? decodeEntity(characters) : false

        if (namedEntity) {
          entityCharacters = characters
          entity = namedEntity
        }
      }

      diff = 1 + end - start

      if (!terminated && !nonTerminated) {
        // Empty.
      } else if (!characters) {
        // An empty (possible) entity is valid, unless its numeric (thus an
        // ampersand followed by an octothorp).
        if (type !== name) {
          warning(numericEmpty, diff)
        }
      } else if (type === name) {
        // An ampersand followed by anything unknown, and not terminated, is
        // invalid.
        if (terminated && !entity) {
          warning(namedUnknown, 1)
        } else {
          // If theres something after an entity name which is not known, cap
          // the reference.
          if (entityCharacters !== characters) {
            end = begin + entityCharacters.length
            diff = 1 + end - begin
            terminated = false
          }

          // If the reference is not terminated, warn.
          if (!terminated) {
            reason = entityCharacters ? namedNotTerminated : namedEmpty

            if (settings.attribute) {
              // In attribute context, `&name=` or `&nameX` must stay literal.
              following = value.charCodeAt(end)

              if (following === equalsTo) {
                warning(reason, diff)
                entity = null
              } else if (alphanumerical(following)) {
                entity = null
              } else {
                warning(reason, diff)
              }
            } else {
              warning(reason, diff)
            }
          }
        }

        reference = entity
      } else {
        if (!terminated) {
          // All non-terminated numeric entities are not rendered, and trigger a
          // warning.
          warning(numericNotTerminated, diff)
        }

        // When terminated and number, parse as either hexadecimal or decimal.
        reference = parseInt(characters, bases[type])

        // Trigger a warning when the parsed number is prohibited, and replace
        // with replacement character.
        if (prohibited(reference)) {
          warning(numericProhibited, diff)

          reference = fromCharCode(replacementCharacter)
        } else if (reference in invalid) {
          // Trigger a warning when the parsed number is disallowed, and replace
          // by an alternative.
          warning(numericDisallowed, diff)

          reference = invalid[reference]
        } else {
          // Parse the number.
          output = ''

          // Trigger a warning when the parsed number should not be used.
          if (disallowed(reference)) {
            warning(numericDisallowed, diff)
          }

          // Stringify the number.
          if (reference > 0xffff) {
            reference -= 0x10000
            // NOTE(review): `(10 & 0x3ff)` evaluates to `10`, so this shifts
            // right by 10; that stays in surrogate range here because
            // `prohibited` above already rejected code points over 0x10ffff.
            output += fromCharCode((reference >>> (10 & 0x3ff)) | 0xd800)
            reference = 0xdc00 | (reference & 0x3ff)
          }

          reference = output + fromCharCode(reference)
        }
      }

      // Found it!
      // First eat the queued characters as normal text, then eat an entity.
      if (reference) {
        flush()

        prev = now()
        index = end - 1
        column += end - start + 1
        result.push(reference)
        next = now()
        next.offset++

        if (handleReference) {
          handleReference.call(
            referenceContext,
            reference,
            {start: prev, end: next},
            value.slice(start - 1, end)
          )
        }

        prev = next
      } else {
        // If we could not find a reference, queue the checked characters (as
        // normal characters), and move the pointer to their end.
        // This is possible because we can be certain neither newlines nor
        // ampersands are included.
        characters = value.slice(start - 1, end)

        queue += characters
        column += characters.length
        index = end - 1
      }
    } else {
      // Handle anything other than an ampersand, including newlines and EOF.
      if (
        character === 10 // Line feed
      ) {
        line++
        lines++
        column = 0
      }

      if (character === character) {
        queue += fromCharCode(character)
        column++
      } else {
        // NaN: the index walked one past the end of `value` (see the
        // `length++` above); flush the remaining queued text.
        flush()
      }
    }
  }

  // Return the reduced nodes, and any possible warnings.
  return result.join('')

  // Get current position.
  function now() {
    return {
      line: line,
      column: column,
      offset: index + (pos.offset || 0)
    }
  }

  // “Throw” a parse-error: a warning.
  function parseError(code, offset) {
    var position = now()

    position.column += offset
    position.offset += offset

    handleWarning.call(warningContext, messages[code], position, code)
  }

  // Flush `queue` (normal text).
  // Macro invoked before each entity and at the end of `value`.
  // Does nothing when `queue` is empty.
  function flush() {
    if (queue) {
      result.push(queue)

      if (handleText) {
        handleText.call(textContext, queue, {start: prev, end: now()})
      }

      queue = ''
    }
  }
}
// Check if `code` is outside the permissible unicode range: a surrogate
// (U+D800..U+DFFF) or beyond U+10FFFF.
function prohibited(code) {
  var surrogate = code >= 0xd800 && code <= 0xdfff

  return surrogate || code > 0x10ffff
}
// Check if `code` is disallowed: C0/C1 control characters (minus a few
// whitespace codes), the U+FDD0..U+FDEF noncharacter block, and any code
// point whose low 16 bits are FFFE or FFFF.
function disallowed(code) {
  if (code >= 0x0001 && code <= 0x0008) {
    return true
  }

  if (code === 0x000b) {
    return true
  }

  if (code >= 0x000d && code <= 0x001f) {
    return true
  }

  if (code >= 0x007f && code <= 0x009f) {
    return true
  }

  if (code >= 0xfdd0 && code <= 0xfdef) {
    return true
  }

  var low = code & 0xffff

  return low === 0xffff || low === 0xfffe
}

View File

@@ -0,0 +1,22 @@
(The MIT License)
Copyright (c) 2015 Titus Wormer <mailto:tituswormer@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,91 @@
{
"name": "parse-entities",
"version": "1.2.2",
"description": "Parse HTML character references: fast, spec-compliant, positional information",
"license": "MIT",
"keywords": [
"parse",
"html",
"character",
"reference",
"entity",
"entities"
],
"repository": "wooorm/parse-entities",
"bugs": "https://github.com/wooorm/parse-entities/issues",
"author": "Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)",
"contributors": [
"Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)"
],
"browser": {
"./decode-entity.js": "./decode-entity.browser.js"
},
"react-native": {
"./decode-entity.js": "./decode-entity.js"
},
"files": [
"index.js",
"decode-entity.js",
"decode-entity.browser.js"
],
"dependencies": {
"character-entities": "^1.0.0",
"character-entities-legacy": "^1.0.0",
"character-reference-invalid": "^1.0.0",
"is-alphanumerical": "^1.0.0",
"is-decimal": "^1.0.0",
"is-hexadecimal": "^1.0.0"
},
"devDependencies": {
"browserify": "^16.0.0",
"nyc": "^14.0.0",
"prettier": "^1.12.1",
"remark-cli": "^6.0.0",
"remark-preset-wooorm": "^4.0.0",
"tape": "^4.2.0",
"tape-run": "^6.0.0",
"tinyify": "^2.4.3",
"xo": "^0.24.0"
},
"scripts": {
"format": "remark . -qfo && prettier --write \"**/*.js\" && xo --fix",
"build-bundle": "browserify . -s parseEntities > parse-entities.js",
"build-mangle": "browserify . -s parseEntities -p tinyify > parse-entities.min.js",
"build": "npm run build-bundle && npm run build-mangle",
"test-api": "node test",
"test-coverage": "nyc --reporter lcov tape test.js",
"test-browser": "browserify test.js | tape-run",
"test": "npm run format && npm run build && npm run test-coverage && npm run test-browser"
},
"nyc": {
"check-coverage": true,
"lines": 100,
"functions": 100,
"branches": 100
},
"prettier": {
"tabWidth": 2,
"useTabs": false,
"singleQuote": true,
"bracketSpacing": false,
"semi": false,
"trailingComma": "none"
},
"xo": {
"prettier": true,
"esnext": false,
"rules": {
"no-self-compare": "off",
"guard-for-in": "off",
"max-depth": "off"
},
"ignores": [
"parse-entities.js"
]
},
"remarkConfig": {
"plugins": [
"preset-wooorm"
]
}
}

View File

@@ -0,0 +1,217 @@
# parse-entities
[![Build][build-badge]][build]
[![Coverage][coverage-badge]][coverage]
[![Downloads][downloads-badge]][downloads]
[![Size][size-badge]][size]
Parse HTML character references: fast, spec-compliant, positional
information.
## Installation
[npm][]:
```bash
npm install parse-entities
```
## Usage
```js
var decode = require('parse-entities')
decode('alpha &amp bravo')
// => alpha & bravo
decode('charlie &copycat; delta')
// => charlie ©cat; delta
decode('echo &copy; foxtrot &#8800; golf &#x1D306; hotel')
// => echo © foxtrot ≠ golf 𝌆 hotel
```
## API
## `parseEntities(value[, options])`
##### `options`
###### `options.additional`
Additional character to accept (`string?`, default: `''`).
This allows other characters, without error, when following an ampersand.
###### `options.attribute`
Whether to parse `value` as an attribute value (`boolean?`, default:
`false`).
###### `options.nonTerminated`
Whether to allow non-terminated entities (`boolean`, default: `true`).
For example, `&copycat` for `©cat`. This behaviour is spec-compliant but
can lead to unexpected results.
###### `options.warning`
Error handler ([`Function?`][warning]).
###### `options.text`
Text handler ([`Function?`][text]).
###### `options.reference`
Reference handler ([`Function?`][reference]).
###### `options.warningContext`
Context used when invoking `warning` (`'*'`, optional).
###### `options.textContext`
Context used when invoking `text` (`'*'`, optional).
###### `options.referenceContext`
Context used when invoking `reference` (`'*'`, optional)
###### `options.position`
Starting `position` of `value` (`Location` or `Position`, optional). Useful
when dealing with values nested in some sort of syntax tree. The default is:
```js
{
start: {line: 1, column: 1, offset: 0},
indent: []
}
```
##### Returns
`string` — Decoded `value`.
### `function warning(reason, position, code)`
Error handler.
##### Context
`this` refers to `warningContext` when given to `parseEntities`.
##### Parameters
###### `reason`
Human-readable reason for triggering a parse error (`string`).
###### `position`
Place at which the parse error occurred (`Position`).
###### `code`
Identifier of reason for triggering a parse error (`number`).
The following codes are used:
| Code | Example | Note |
| ---- | ------------------ | --------------------------------------------- |
| `1` | `foo &amp bar` | Missing semicolon (named) |
| `2` | `foo &#123 bar` | Missing semicolon (numeric) |
| `3` | `Foo &bar baz` | Ampersand did not start a reference |
| `4` | `Foo &#` | Empty reference |
| `5` | `Foo &bar; baz` | Unknown entity |
| `6` | `Foo &#128; baz` | [Disallowed reference][invalid] |
| `7` | `Foo &#xD800; baz` | Prohibited: outside permissible unicode range |
### `function text(value, location)`
Text handler.
##### Context
`this` refers to `textContext` when given to `parseEntities`.
##### Parameters
###### `value`
String of content (`string`).
###### `location`
Location at which `value` starts and ends (`Location`).
### `function reference(value, location, source)`
Character reference handler.
##### Context
`this` refers to `referenceContext` when given to `parseEntities`.
##### Parameters
###### `value`
Encoded character reference (`string`).
###### `location`
Location at which `value` starts and ends (`Location`).
###### `source`
Source of character reference (`Location`).
## Related
* [`stringify-entities`](https://github.com/wooorm/stringify-entities)
— Encode HTML character references
* [`character-entities`](https://github.com/wooorm/character-entities)
— Info on character entities
* [`character-entities-html4`](https://github.com/wooorm/character-entities-html4)
— Info on HTML4 character entities
* [`character-entities-legacy`](https://github.com/wooorm/character-entities-legacy)
— Info on legacy character entities
* [`character-reference-invalid`](https://github.com/wooorm/character-reference-invalid)
— Info on invalid numeric character references
## License
[MIT][license] © [Titus Wormer][author]
<!-- Definitions -->
[build-badge]: https://img.shields.io/travis/wooorm/parse-entities.svg
[build]: https://travis-ci.org/wooorm/parse-entities
[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/parse-entities.svg
[coverage]: https://codecov.io/github/wooorm/parse-entities
[downloads-badge]: https://img.shields.io/npm/dm/parse-entities.svg
[downloads]: https://www.npmjs.com/package/parse-entities
[size-badge]: https://img.shields.io/bundlephobia/minzip/parse-entities.svg
[size]: https://bundlephobia.com/result?p=parse-entities
[npm]: https://docs.npmjs.com/cli/install
[license]: license
[author]: https://wooorm.com
[warning]: #function-warningreason-position-code
[text]: #function-textvalue-location
[reference]: #function-referencevalue-location-source
[invalid]: https://github.com/wooorm/character-reference-invalid

55
node_modules/remark-parse/package.json generated vendored Normal file
View File

@@ -0,0 +1,55 @@
{
"name": "remark-parse",
"version": "6.0.3",
"description": "Markdown parser for remark",
"license": "MIT",
"keywords": [
"markdown",
"abstract",
"syntax",
"tree",
"ast",
"parse"
],
"homepage": "https://remark.js.org",
"repository": "https://github.com/remarkjs/remark/tree/master/packages/remark-parse",
"bugs": "https://github.com/remarkjs/remark/issues",
"author": "Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)",
"contributors": [
"Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)",
"Eugene Sharygin <eush77@gmail.com>",
"Junyoung Choi <fluke8259@gmail.com>",
"Elijah Hamovitz <elijahhamovitz@gmail.com>",
"Ika <ikatyang@gmail.com>"
],
"files": [
"index.js",
"lib"
],
"dependencies": {
"collapse-white-space": "^1.0.2",
"is-alphabetical": "^1.0.0",
"is-decimal": "^1.0.0",
"is-whitespace-character": "^1.0.0",
"is-word-character": "^1.0.0",
"markdown-escapes": "^1.0.0",
"parse-entities": "^1.1.0",
"repeat-string": "^1.5.4",
"state-toggle": "^1.0.0",
"trim": "0.0.1",
"trim-trailing-lines": "^1.0.0",
"unherit": "^1.0.4",
"unist-util-remove-position": "^1.0.0",
"vfile-location": "^2.0.0",
"xtend": "^4.0.1"
},
"devDependencies": {
"tape": "^4.9.1",
"unified": "^7.0.0",
"vfile": "^3.0.0"
},
"scripts": {
"test": "tape test.js"
},
"xo": false
}

506
node_modules/remark-parse/readme.md generated vendored Normal file
View File

@@ -0,0 +1,506 @@
# remark-parse [![Travis][build-badge]][build-status] [![Coverage][coverage-badge]][coverage-status] [![Downloads][dl-badge]][dl] [![Size][size-badge]][size] [![Chat][chat-badge]][chat]
[Parser][] for [**unified**][unified].
Parses markdown to [**mdast**][mdast] syntax trees.
Used in the [**remark** processor][processor] but can be used on its own as
well.
Can be [extended][extend] to change how markdown is parsed.
* * *
**Announcing the unified collective! 🎉
[Read more about it on Medium »][announcement]**
## Sponsors
<!--lint ignore no-html maximum-line-length-->
<table>
<tr valign="top">
<td width="20%" align="center">
<a href="https://zeit.co"><img src="https://avatars1.githubusercontent.com/u/14985020?s=400&v=4"></a>
<br><br>🥇
<a href="https://zeit.co">ZEIT</a>
</td>
<td width="20%" align="center">
<a href="https://www.gatsbyjs.org"><img src="https://avatars1.githubusercontent.com/u/12551863?s=400&v=4"></a>
<br><br>🥇
<a href="https://www.gatsbyjs.org">Gatsby</a></td>
<td width="20%" align="center">
<a href="https://compositor.io"><img src="https://avatars1.githubusercontent.com/u/19245838?s=400&v=4"></a>
<br><br>🥉
<a href="https://compositor.io">Compositor</a>
</td>
<td width="20%" align="center">
<a href="https://www.holloway.com"><img src="https://avatars1.githubusercontent.com/u/35904294?s=400&v=4"></a>
<br><br>
<a href="https://www.holloway.com">Holloway</a>
</td>
<td width="20%" align="center">
<br><br><br><br>
<a href="https://opencollective.com/unified"><strong>You?</strong>
</td>
</tr>
</table>
## Installation
[npm][]:
```sh
npm install remark-parse
```
## Usage
```js
var unified = require('unified')
var createStream = require('unified-stream')
var markdown = require('remark-parse')
var html = require('remark-html')
var processor = unified()
.use(markdown, {commonmark: true})
.use(html)
process.stdin.pipe(createStream(processor)).pipe(process.stdout)
```
## Table of Contents
* [API](#api)
* [processor.use(parse\[, options\])](#processoruseparse-options)
* [parse.Parser](#parseparser)
* [Extending the Parser](#extending-the-parser)
* [Parser#blockTokenizers](#parserblocktokenizers)
* [Parser#blockMethods](#parserblockmethods)
* [Parser#inlineTokenizers](#parserinlinetokenizers)
* [Parser#inlineMethods](#parserinlinemethods)
* [function tokenizer(eat, value, silent)](#function-tokenizereat-value-silent)
* [tokenizer.locator(value, fromIndex)](#tokenizerlocatorvalue-fromindex)
* [eat(subvalue)](#eatsubvalue)
* [add(node\[, parent\])](#addnode-parent)
* [add.test()](#addtest)
* [add.reset(node\[, parent\])](#addresetnode-parent)
* [Turning off a tokenizer](#turning-off-a-tokenizer)
* [License](#license)
## API
### `processor.use(parse[, options])`
Configure the `processor` to read markdown as input and process
[**mdast**][mdast] syntax trees.
##### `options`
Options are passed directly, or passed later through [`processor.data()`][data].
##### `options.gfm`
```md
hello ~~hi~~ world
```
GFM mode (`boolean`, default: `true`) turns on:
* [Fenced code blocks](https://help.github.com/articles/github-flavored-markdown/#fenced-code-blocks)
* [Autolinking of URLs](https://help.github.com/articles/github-flavored-markdown/#url-autolinking)
* [Deletions (strikethrough)](https://help.github.com/articles/github-flavored-markdown/#strikethrough)
* [Task lists](https://help.github.com/articles/writing-on-github/#task-lists)
* [Tables](https://help.github.com/articles/github-flavored-markdown/#tables)
##### `options.commonmark`
```md
This is a paragraph
and this is also part of the preceding paragraph.
```
CommonMark mode (`boolean`, default: `false`) allows:
* Empty lines to split blockquotes
* Parentheses (`(` and `)`) around for link and image titles
* Any escaped [ASCII-punctuation][escapes] character
* Closing parenthesis (`)`) as an ordered list marker
* URL definitions (and footnotes, when enabled) in blockquotes
CommonMark mode disallows:
* Code directly following a paragraph
* ATX-headings (`# Hash headings`) without spacing after opening hashes
or before closing hashes
* Setext headings (`Underline headings\n---`) when following a paragraph
* Newlines in link and image titles
* White space in link and image URLs in auto-links (links in brackets,
`<` and `>`)
* Lazy blockquote continuation, lines not preceded by a closing angle
bracket (`>`), for lists, code, and thematicBreak
##### `options.footnotes`
```md
Something something[^or something?].
And something else[^1].
[^1]: This reference footnote contains a paragraph...
* ...and a list
```
Footnotes mode (`boolean`, default: `false`) enables reference footnotes and
inline footnotes. Both are wrapped in square brackets and preceded by a caret
(`^`), and can be referenced from inside other footnotes.
##### `options.blocks`
```md
<block>foo
</block>
```
Blocks (`Array.<string>`, default: list of [block HTML elements][blocks])
lets users define which HTML elements are treated as block-level elements.
##### `options.pedantic`
```md
Check out some_file_name.txt
```
Pedantic mode (`boolean`, default: `false`) turns on:
* Emphasis (`_alpha_`) and importance (`__bravo__`) with underscores
in words
* Unordered lists with different markers (`*`, `-`, `+`)
* If `commonmark` is also turned on, ordered lists with different
markers (`.`, `)`)
* And pedantic mode removes fewer spaces in list items (at most four,
instead of the whole indent)
### `parse.Parser`
Access to the [parser][], if you need it.
## Extending the Parser
Most often, using transformers to manipulate a syntax tree produces
the desired output. Sometimes, mainly when introducing new syntactic
entities with a certain level of precedence, interfacing with the parser
is necessary.
If the `remark-parse` plugin is used, it adds a [`Parser`][parser] constructor
to the `processor`. Other plugins can add tokenizers to the parsers prototype
to change how markdown is parsed.
The below plugin adds a [tokenizer][] for at-mentions.
```js
module.exports = mentions
function mentions() {
var Parser = this.Parser
var tokenizers = Parser.prototype.inlineTokenizers
var methods = Parser.prototype.inlineMethods
// Add an inline tokenizer (defined in the following example).
tokenizers.mention = tokenizeMention
// Run it just before `text`.
methods.splice(methods.indexOf('text'), 0, 'mention')
}
```
### `Parser#blockTokenizers`
An object mapping tokenizer names to [tokenizer][]s. These
tokenizers (for example: `fencedCode`, `table`, and `paragraph`) eat
from the start of a value to a line ending.
See `#blockMethods` below for a list of methods that are included by
default.
### `Parser#blockMethods`
Array of `blockTokenizers` names (`string`) specifying the order in
which they run.
<!--methods-block start-->
* `newline`
* `indentedCode`
* `fencedCode`
* `blockquote`
* `atxHeading`
* `thematicBreak`
* `list`
* `setextHeading`
* `html`
* `footnote`
* `definition`
* `table`
* `paragraph`
<!--methods-block end-->
### `Parser#inlineTokenizers`
An object mapping tokenizer names to [tokenizer][]s. These tokenizers
(for example: `url`, `reference`, and `emphasis`) eat from the start
of a value. To increase performance, they depend on [locator][]s.
See `#inlineMethods` below for a list of methods that are included by
default.
### `Parser#inlineMethods`
Array of `inlineTokenizers` names (`string`) specifying the order in
which they run.
<!--methods-inline start-->
* `escape`
* `autoLink`
* `url`
* `html`
* `link`
* `reference`
* `strong`
* `emphasis`
* `deletion`
* `code`
* `break`
* `text`
<!--methods-inline end-->
### `function tokenizer(eat, value, silent)`
```js
tokenizeMention.notInLink = true
tokenizeMention.locator = locateMention
function tokenizeMention(eat, value, silent) {
var match = /^@(\w+)/.exec(value)
if (match) {
if (silent) {
return true
}
return eat(match[0])({
type: 'link',
url: 'https://social-network/' + match[1],
children: [{type: 'text', value: match[0]}]
})
}
}
```
The parser knows two types of tokenizers: block level and inline level.
Block level tokenizers are the same as inline level tokenizers, with
the exception that the latter must have a [locator][].
Tokenizers _test_ whether a document starts with a certain syntactic
entity. In _silent_ mode, they return whether that test passes.
In _normal_ mode, they consume that token, a process which is called
“eating”. Locators enable tokenizers to function faster by providing
information on where the next entity may occur.
###### Signatures
* `Node? = tokenizer(eat, value)`
* `boolean? = tokenizer(eat, value, silent)`
###### Parameters
* `eat` ([`Function`][eat]) — Eat, when applicable, an entity
* `value` (`string`) — Value which may start an entity
* `silent` (`boolean`, optional) — Whether to detect or consume
###### Properties
* `locator` ([`Function`][locator])
— Required for inline tokenizers
* `onlyAtStart` (`boolean`)
— Whether nodes can only be found at the beginning of the document
* `notInBlock` (`boolean`)
— Whether nodes cannot be in blockquotes, lists, or footnote
definitions
* `notInList` (`boolean`)
— Whether nodes cannot be in lists
* `notInLink` (`boolean`)
— Whether nodes cannot be in links
###### Returns
* In _silent_ mode, whether a node can be found at the start of `value`
* In _normal_ mode, a node if it can be found at the start of `value`
### `tokenizer.locator(value, fromIndex)`
```js
function locateMention(value, fromIndex) {
return value.indexOf('@', fromIndex)
}
```
Locators are required for inline tokenization to keep the process
performant. Locators enable inline tokenizers to function faster by
providing information on where the next entity occurs. Locators
may be wrong; it's OK if there actually isn't a node to be found at
the index they return, but they must not skip over any nodes.
###### Parameters
* `value` (`string`) — Value which may contain an entity
* `fromIndex` (`number`) — Position to start searching at
###### Returns
Index at which an entity may start, and `-1` otherwise.
### `eat(subvalue)`
```js
var add = eat('foo')
```
Eat `subvalue`, which is a string at the start of the
[tokenize][tokenizer]d `value` (it's tracked to ensure the correct
value is eaten).
###### Parameters
* `subvalue` (`string`) - Value to eat.
###### Returns
[`add`][add].
### `add(node[, parent])`
```js
var add = eat('foo')
add({type: 'text', value: 'foo'})
```
Add [positional information][location] to `node` and add it to `parent`.
###### Parameters
* `node` ([`Node`][node]) - Node to patch position on and insert
* `parent` ([`Node`][node], optional) - Place to add `node` to in
the syntax tree. Defaults to the currently processed node
###### Returns
The given `node`.
### `add.test()`
Get the [positional information][location] which would be patched on
`node` by `add`.
###### Returns
[`Location`][location].
### `add.reset(node[, parent])`
`add`, but resets the internal location. Useful for example in
lists, where the same content is first eaten for a list, and later
for list items
###### Parameters
* `node` ([`Node`][node]) - Node to patch position on and insert
* `parent` ([`Node`][node], optional) - Place to add `node` to in
the syntax tree. Defaults to the currently processed node
###### Returns
The given `node`.
### Turning off a tokenizer
In rare situations, you may want to turn off a tokenizer to avoid parsing
that syntactic feature. This can be done by replacing the tokenizer from
your Parser's `blockTokenizers` (or `blockMethods`) or `inlineTokenizers`
(or `inlineMethods`).
The following example turns off indented code blocks:
```js
remarkParse.Parser.prototype.blockTokenizers.indentedCode = indentedCode
function indentedCode() {
return true
}
```
Preferably, just use [this plugin](https://github.com/zestedesavoir/zmarkdown/tree/master/packages/remark-disable-tokenizers).
## License
[MIT][license] © [Titus Wormer][author]
<!-- Definitions -->
[build-badge]: https://img.shields.io/travis/remarkjs/remark/master.svg
[build-status]: https://travis-ci.org/remarkjs/remark
[coverage-badge]: https://img.shields.io/codecov/c/github/remarkjs/remark.svg
[coverage-status]: https://codecov.io/github/remarkjs/remark
[dl-badge]: https://img.shields.io/npm/dm/remark-parse.svg
[dl]: https://www.npmjs.com/package/remark-parse
[size-badge]: https://img.shields.io/bundlephobia/minzip/remark-parse.svg
[size]: https://bundlephobia.com/result?p=remark-parse
[chat-badge]: https://img.shields.io/badge/join%20the%20community-on%20spectrum-7b16ff.svg
[chat]: https://spectrum.chat/unified/remark
[license]: https://github.com/remarkjs/remark/blob/master/license
[author]: https://wooorm.com
[npm]: https://docs.npmjs.com/cli/install
[unified]: https://github.com/unifiedjs/unified
[data]: https://github.com/unifiedjs/unified#processordatakey-value
[processor]: https://github.com/remarkjs/remark/blob/master/packages/remark
[mdast]: https://github.com/syntax-tree/mdast
[escapes]: https://spec.commonmark.org/0.28/#backslash-escapes
[node]: https://github.com/syntax-tree/unist#node
[location]: https://github.com/syntax-tree/unist#location
[parser]: https://github.com/unifiedjs/unified#processorparser
[extend]: #extending-the-parser
[tokenizer]: #function-tokenizereat-value-silent
[locator]: #tokenizerlocatorvalue-fromindex
[eat]: #eatsubvalue
[add]: #addnode-parent
[blocks]: https://github.com/remarkjs/remark/blob/master/packages/remark-parse/lib/block-elements.js
[announcement]: https://medium.com/unifiedjs/collectively-evolving-through-crowdsourcing-22c359ea95cc