planning
All checks were successful
Publish To Prod / deploy_and_publish (push) Successful in 35s

This commit is contained in:
2024-10-14 09:15:30 +02:00
parent bcba00a730
commit 6e64e138e2
21059 changed files with 2317811 additions and 1 deletion

14
node_modules/micromark/dist/util/chunked-push.js generated vendored Normal file
View File

@@ -0,0 +1,14 @@
'use strict'
var chunkedSplice = require('./chunked-splice.js')
// Append `items` to the end of `list`, in place when `list` already has
// content; an empty `list` is simply replaced by `items` itself.
// Splicing in chunks sidesteps argument-count limits for very large `items`.
function chunkedPush(list, items) {
  if (list.length === 0) {
    // Nothing accumulated yet: hand the new items back directly.
    return items
  }
  chunkedSplice(list, list.length, 0, items)
  return list
}
module.exports = chunkedPush

38
node_modules/micromark/dist/util/chunked-splice.js generated vendored Normal file
View File

@@ -0,0 +1,38 @@
'use strict'
var splice = require('../constant/splice.js')
// Like `Array#splice`, but inserting items in chunks: spreading very many
// items as rest arguments causes a stack overflow in V8 when trying to insert
// 100k items for instance.
// `Array#splice` that accepts its insertions as a list: applying `splice`
// with a huge argument list overflows the V8 call stack, so large inserts
// are performed in slices of at most 10000 items.
function chunkedSplice(list, start, remove, items) {
  var size = list.length
  var position = start
  var deleteCount = remove > 0 ? remove : 0
  var offset = 0
  var parameters

  // Clamp `position` into [0, size], counting from the end when negative.
  if (position < 0) {
    position = -position > size ? 0 : size + position
  } else if (position > size) {
    position = size
  }

  if (items.length < 10000) {
    // Few enough items to pass them all in a single call.
    parameters = Array.from(items)
    parameters.unshift(position, deleteCount)
    splice.apply(list, parameters)
  } else {
    // Delete first, then insert chunk by chunk to keep argument counts low.
    if (deleteCount) splice.apply(list, [position, deleteCount])
    while (offset < items.length) {
      parameters = items.slice(offset, offset + 10000)
      parameters.unshift(position, 0)
      splice.apply(list, parameters)
      offset += 10000
      position += 10000
    }
  }
}
module.exports = chunkedSplice

25
node_modules/micromark/dist/util/classify-character.js generated vendored Normal file
View File

@@ -0,0 +1,25 @@
'use strict'
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
var unicodePunctuation = require('../character/unicode-punctuation.js')
var unicodeWhitespace = require('../character/unicode-whitespace.js')
// Classify whether a character is unicode whitespace, unicode punctuation, or
// anything else.
// Used for attention (emphasis, strong), whose sequences can open or close
// based on the class of surrounding characters.
// Bucket a character code for attention (emphasis/strong) handling:
// `1` for EOF, markdown, or unicode whitespace; `2` for unicode punctuation;
// `undefined` for everything else.
function classifyCharacter(code) {
  var whitespaceLike =
    code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code)
  if (whitespaceLike) {
    return 1
  }
  if (unicodePunctuation(code)) {
    return 2
  }
}
module.exports = classifyCharacter

49
node_modules/micromark/dist/util/combine-extensions.js generated vendored Normal file
View File

@@ -0,0 +1,49 @@
'use strict'
var hasOwnProperty = require('../constant/has-own-property.js')
var chunkedSplice = require('./chunked-splice.js')
var miniflat = require('./miniflat.js')
// Combine several syntax extensions into one, merging the constructs
// registered per character code under each hook.
function combineExtensions(extensions) {
  var merged = {}
  for (var index = 0; index < extensions.length; index++) {
    mergeExtension(merged, extensions[index])
  }
  return merged
}
// Fold one extension into `merged`: for each hook, merge the constructs
// registered for every character code.
function mergeExtension(merged, source) {
  var hook
  var code
  var leftMap
  var rightMap
  for (hook in source) {
    leftMap = hasOwnProperty.call(merged, hook)
      ? merged[hook]
      : (merged[hook] = {})
    rightMap = source[hook]
    for (code in rightMap) {
      leftMap[code] = mergeConstructs(
        miniflat(rightMap[code]),
        hasOwnProperty.call(leftMap, code) ? leftMap[code] : []
      )
    }
  }
}
// Merge `list` into `existing` (mutated and returned): constructs marked
// `add: 'after'` go after the existing ones, the rest are inserted before.
function mergeConstructs(list, existing) {
  var before = []
  for (var index = 0; index < list.length; index++) {
    ;(list[index].add === 'after' ? existing : before).push(list[index])
  }
  chunkedSplice(existing, 0, 0, before)
  return existing
}
module.exports = combineExtensions

View File

@@ -0,0 +1,34 @@
'use strict'
var hasOwnProperty = require('../constant/has-own-property.js')
// Combine several HTML extensions into one; for the same hook and token
// type, handlers from later extensions override earlier ones.
function combineHtmlExtensions(extensions) {
  var handlers = {}
  for (var index = 0; index < extensions.length; index++) {
    mergeHtmlExtension(handlers, extensions[index])
  }
  return handlers
}
// Copy every handler of one extension into the combined map.
function mergeHtmlExtension(handlers, source) {
  var hook
  var type
  var leftMap
  var rightMap
  for (hook in source) {
    leftMap = hasOwnProperty.call(handlers, hook)
      ? handlers[hook]
      : (handlers[hook] = {})
    rightMap = source[hook]
    for (type in rightMap) {
      leftMap[type] = rightMap[type]
    }
  }
}
module.exports = combineHtmlExtensions

316
node_modules/micromark/dist/util/create-tokenizer.js generated vendored Normal file
View File

@@ -0,0 +1,316 @@
'use strict'
var assign = require('../constant/assign.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var chunkedPush = require('./chunked-push.js')
var chunkedSplice = require('./chunked-splice.js')
var miniflat = require('./miniflat.js')
var resolveAll = require('./resolve-all.js')
var serializeChunks = require('./serialize-chunks.js')
var shallow = require('./shallow.js')
var sliceChunks = require('./slice-chunks.js')
// Create a tokenizer.
// Tokenizers deal with one type of data (e.g., containers, flow, text).
// The parser is the object dealing with it all.
// `initialize` works like other constructs, except that only its `tokenize`
function is used, in which case it doesn't receive an `ok` or `nok`.
// `from` can be given to set the point before the first character, although
// when further lines are indented, they must be set with `defineSkip`.
function createTokenizer(parser, initialize, from) {
  // Current place in the stream (copied so a caller-supplied `from` point is
  // never mutated).
  var point = from
    ? shallow(from)
    : {
        line: 1,
        column: 1,
        offset: 0
      }
  // Per-line column to jump to when that line is indented (set via `defineSkip`).
  var columnStart = {}
  // Constructs whose `resolveAll` must run once at the very end.
  var resolveAllConstructs = []
  // Chunks (strings and numeric character codes) written so far.
  var chunks = []
  // Currently open tokens, innermost last.
  var stack = []
  // The effect handlers handed to construct state machines.
  var effects = {
    consume: consume,
    enter: enter,
    exit: exit,
    attempt: constructFactory(onsuccessfulconstruct),
    check: constructFactory(onsuccessfulcheck),
    interrupt: constructFactory(onsuccessfulcheck, {
      interrupt: true
    }),
    lazy: constructFactory(onsuccessfulcheck, {
      lazy: true
    })
  } // State and tools for resolving and serializing.
  var context = {
    previous: null,
    events: [],
    parser: parser,
    sliceStream: sliceStream,
    sliceSerialize: sliceSerialize,
    now: now,
    defineSkip: skip,
    write: write
  } // The state function.
  var state = initialize.tokenize.call(context, effects) // Track which character we expect to be consumed, to catch bugs.
  if (initialize.resolveAll) {
    resolveAllConstructs.push(initialize)
  } // Store where we are in the input stream.
  point._index = 0
  point._bufferIndex = -1
  return context
  // Feed a slice of chunks into the tokenizer. Returns the resolved events
  // once the stream is done (last chunk is `null`); an empty list otherwise.
  function write(slice) {
    chunks = chunkedPush(chunks, slice)
    main() // Exit if we're not done, resolve might change stuff.
    if (chunks[chunks.length - 1] !== null) {
      return []
    }
    addResult(initialize, 0) // Otherwise, resolve, and exit.
    context.events = resolveAll(resolveAllConstructs, context.events, context)
    return context.events
  } //
  // Tools.
  //
  // Serialize the chunks spanned by `token` back into a string.
  function sliceSerialize(token) {
    return serializeChunks(sliceStream(token))
  }
  // Get the raw chunks spanned by `token`.
  function sliceStream(token) {
    return sliceChunks(chunks, token)
  }
  // Snapshot of the current point.
  function now() {
    return shallow(point)
  }
  // Register a column to skip to when landing at the start of `value.line`.
  function skip(value) {
    columnStart[value.line] = value.column
    accountForPotentialSkip()
  } //
  // State management.
  //
  // Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
  // `consume`).
  // Here is where we walk through the chunks, which either include strings of
  // several characters, or numerical character codes.
  // The reason to do this in a loop instead of a call is so the stack can
  // drain.
  function main() {
    var chunkIndex
    var chunk
    while (point._index < chunks.length) {
      chunk = chunks[point._index] // If we're in a buffer chunk, loop through it.
      if (typeof chunk === 'string') {
        chunkIndex = point._index
        if (point._bufferIndex < 0) {
          point._bufferIndex = 0
        }
        while (
          point._index === chunkIndex &&
          point._bufferIndex < chunk.length
        ) {
          go(chunk.charCodeAt(point._bufferIndex))
        }
      } else {
        go(chunk)
      }
    }
  } // Deal with one code.
  function go(code) {
    state = state(code)
  } // Move a character forward.
  function consume(code) {
    if (markdownLineEnding(code)) {
      point.line++
      point.column = 1
      // CR+LF (code `-3`) is one chunk but two characters of offset.
      point.offset += code === -3 ? 2 : 1
      accountForPotentialSkip()
    } else if (code !== -1) {
      // `-1` is a virtual space: it does not advance column or offset.
      point.column++
      point.offset++
    } // Not in a string chunk.
    if (point._bufferIndex < 0) {
      point._index++
    } else {
      point._bufferIndex++ // At end of string chunk.
      if (point._bufferIndex === chunks[point._index].length) {
        point._bufferIndex = -1
        point._index++
      }
    } // Expose the previous character.
    context.previous = code // Mark as consumed.
  } // Start a token.
  function enter(type, fields) {
    var token = fields || {}
    token.type = type
    token.start = now()
    context.events.push(['enter', token, context])
    stack.push(token)
    return token
  } // Stop a token.
  function exit(type) {
    var token = stack.pop()
    token.end = now()
    context.events.push(['exit', token, context])
    return token
  } // Use results.
  function onsuccessfulconstruct(construct, info) {
    addResult(construct, info.from)
  } // Discard results.
  function onsuccessfulcheck(construct, info) {
    info.restore()
  } // Factory to attempt/check/interrupt.
  function constructFactory(onreturn, fields) {
    return hook // Handle either an object mapping codes to constructs, a list of
    // constructs, or a single construct.
    function hook(constructs, returnState, bogusState) {
      var listOfConstructs
      var constructIndex
      var currentConstruct
      var info
      return constructs.tokenize || 'length' in constructs
        ? handleListOfConstructs(miniflat(constructs))
        : handleMapOfConstructs
      function handleMapOfConstructs(code) {
        if (code in constructs || null in constructs) {
          return handleListOfConstructs(
            constructs.null
              ? /* c8 ignore next */
                miniflat(constructs[code]).concat(miniflat(constructs.null))
              : constructs[code]
          )(code)
        }
        return bogusState(code)
      }
      function handleListOfConstructs(list) {
        listOfConstructs = list
        constructIndex = 0
        return handleConstruct(list[constructIndex])
      }
      function handleConstruct(construct) {
        return start
        function start(code) {
          // To do: no need to store if there is no bogus state, probably?
          // Currently doesn't work because `inspect` in document does a check
          // w/o a bogus, which doesn't make sense. But it does seem to help
          // perf by not storing.
          info = store()
          currentConstruct = construct
          if (!construct.partial) {
            context.currentConstruct = construct
          }
          // Skip constructs that are disabled by the parser configuration.
          if (
            construct.name &&
            context.parser.constructs.disable.null.indexOf(construct.name) > -1
          ) {
            return nok()
          }
          return construct.tokenize.call(
            fields ? assign({}, context, fields) : context,
            effects,
            ok,
            nok
          )(code)
        }
      }
      function ok(code) {
        onreturn(currentConstruct, info)
        return returnState
      }
      // Try the next construct in the list, or give up to the bogus state.
      function nok(code) {
        info.restore()
        if (++constructIndex < listOfConstructs.length) {
          return handleConstruct(listOfConstructs[constructIndex])
        }
        return bogusState
      }
    }
  }
  // Apply a construct's `resolve`/`resolveTo` to the events it produced, and
  // queue its `resolveAll` for the end of the stream.
  function addResult(construct, from) {
    if (construct.resolveAll && resolveAllConstructs.indexOf(construct) < 0) {
      resolveAllConstructs.push(construct)
    }
    if (construct.resolve) {
      chunkedSplice(
        context.events,
        from,
        context.events.length - from,
        construct.resolve(context.events.slice(from), context)
      )
    }
    if (construct.resolveTo) {
      context.events = construct.resolveTo(context.events, context)
    }
  }
  // Snapshot the mutable tokenizer state; `restore` rewinds to it.
  function store() {
    var startPoint = now()
    var startPrevious = context.previous
    var startCurrentConstruct = context.currentConstruct
    var startEventsIndex = context.events.length
    var startStack = Array.from(stack)
    return {
      restore: restore,
      from: startEventsIndex
    }
    function restore() {
      point = startPoint
      context.previous = startPrevious
      context.currentConstruct = startCurrentConstruct
      context.events.length = startEventsIndex
      stack = startStack
      accountForPotentialSkip()
    }
  }
  // When at the start of a line that has a registered skip, jump the point
  // forward to the registered column.
  function accountForPotentialSkip() {
    if (point.line in columnStart && point.column < 2) {
      point.column = columnStart[point.line]
      point.offset += columnStart[point.line] - 1
    }
  }
}
module.exports = createTokenizer

11
node_modules/micromark/dist/util/miniflat.js generated vendored Normal file
View File

@@ -0,0 +1,11 @@
'use strict'
// Normalize a value to a list: nullish values become an empty list,
// array-likes (anything with `length`) pass through unchanged, and any
// other value is wrapped in a single-item array.
function miniflat(value) {
  if (value === null || value === undefined) {
    return []
  }
  return 'length' in value ? value : [value]
}
module.exports = miniflat

12
node_modules/micromark/dist/util/move-point.js generated vendored Normal file
View File

@@ -0,0 +1,12 @@
'use strict'
// Move a point forward a number of characters; NOTE(review): presumably only
// valid within the current chunk — it cannot move past other
// chunks (replacement characters, tabs, or line endings).
// Advance `point` by `shift` characters, in place, and return it.
function movePoint(point, shift) {
  point.column = point.column + shift
  point.offset = point.offset + shift
  point._bufferIndex = point._bufferIndex + shift
  return point
}
module.exports = movePoint

View File

@@ -0,0 +1,18 @@
'use strict'
// Normalize an identifier (e.g. a definition label) for matching: collapse
// runs of markdown whitespace into one space, trim the ends, and case-fold.
function normalizeIdentifier(value) {
  var collapsed = value.replace(/[\t\n\r ]+/g, ' ')
  var trimmed = collapsed.replace(/^ | $/g, '')
  // Lower- then uppercase: some characters' lowercase form uppercases to a
  // different character, so applying both yields a stable key. Uppercasing
  // also guarantees keys never collide with (lowercase) `Object.prototype`
  // method names.
  return trimmed.toLowerCase().toUpperCase()
}
module.exports = normalizeIdentifier

62
node_modules/micromark/dist/util/normalize-uri.js generated vendored Normal file
View File

@@ -0,0 +1,62 @@
'use strict'
var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
var fromCharCode = require('../constant/from-char-code.js')
// Make a value safe for a URL: percent-encode unsafe characters while
// leaving already-percent-encoded sequences intact.
function normalizeUri(value) {
var index = -1
var result = []
var start = 0
var skip = 0
var code
var next
var replace
while (++index < value.length) {
code = value.charCodeAt(index) // A correct percent encoded value.
if (
code === 37 &&
asciiAlphanumeric(value.charCodeAt(index + 1)) &&
asciiAlphanumeric(value.charCodeAt(index + 2))
) {
skip = 2
} // ASCII.
else if (code < 128) {
if (!/[!#$&-;=?-Z_a-z~]/.test(fromCharCode(code))) {
replace = fromCharCode(code)
}
} // Astral.
else if (code > 55295 && code < 57344) {
next = value.charCodeAt(index + 1) // A correct surrogate pair.
if (code < 56320 && next > 56319 && next < 57344) {
replace = fromCharCode(code, next)
skip = 1
} // Lone surrogate.
else {
replace = '\uFFFD'
}
} // Unicode.
else {
replace = fromCharCode(code)
}
if (replace) {
result.push(value.slice(start, index), encodeURIComponent(replace))
start = index + skip + 1
replace = undefined
}
if (skip) {
index += skip
skip = 0
}
}
return result.join('') + value.slice(start)
}
module.exports = normalizeUri

11
node_modules/micromark/dist/util/prefix-size.js generated vendored Normal file
View File

@@ -0,0 +1,11 @@
'use strict'
var sizeChunks = require('./size-chunks.js')
// Measure the size of the token of `type` at the end of `events`; `0` when
// the last event is missing or of a different type.
function prefixSize(events, type) {
  var tailEvent = events[events.length - 1]
  if (!tailEvent || tailEvent[1].type !== type) {
    return 0
  }
  return sizeChunks(tailEvent[2].sliceStream(tailEvent[1]))
}
module.exports = prefixSize

13
node_modules/micromark/dist/util/regex-check.js generated vendored Normal file
View File

@@ -0,0 +1,13 @@
'use strict'
var fromCharCode = require('../constant/from-char-code.js')
// Create a predicate over character codes: the returned checker turns the
// code into a single-character string and tests it against `regex`.
function regexCheck(regex) {
  return function check(code) {
    return regex.test(fromCharCode(code))
  }
}
module.exports = regexCheck

20
node_modules/micromark/dist/util/resolve-all.js generated vendored Normal file
View File

@@ -0,0 +1,20 @@
'use strict'
// Run every distinct `resolveAll` handler found across `constructs` exactly
// once, threading `events` through each call.
function resolveAll(constructs, events, context) {
  var seen = []
  for (var index = 0; index < constructs.length; index++) {
    var resolver = constructs[index].resolveAll
    if (resolver && seen.indexOf(resolver) < 0) {
      events = resolver(events, context)
      seen.push(resolver)
    }
  }
  return events
}
module.exports = resolveAll

26
node_modules/micromark/dist/util/safe-from-int.js generated vendored Normal file
View File

@@ -0,0 +1,26 @@
'use strict'
var fromCharCode = require('../constant/from-char-code.js')
// Turn a numeric character reference (digits plus a base) into a character,
// substituting U+FFFD for code points that are not allowed.
function safeFromInt(value, base) {
  var code = parseInt(value, base)
  var invalid =
    // C0 controls except for HT, LF, FF, CR, and space.
    code < 9 ||
    code === 11 ||
    (code > 13 && code < 32) ||
    // DEL and the C1 controls.
    (code > 126 && code < 160) ||
    // Lone high and low surrogates.
    (code > 55295 && code < 57344) ||
    // Noncharacters.
    (code > 64975 && code < 65008) ||
    (code & 65535) === 65535 ||
    (code & 65535) === 65534 ||
    // Beyond the last unicode code point.
    code > 1114111
  return invalid ? '\uFFFD' : fromCharCode(code)
}
module.exports = safeFromInt

40
node_modules/micromark/dist/util/serialize-chunks.js generated vendored Normal file
View File

@@ -0,0 +1,40 @@
'use strict'
var fromCharCode = require('../constant/from-char-code.js')
// Turn a list of chunks (strings and numeric character codes) back into a
// string, expanding the codes for line endings, tabs, and virtual spaces.
function serializeChunks(chunks) {
  var parts = []
  var afterTab
  for (var index = 0; index < chunks.length; index++) {
    var chunk = chunks[index]
    var value
    if (typeof chunk === 'string') {
      value = chunk
    } else if (chunk === -5) {
      value = '\r'
    } else if (chunk === -4) {
      value = '\n'
    } else if (chunk === -3) {
      value = '\r\n'
    } else if (chunk === -2) {
      value = '\t'
    } else if (chunk === -1) {
      // A virtual space directly after a tab is swallowed (the tab already
      // represents it).
      if (afterTab) continue
      value = ' '
    } else {
      // Remaining numeric codes: currently only the replacement character.
      value = fromCharCode(chunk)
    }
    afterTab = chunk === -2
    parts.push(value)
  }
  return parts.join('')
}
module.exports = serializeChunks

9
node_modules/micromark/dist/util/shallow.js generated vendored Normal file
View File

@@ -0,0 +1,9 @@
'use strict'
var assign = require('../constant/assign.js')
// Create a shallow copy of `object` (own enumerable properties only).
function shallow(object) {
  var copy = assign({}, object)
  return copy
}
module.exports = shallow

16
node_modules/micromark/dist/util/size-chunks.js generated vendored Normal file
View File

@@ -0,0 +1,16 @@
'use strict'
// Counts tabs based on their expanded size, and CR+LF as one character.
// Measure chunks: string chunks count their length, every numeric character
// code counts as one character.
function sizeChunks(chunks) {
  var total = 0
  for (var index = 0; index < chunks.length; index++) {
    var chunk = chunks[index]
    total += typeof chunk === 'string' ? chunk.length : 1
  }
  return total
}
module.exports = sizeChunks

27
node_modules/micromark/dist/util/slice-chunks.js generated vendored Normal file
View File

@@ -0,0 +1,27 @@
'use strict'
// Get the chunks of the stream covered by `token`, slicing partial string
// chunks at the buffered start and end positions.
function sliceChunks(chunks, token) {
  var startIndex = token.start._index
  var startBufferIndex = token.start._bufferIndex
  var endIndex = token.end._index
  var endBufferIndex = token.end._bufferIndex
  if (startIndex === endIndex) {
    // Start and end fall inside the same string chunk.
    return [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
  }
  var view = chunks.slice(startIndex, endIndex)
  if (startBufferIndex > -1) {
    // Trim the head chunk to where the token starts.
    view[0] = view[0].slice(startBufferIndex)
  }
  if (endBufferIndex > 0) {
    // Append the covered part of the tail chunk.
    view.push(chunks[endIndex].slice(0, endBufferIndex))
  }
  return view
}
module.exports = sliceChunks

199
node_modules/micromark/dist/util/subtokenize.js generated vendored Normal file
View File

@@ -0,0 +1,199 @@
'use strict'
var assign = require('../constant/assign.js')
var chunkedSplice = require('./chunked-splice.js')
var shallow = require('./shallow.js')
// Tokenize embedded content: tokens with a `contentType` are handed to a
// subtokenizer and the resulting child events are spliced into `events` in
// place. Returns `true` when no more subcontent was found (i.e., done).
function subtokenize(events) {
  // Map of indices to jump over (spans already replaced by child events).
  var jumps = {}
  var index = -1
  var event
  var lineIndex
  var otherIndex
  var otherEvent
  var parameters
  var subevents
  var more
  while (++index < events.length) {
    // Follow jumps across already-subtokenized spans.
    while (index in jumps) {
      index = jumps[index]
    }
    event = events[index] // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.
    if (
      index &&
      event[1].type === 'chunkFlow' &&
      events[index - 1][1].type === 'listItemPrefix'
    ) {
      subevents = event[1]._tokenizer.events
      otherIndex = 0
      // Skip over an initial blank line ending (its enter/exit pair).
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'lineEndingBlank'
      ) {
        otherIndex += 2
      }
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'content'
      ) {
        // Mark every text chunk inside the first content as such, until the
        // content ends.
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === 'content') {
            break
          }
          if (subevents[otherIndex][1].type === 'chunkText') {
            subevents[otherIndex][1].isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    } // Enter.
    if (event[0] === 'enter') {
      if (event[1].contentType) {
        // Subtokenize this content and record where to jump to next.
        assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    } // Exit.
    else if (event[1]._container || event[1]._movePreviousLineEndings) {
      // Move preceding line endings (and blank line endings) after this
      // container exit, fixing their blank/non-blank types along the way.
      otherIndex = index
      lineIndex = undefined
      while (otherIndex--) {
        otherEvent = events[otherIndex]
        if (
          otherEvent[1].type === 'lineEnding' ||
          otherEvent[1].type === 'lineEndingBlank'
        ) {
          if (otherEvent[0] === 'enter') {
            if (lineIndex) {
              events[lineIndex][1].type = 'lineEndingBlank'
            }
            otherEvent[1].type = 'lineEnding'
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }
      if (lineIndex) {
        // Fix position.
        event[1].end = shallow(events[lineIndex][1].start) // Switch container exit w/ line endings.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        chunkedSplice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }
  return !more
}
// Tokenize the chain of linked tokens starting at `events[eventIndex]` with
// a subtokenizer for its content type, splice the child events into `events`
// in place, and return a map of index jumps for the caller.
function subcontent(events, eventIndex) {
  var token = events[eventIndex][1]
  var context = events[eventIndex][2]
  var startPosition = eventIndex - 1
  var startPositions = []
  // Reuse the tokenizer already attached to the token, or create one for its
  // content type starting at the token's start point.
  var tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  var childEvents = tokenizer.events
  var jumps = []
  var gaps = {}
  var stream
  var previous
  var index
  var entered
  var end
  var adjust // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.
  while (token) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== token) {
      // Empty.
    }
    startPositions.push(startPosition)
    if (!token._tokenizer) {
      stream = context.sliceStream(token)
      // The last linked token ends the stream with a `null` chunk.
      if (!token.next) {
        stream.push(null)
      }
      if (previous) {
        tokenizer.defineSkip(token.start)
      }
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }
      tokenizer.write(stream)
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    } // Unravel the next token.
    previous = token
    token = token.next
  } // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.
  token = previous
  index = childEvents.length
  while (index--) {
    // Make sure we've at least seen something (final eol is part of the last
    // token).
    if (childEvents[index][0] === 'enter') {
      entered = true
    } else if (
      // Find a void token that includes a break.
      entered &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      // Everything from here to the previous break belongs to the current
      // linked token; splice it in and step to the previous token.
      add(childEvents.slice(index + 1, end))
      // Help GC.
      token._tokenizer = token.next = undefined
      token = token.previous
      end = index + 1
    }
  }
  // Help GC.
  tokenizer.events = token._tokenizer = token.next = undefined // Do head:
  add(childEvents.slice(0, end))
  index = -1
  adjust = 0
  // Turn the recorded splice spans into a map from old event indices to new
  // ones, shifting by how much each earlier splice changed the length.
  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }
  return gaps
  // Replace the two events (enter/exit) of the current linked token with
  // `slice` (its child events), recording the jump span.
  function add(slice) {
    var start = startPositions.pop()
    jumps.unshift([start, start + slice.length - 1])
    chunkedSplice(events, start, 2, slice)
  }
}
module.exports = subtokenize