planning
All checks were successful
Publish To Prod / deploy_and_publish (push) Successful in 35s

This commit is contained in:
2024-10-14 09:15:30 +02:00
parent bcba00a730
commit 6e64e138e2
21059 changed files with 2317811 additions and 1 deletion

14
node_modules/micromark/lib/util/chunked-push.js generated vendored Normal file
View File

@@ -0,0 +1,14 @@
'use strict'

var chunkedSplice = require('./chunked-splice.js')

// Append `items` to `list` without hitting V8 argument limits.
// Returns the combined list; when `list` is empty, `items` itself is reused.
function chunkedPush(list, items) {
  if (!list.length) {
    return items
  }

  chunkedSplice(list, list.length, 0, items)
  return list
}

module.exports = chunkedPush

12
node_modules/micromark/lib/util/chunked-push.mjs generated vendored Normal file
View File

@@ -0,0 +1,12 @@
export default chunkedPush

import chunkedSplice from './chunked-splice.mjs'

// Append `items` to `list` without hitting V8 argument limits.
// Returns the combined list; when `list` is empty, `items` itself is reused.
function chunkedPush(list, items) {
  if (!list.length) {
    return items
  }

  chunkedSplice(list, list.length, 0, items)
  return list
}

46
node_modules/micromark/lib/util/chunked-splice.js generated vendored Normal file
View File

@@ -0,0 +1,46 @@
'use strict'

var constants = require('../constant/constants.js')
var splice = require('../constant/splice.js')

// `Array#splice` takes all items to be inserted as individual argument which
// causes a stack overflow in V8 when trying to insert 100k items for instance.
function chunkedSplice(list, start, remove, items) {
  var length = list.length
  var position = 0
  var chunk

  // Clamp `start` to `[0, length]` (a negative `start` counts from the end).
  if (start < 0) {
    start = -start > length ? 0 : length + start
  } else if (start > length) {
    start = length
  }

  if (!(remove > 0)) {
    remove = 0
  }

  if (items.length < constants.v8MaxSafeChunkSize) {
    // Few enough items (a couple 10k): a single real splice is safe.
    splice.apply(list, [start, remove].concat(Array.from(items)))
  } else {
    // Delete `remove` items starting from `start`, then insert the new items
    // one bounded chunk at a time so the argument list stays small.
    if (remove) {
      splice.apply(list, [start, remove])
    }

    while (position < items.length) {
      chunk = items.slice(position, position + constants.v8MaxSafeChunkSize)
      chunk.unshift(start, 0)
      splice.apply(list, chunk)
      position += constants.v8MaxSafeChunkSize
      start += constants.v8MaxSafeChunkSize
    }
  }
}

module.exports = chunkedSplice

44
node_modules/micromark/lib/util/chunked-splice.mjs generated vendored Normal file
View File

@@ -0,0 +1,44 @@
export default chunkedSplice

import constants from '../constant/constants.mjs'
import splice from '../constant/splice.mjs'

// `Array#splice` takes all items to be inserted as individual argument which
// causes a stack overflow in V8 when trying to insert 100k items for instance.
function chunkedSplice(list, start, remove, items) {
  var length = list.length
  var position = 0
  var chunk

  // Clamp `start` to `[0, length]` (a negative `start` counts from the end).
  if (start < 0) {
    start = -start > length ? 0 : length + start
  } else if (start > length) {
    start = length
  }

  if (!(remove > 0)) {
    remove = 0
  }

  if (items.length < constants.v8MaxSafeChunkSize) {
    // Few enough items (a couple 10k): a single real splice is safe.
    splice.apply(list, [start, remove].concat(Array.from(items)))
  } else {
    // Delete `remove` items starting from `start`, then insert the new items
    // one bounded chunk at a time so the argument list stays small.
    if (remove) {
      splice.apply(list, [start, remove])
    }

    while (position < items.length) {
      chunk = items.slice(position, position + constants.v8MaxSafeChunkSize)
      chunk.unshift(start, 0)
      splice.apply(list, chunk)
      position += constants.v8MaxSafeChunkSize
      start += constants.v8MaxSafeChunkSize
    }
  }
}

27
node_modules/micromark/lib/util/classify-character.js generated vendored Normal file
View File

@@ -0,0 +1,27 @@
'use strict'

var codes = require('../character/codes.js')
var markdownLineEndingOrSpace = require('../character/markdown-line-ending-or-space.js')
var unicodePunctuation = require('../character/unicode-punctuation.js')
var unicodeWhitespace = require('../character/unicode-whitespace.js')
var constants = require('../constant/constants.js')

// Classify whether a character code is unicode whitespace, unicode
// punctuation, or anything else (`undefined`).
// Used for attention (emphasis, strong), whose sequences can open or close
// based on the class of surrounding characters.
function classifyCharacter(code) {
  var whitespace =
    code === codes.eof ||
    markdownLineEndingOrSpace(code) ||
    unicodeWhitespace(code)

  if (whitespace) {
    return constants.characterGroupWhitespace
  }

  if (unicodePunctuation(code)) {
    return constants.characterGroupPunctuation
  }
}

module.exports = classifyCharacter

25
node_modules/micromark/lib/util/classify-character.mjs generated vendored Normal file
View File

@@ -0,0 +1,25 @@
export default classifyCharacter

import codes from '../character/codes.mjs'
import markdownLineEndingOrSpace from '../character/markdown-line-ending-or-space.mjs'
import unicodePunctuation from '../character/unicode-punctuation.mjs'
import unicodeWhitespace from '../character/unicode-whitespace.mjs'
import constants from '../constant/constants.mjs'

// Classify whether a character code is unicode whitespace, unicode
// punctuation, or anything else (`undefined`).
// Used for attention (emphasis, strong), whose sequences can open or close
// based on the class of surrounding characters.
function classifyCharacter(code) {
  var whitespace =
    code === codes.eof ||
    markdownLineEndingOrSpace(code) ||
    unicodeWhitespace(code)

  if (whitespace) {
    return constants.characterGroupWhitespace
  }

  if (unicodePunctuation(code)) {
    return constants.characterGroupPunctuation
  }
}

50
node_modules/micromark/lib/util/combine-extensions.js generated vendored Normal file
View File

@@ -0,0 +1,50 @@
'use strict'

var hasOwnProperty = require('../constant/has-own-property.js')
var chunkedSplice = require('./chunked-splice.js')
var miniflat = require('./miniflat.js')

// Combine several syntax extensions into one.
function combineExtensions(extensions) {
  var all = {}
  var index

  for (index = 0; index < extensions.length; index++) {
    mergeExtension(all, extensions[index])
  }

  return all
}

// Merge one extension into the accumulated map of hooks.
function mergeExtension(all, extension) {
  var hook
  var code
  var left
  var right

  for (hook in extension) {
    left = hasOwnProperty.call(all, hook) ? all[hook] : (all[hook] = {})
    right = extension[hook]

    for (code in right) {
      left[code] = mergeConstructs(
        miniflat(right[code]),
        hasOwnProperty.call(left, code) ? left[code] : []
      )
    }
  }
}

// Merge lists of constructs: constructs marked `add: 'after'` go after the
// existing ones, everything else is spliced in before them.
function mergeConstructs(list, existing) {
  var index
  var before = []

  for (index = 0; index < list.length; index++) {
    ;(list[index].add === 'after' ? existing : before).push(list[index])
  }

  chunkedSplice(existing, 0, 0, before)
  return existing
}

module.exports = combineExtensions

48
node_modules/micromark/lib/util/combine-extensions.mjs generated vendored Normal file
View File

@@ -0,0 +1,48 @@
export default combineExtensions

import own from '../constant/has-own-property.mjs'
import chunkedSplice from './chunked-splice.mjs'
import miniflat from './miniflat.mjs'

// Combine several syntax extensions into one.
function combineExtensions(extensions) {
  var all = {}
  var index

  for (index = 0; index < extensions.length; index++) {
    mergeExtension(all, extensions[index])
  }

  return all
}

// Merge one extension into the accumulated map of hooks.
function mergeExtension(all, extension) {
  var hook
  var code
  var left
  var right

  for (hook in extension) {
    left = own.call(all, hook) ? all[hook] : (all[hook] = {})
    right = extension[hook]

    for (code in right) {
      left[code] = mergeConstructs(
        miniflat(right[code]),
        own.call(left, code) ? left[code] : []
      )
    }
  }
}

// Merge lists of constructs: constructs marked `add: 'after'` go after the
// existing ones, everything else is spliced in before them.
function mergeConstructs(list, existing) {
  var index
  var before = []

  for (index = 0; index < list.length; index++) {
    ;(list[index].add === 'after' ? existing : before).push(list[index])
  }

  chunkedSplice(existing, 0, 0, before)
  return existing
}

View File

@@ -0,0 +1,35 @@
'use strict'

var hasOwnProperty = require('../constant/has-own-property.js')

// Combine several HTML extensions into one.
function combineHtmlExtensions(extensions) {
  var handlers = {}
  var index

  for (index = 0; index < extensions.length; index++) {
    mergeHtmlExtension(handlers, extensions[index])
  }

  return handlers
}

// Copy every handler of one extension into the accumulated map;
// later extensions win when hooks/types collide.
function mergeHtmlExtension(handlers, extension) {
  var hook
  var type
  var left
  var right

  for (hook in extension) {
    left = hasOwnProperty.call(handlers, hook)
      ? handlers[hook]
      : (handlers[hook] = {})
    right = extension[hook]

    for (type in right) {
      left[type] = right[type]
    }
  }
}

module.exports = combineHtmlExtensions

View File

@@ -0,0 +1,31 @@
export default combineHtmlExtensions

import own from '../constant/has-own-property.mjs'

// Combine several HTML extensions into one.
function combineHtmlExtensions(extensions) {
  var handlers = {}
  var index

  for (index = 0; index < extensions.length; index++) {
    mergeHtmlExtension(handlers, extensions[index])
  }

  return handlers
}

// Copy every handler of one extension into the accumulated map;
// later extensions win when hooks/types collide.
function mergeHtmlExtension(handlers, extension) {
  var hook
  var type
  var left
  var right

  for (hook in extension) {
    left = own.call(handlers, hook) ? handlers[hook] : (handlers[hook] = {})
    right = extension[hook]

    for (type in right) {
      left[type] = right[type]
    }
  }
}

440
node_modules/micromark/lib/util/create-tokenizer.js generated vendored Normal file
View File

@@ -0,0 +1,440 @@
'use strict'

var assert = require('assert')
var createDebug = require('debug')
var assign = require('../constant/assign.js')
var codes = require('../character/codes.js')
var markdownLineEnding = require('../character/markdown-line-ending.js')
var chunkedPush = require('./chunked-push.js')
var chunkedSplice = require('./chunked-splice.js')
var miniflat = require('./miniflat.js')
var resolveAll = require('./resolve-all.js')
var serializeChunks = require('./serialize-chunks.js')
var shallow = require('./shallow.js')
var sliceChunks = require('./slice-chunks.js')

// Wrap a CJS export so it is always reachable through a `default` key,
// mirroring an ESM default import.
function _interopDefaultLegacy(e) {
  return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}

var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
var createDebug__default = /*#__PURE__*/ _interopDefaultLegacy(createDebug)

var debug = createDebug__default['default']('micromark')

// Create a tokenizer.
// Tokenizers deal with one type of data (e.g., containers, flow, text).
// The parser is the object dealing with it all.
// `initialize` works like other constructs, except that only its `tokenize`
// function is used, in which case it doesn't receive an `ok` or `nok`.
// `from` can be given to set the point before the first character, although
// when further lines are indented, they must be set with `defineSkip`.
function createTokenizer(parser, initialize, from) {
  var point = from ? shallow(from) : {line: 1, column: 1, offset: 0}
  // Maps a line number to the column that line effectively starts at
  // (set through `defineSkip`).
  var columnStart = {}
  var resolveAllConstructs = []
  var chunks = []
  var stack = []
  // Guards that every state consumes exactly one code before yielding.
  var consumed = true

  // Tools used for tokenizing.
  var effects = {
    consume: consume,
    enter: enter,
    exit: exit,
    attempt: constructFactory(onsuccessfulconstruct),
    check: constructFactory(onsuccessfulcheck),
    interrupt: constructFactory(onsuccessfulcheck, {interrupt: true}),
    lazy: constructFactory(onsuccessfulcheck, {lazy: true})
  }

  // State and tools for resolving and serializing.
  var context = {
    previous: codes.eof,
    events: [],
    parser: parser,
    sliceStream: sliceStream,
    sliceSerialize: sliceSerialize,
    now: now,
    defineSkip: skip,
    write: write
  }

  // The state function.
  var state = initialize.tokenize.call(context, effects)

  // Track which character we expect to be consumed, to catch bugs.
  var expectedCode

  if (initialize.resolveAll) {
    resolveAllConstructs.push(initialize)
  }

  // Store where we are in the input stream.
  point._index = 0
  point._bufferIndex = -1

  return context

  // Feed a slice of chunks into the state machine; returns the events once
  // the EOF chunk has been written, an empty array before that.
  function write(slice) {
    chunks = chunkedPush(chunks, slice)

    main()

    // Exit if we're not done, resolve might change stuff.
    if (chunks[chunks.length - 1] !== codes.eof) {
      return []
    }

    addResult(initialize, 0)

    // Otherwise, resolve, and exit.
    context.events = resolveAll(resolveAllConstructs, context.events, context)

    return context.events
  }

  //
  // Tools.
  //

  function sliceSerialize(token) {
    return serializeChunks(sliceStream(token))
  }

  function sliceStream(token) {
    return sliceChunks(chunks, token)
  }

  function now() {
    return shallow(point)
  }

  function skip(value) {
    columnStart[value.line] = value.column
    accountForPotentialSkip()
    debug('position: define skip: `%j`', point)
  }

  //
  // State management.
  //

  // Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
  // `consume`).
  // Here is where we walk through the chunks, which either include strings of
  // several characters, or numerical character codes.
  // The reason to do this in a loop instead of a call is so the stack can
  // drain.
  function main() {
    var chunkIndex
    var chunk

    while (point._index < chunks.length) {
      chunk = chunks[point._index]

      // If we're in a buffer chunk, loop through it.
      if (typeof chunk === 'string') {
        chunkIndex = point._index

        if (point._bufferIndex < 0) {
          point._bufferIndex = 0
        }

        // `consume` may advance `_index` past this chunk; re-check both.
        while (
          point._index === chunkIndex &&
          point._bufferIndex < chunk.length
        ) {
          go(chunk.charCodeAt(point._bufferIndex))
        }
      } else {
        go(chunk)
      }
    }
  }

  // Deal with one code.
  function go(code) {
    assert__default['default'].equal(
      consumed,
      true,
      'expected character to be consumed'
    )
    consumed = undefined
    debug('main: passing `%s` to %s', code, state.name)
    expectedCode = code
    state = state(code)
  }

  // Move a character forward.
  function consume(code) {
    assert__default['default'].equal(
      code,
      expectedCode,
      'expected given code to equal expected code'
    )

    debug('consume: `%s`', code)

    assert__default['default'].equal(
      consumed,
      undefined,
      'expected code to not have been consumed'
    )
    assert__default['default'](
      code === null
        ? !context.events.length ||
            context.events[context.events.length - 1][0] === 'exit'
        : context.events[context.events.length - 1][0] === 'enter',
      'expected last token to be open'
    )

    if (markdownLineEnding(code)) {
      point.line++
      point.column = 1
      // CRLF is a single chunk but two characters in the source.
      point.offset += code === codes.carriageReturnLineFeed ? 2 : 1
      accountForPotentialSkip()
      debug('position: after eol: `%j`', point)
    } else if (code !== codes.virtualSpace) {
      // Virtual spaces (tab expansion) don't advance the position.
      point.column++
      point.offset++
    }

    // Not in a string chunk.
    if (point._bufferIndex < 0) {
      point._index++
    } else {
      point._bufferIndex++

      // At end of string chunk.
      if (point._bufferIndex === chunks[point._index].length) {
        point._bufferIndex = -1
        point._index++
      }
    }

    // Expose the previous character.
    context.previous = code

    // Mark as consumed.
    consumed = true
  }

  // Start a token.
  function enter(type, fields) {
    var token = fields || {}
    token.type = type
    token.start = now()

    assert__default['default'].equal(
      typeof type,
      'string',
      'expected string type'
    )
    assert__default['default'].notEqual(
      type.length,
      0,
      'expected non-empty string'
    )
    debug('enter: `%s`', type)

    context.events.push(['enter', token, context])

    stack.push(token)

    return token
  }

  // Stop a token.
  function exit(type) {
    assert__default['default'].equal(
      typeof type,
      'string',
      'expected string type'
    )
    assert__default['default'].notEqual(
      type.length,
      0,
      'expected non-empty string'
    )
    assert__default['default'].notEqual(
      stack.length,
      0,
      'cannot close w/o open tokens'
    )

    var token = stack.pop()
    token.end = now()

    assert__default['default'].equal(
      type,
      token.type,
      'expected exit token to match current token'
    )

    assert__default['default'](
      !(
        token.start._index === token.end._index &&
        token.start._bufferIndex === token.end._bufferIndex
      ),
      'expected non-empty token (`' + type + '`)'
    )

    debug('exit: `%s`', token.type)

    context.events.push(['exit', token, context])

    return token
  }

  // Use results.
  function onsuccessfulconstruct(construct, info) {
    addResult(construct, info.from)
  }

  // Discard results.
  function onsuccessfulcheck(construct, info) {
    info.restore()
  }

  // Factory to attempt/check/interrupt.
  function constructFactory(onreturn, fields) {
    return hook

    // Handle either an object mapping codes to constructs, a list of
    // constructs, or a single construct.
    function hook(constructs, returnState, bogusState) {
      var listOfConstructs
      var constructIndex
      var currentConstruct
      var info

      return constructs.tokenize || 'length' in constructs
        ? handleListOfConstructs(miniflat(constructs))
        : handleMapOfConstructs

      function handleMapOfConstructs(code) {
        if (code in constructs || codes.eof in constructs) {
          return handleListOfConstructs(
            constructs.null
              ? /* c8 ignore next */
                miniflat(constructs[code]).concat(miniflat(constructs.null))
              : constructs[code]
          )(code)
        }

        return bogusState(code)
      }

      function handleListOfConstructs(list) {
        listOfConstructs = list
        constructIndex = 0
        return handleConstruct(list[constructIndex])
      }

      function handleConstruct(construct) {
        return start

        function start(code) {
          // To do: no need to store if there is no bogus state, probably?
          // Currently doesn't work because `inspect` in document does a check
          // w/o a bogus, which doesn't make sense. But it does seem to help
          // perf by not storing.
          info = store()
          currentConstruct = construct

          if (!construct.partial) {
            context.currentConstruct = construct
          }

          // Disabled constructs (`options.extensions` → `disable.null`)
          // immediately fail.
          if (
            construct.name &&
            context.parser.constructs.disable.null.indexOf(construct.name) > -1
          ) {
            return nok(code)
          }

          return construct.tokenize.call(
            fields ? assign({}, context, fields) : context,
            effects,
            ok,
            nok
          )(code)
        }
      }

      function ok(code) {
        assert__default['default'].equal(code, expectedCode, 'expected code')
        consumed = true
        onreturn(currentConstruct, info)
        return returnState
      }

      function nok(code) {
        assert__default['default'].equal(code, expectedCode, 'expected code')
        consumed = true
        info.restore()

        // Try the next construct in the list, if any; otherwise give up.
        if (++constructIndex < listOfConstructs.length) {
          return handleConstruct(listOfConstructs[constructIndex])
        }

        return bogusState
      }
    }
  }

  // Apply a successful construct's resolvers to the event stream.
  function addResult(construct, from) {
    if (construct.resolveAll && resolveAllConstructs.indexOf(construct) < 0) {
      resolveAllConstructs.push(construct)
    }

    if (construct.resolve) {
      chunkedSplice(
        context.events,
        from,
        context.events.length - from,
        construct.resolve(context.events.slice(from), context)
      )
    }

    if (construct.resolveTo) {
      context.events = construct.resolveTo(context.events, context)
    }

    assert__default['default'](
      construct.partial ||
        !context.events.length ||
        context.events[context.events.length - 1][0] === 'exit',
      'expected last token to end'
    )
  }

  // Snapshot the tokenizer state so a failed attempt can be rolled back.
  function store() {
    var startPoint = now()
    var startPrevious = context.previous
    var startCurrentConstruct = context.currentConstruct
    var startEventsIndex = context.events.length
    var startStack = Array.from(stack)

    return {restore: restore, from: startEventsIndex}

    // Roll the tokenizer back to the captured snapshot.
    function restore() {
      point = startPoint
      context.previous = startPrevious
      context.currentConstruct = startCurrentConstruct
      context.events.length = startEventsIndex
      stack = startStack
      accountForPotentialSkip()
      debug('position: restore: `%j`', point)
    }
  }

  // When a skip is defined for the current line and we're at its start,
  // jump column/offset to the defined position.
  function accountForPotentialSkip() {
    if (point.line in columnStart && point.column < 2) {
      point.column = columnStart[point.line]
      point.offset += columnStart[point.line] - 1
    }
  }
}

module.exports = createTokenizer

399
node_modules/micromark/lib/util/create-tokenizer.mjs generated vendored Normal file
View File

@@ -0,0 +1,399 @@
export default createTokenizer

import assert from 'assert'
import createDebug from 'debug'
import assign from '../constant/assign.mjs'
import codes from '../character/codes.mjs'
import markdownLineEnding from '../character/markdown-line-ending.mjs'
import chunkedPush from './chunked-push.mjs'
import chunkedSplice from './chunked-splice.mjs'
import miniflat from './miniflat.mjs'
import resolveAll from './resolve-all.mjs'
import serializeChunks from './serialize-chunks.mjs'
import shallow from './shallow.mjs'
import sliceChunks from './slice-chunks.mjs'

var debug = createDebug('micromark')

// Create a tokenizer.
// Tokenizers deal with one type of data (e.g., containers, flow, text).
// The parser is the object dealing with it all.
// `initialize` works like other constructs, except that only its `tokenize`
// function is used, in which case it doesn't receive an `ok` or `nok`.
// `from` can be given to set the point before the first character, although
// when further lines are indented, they must be set with `defineSkip`.
function createTokenizer(parser, initialize, from) {
  var point = from ? shallow(from) : {line: 1, column: 1, offset: 0}
  // Maps a line number to the column that line effectively starts at
  // (set through `defineSkip`).
  var columnStart = {}
  var resolveAllConstructs = []
  var chunks = []
  var stack = []
  // Guards that every state consumes exactly one code before yielding.
  var consumed = true

  // Tools used for tokenizing.
  var effects = {
    consume: consume,
    enter: enter,
    exit: exit,
    attempt: constructFactory(onsuccessfulconstruct),
    check: constructFactory(onsuccessfulcheck),
    interrupt: constructFactory(onsuccessfulcheck, {interrupt: true}),
    lazy: constructFactory(onsuccessfulcheck, {lazy: true})
  }

  // State and tools for resolving and serializing.
  var context = {
    previous: codes.eof,
    events: [],
    parser: parser,
    sliceStream: sliceStream,
    sliceSerialize: sliceSerialize,
    now: now,
    defineSkip: skip,
    write: write
  }

  // The state function.
  var state = initialize.tokenize.call(context, effects)

  // Track which character we expect to be consumed, to catch bugs.
  var expectedCode

  if (initialize.resolveAll) {
    resolveAllConstructs.push(initialize)
  }

  // Store where we are in the input stream.
  point._index = 0
  point._bufferIndex = -1

  return context

  // Feed a slice of chunks into the state machine; returns the events once
  // the EOF chunk has been written, an empty array before that.
  function write(slice) {
    chunks = chunkedPush(chunks, slice)

    main()

    // Exit if we're not done, resolve might change stuff.
    if (chunks[chunks.length - 1] !== codes.eof) {
      return []
    }

    addResult(initialize, 0)

    // Otherwise, resolve, and exit.
    context.events = resolveAll(resolveAllConstructs, context.events, context)

    return context.events
  }

  //
  // Tools.
  //

  function sliceSerialize(token) {
    return serializeChunks(sliceStream(token))
  }

  function sliceStream(token) {
    return sliceChunks(chunks, token)
  }

  function now() {
    return shallow(point)
  }

  function skip(value) {
    columnStart[value.line] = value.column
    accountForPotentialSkip()
    debug('position: define skip: `%j`', point)
  }

  //
  // State management.
  //

  // Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
  // `consume`).
  // Here is where we walk through the chunks, which either include strings of
  // several characters, or numerical character codes.
  // The reason to do this in a loop instead of a call is so the stack can
  // drain.
  function main() {
    var chunkIndex
    var chunk

    while (point._index < chunks.length) {
      chunk = chunks[point._index]

      // If we're in a buffer chunk, loop through it.
      if (typeof chunk === 'string') {
        chunkIndex = point._index

        if (point._bufferIndex < 0) {
          point._bufferIndex = 0
        }

        // `consume` may advance `_index` past this chunk; re-check both.
        while (
          point._index === chunkIndex &&
          point._bufferIndex < chunk.length
        ) {
          go(chunk.charCodeAt(point._bufferIndex))
        }
      } else {
        go(chunk)
      }
    }
  }

  // Deal with one code.
  function go(code) {
    assert.equal(consumed, true, 'expected character to be consumed')
    consumed = undefined
    debug('main: passing `%s` to %s', code, state.name)
    expectedCode = code
    state = state(code)
  }

  // Move a character forward.
  function consume(code) {
    assert.equal(
      code,
      expectedCode,
      'expected given code to equal expected code'
    )

    debug('consume: `%s`', code)

    assert.equal(consumed, undefined, 'expected code to not have been consumed')
    assert(
      code === null
        ? !context.events.length ||
            context.events[context.events.length - 1][0] === 'exit'
        : context.events[context.events.length - 1][0] === 'enter',
      'expected last token to be open'
    )

    if (markdownLineEnding(code)) {
      point.line++
      point.column = 1
      // CRLF is a single chunk but two characters in the source.
      point.offset += code === codes.carriageReturnLineFeed ? 2 : 1
      accountForPotentialSkip()
      debug('position: after eol: `%j`', point)
    } else if (code !== codes.virtualSpace) {
      // Virtual spaces (tab expansion) don't advance the position.
      point.column++
      point.offset++
    }

    // Not in a string chunk.
    if (point._bufferIndex < 0) {
      point._index++
    } else {
      point._bufferIndex++

      // At end of string chunk.
      if (point._bufferIndex === chunks[point._index].length) {
        point._bufferIndex = -1
        point._index++
      }
    }

    // Expose the previous character.
    context.previous = code

    // Mark as consumed.
    consumed = true
  }

  // Start a token.
  function enter(type, fields) {
    var token = fields || {}
    token.type = type
    token.start = now()

    assert.equal(typeof type, 'string', 'expected string type')
    assert.notEqual(type.length, 0, 'expected non-empty string')
    debug('enter: `%s`', type)

    context.events.push(['enter', token, context])

    stack.push(token)

    return token
  }

  // Stop a token.
  function exit(type) {
    assert.equal(typeof type, 'string', 'expected string type')
    assert.notEqual(type.length, 0, 'expected non-empty string')
    assert.notEqual(stack.length, 0, 'cannot close w/o open tokens')

    var token = stack.pop()
    token.end = now()

    assert.equal(type, token.type, 'expected exit token to match current token')

    assert(
      !(
        token.start._index === token.end._index &&
        token.start._bufferIndex === token.end._bufferIndex
      ),
      'expected non-empty token (`' + type + '`)'
    )

    debug('exit: `%s`', token.type)

    context.events.push(['exit', token, context])

    return token
  }

  // Use results.
  function onsuccessfulconstruct(construct, info) {
    addResult(construct, info.from)
  }

  // Discard results.
  function onsuccessfulcheck(construct, info) {
    info.restore()
  }

  // Factory to attempt/check/interrupt.
  function constructFactory(onreturn, fields) {
    return hook

    // Handle either an object mapping codes to constructs, a list of
    // constructs, or a single construct.
    function hook(constructs, returnState, bogusState) {
      var listOfConstructs
      var constructIndex
      var currentConstruct
      var info

      return constructs.tokenize || 'length' in constructs
        ? handleListOfConstructs(miniflat(constructs))
        : handleMapOfConstructs

      function handleMapOfConstructs(code) {
        if (code in constructs || codes.eof in constructs) {
          return handleListOfConstructs(
            constructs.null
              ? /* c8 ignore next */
                miniflat(constructs[code]).concat(miniflat(constructs.null))
              : constructs[code]
          )(code)
        }

        return bogusState(code)
      }

      function handleListOfConstructs(list) {
        listOfConstructs = list
        constructIndex = 0
        return handleConstruct(list[constructIndex])
      }

      function handleConstruct(construct) {
        return start

        function start(code) {
          // To do: no need to store if there is no bogus state, probably?
          // Currently doesn't work because `inspect` in document does a check
          // w/o a bogus, which doesn't make sense. But it does seem to help
          // perf by not storing.
          info = store()
          currentConstruct = construct

          if (!construct.partial) {
            context.currentConstruct = construct
          }

          // Disabled constructs (`options.extensions` → `disable.null`)
          // immediately fail.
          if (
            construct.name &&
            context.parser.constructs.disable.null.indexOf(construct.name) > -1
          ) {
            return nok(code)
          }

          return construct.tokenize.call(
            fields ? assign({}, context, fields) : context,
            effects,
            ok,
            nok
          )(code)
        }
      }

      function ok(code) {
        assert.equal(code, expectedCode, 'expected code')
        consumed = true
        onreturn(currentConstruct, info)
        return returnState
      }

      function nok(code) {
        assert.equal(code, expectedCode, 'expected code')
        consumed = true
        info.restore()

        // Try the next construct in the list, if any; otherwise give up.
        if (++constructIndex < listOfConstructs.length) {
          return handleConstruct(listOfConstructs[constructIndex])
        }

        return bogusState
      }
    }
  }

  // Apply a successful construct's resolvers to the event stream.
  function addResult(construct, from) {
    if (construct.resolveAll && resolveAllConstructs.indexOf(construct) < 0) {
      resolveAllConstructs.push(construct)
    }

    if (construct.resolve) {
      chunkedSplice(
        context.events,
        from,
        context.events.length - from,
        construct.resolve(context.events.slice(from), context)
      )
    }

    if (construct.resolveTo) {
      context.events = construct.resolveTo(context.events, context)
    }

    assert(
      construct.partial ||
        !context.events.length ||
        context.events[context.events.length - 1][0] === 'exit',
      'expected last token to end'
    )
  }

  // Snapshot the tokenizer state so a failed attempt can be rolled back.
  function store() {
    var startPoint = now()
    var startPrevious = context.previous
    var startCurrentConstruct = context.currentConstruct
    var startEventsIndex = context.events.length
    var startStack = Array.from(stack)

    return {restore: restore, from: startEventsIndex}

    // Roll the tokenizer back to the captured snapshot.
    function restore() {
      point = startPoint
      context.previous = startPrevious
      context.currentConstruct = startCurrentConstruct
      context.events.length = startEventsIndex
      stack = startStack
      accountForPotentialSkip()
      debug('position: restore: `%j`', point)
    }
  }

  // When a skip is defined for the current line and we're at its start,
  // jump column/offset to the defined position.
  function accountForPotentialSkip() {
    if (point.line in columnStart && point.column < 2) {
      point.column = columnStart[point.line]
      point.offset += columnStart[point.line] - 1
    }
  }
}

11
node_modules/micromark/lib/util/miniflat.js generated vendored Normal file
View File

@@ -0,0 +1,11 @@
'use strict'

// Turn a value into an array: `null`/`undefined` become the empty array,
// array-like values are returned as-is, anything else is wrapped.
function miniflat(value) {
  if (value === null || value === undefined) {
    return []
  }

  return 'length' in value ? value : [value]
}

module.exports = miniflat

9
node_modules/micromark/lib/util/miniflat.mjs generated vendored Normal file
View File

@@ -0,0 +1,9 @@
export default miniflat

// Turn a value into an array: `null`/`undefined` become the empty array,
// array-like values are returned as-is, anything else is wrapped.
function miniflat(value) {
  if (value === null || value === undefined) {
    return []
  }

  return 'length' in value ? value : [value]
}

12
node_modules/micromark/lib/util/move-point.js generated vendored Normal file
View File

@@ -0,0 +1,12 @@
'use strict'

// Note! `move` only works inside lines! It's not possible to move past other
// chunks (replacement characters, tabs, or line endings).
// Mutates and returns the given `point`.
function movePoint(point, offset) {
  point.column = point.column + offset
  point.offset = point.offset + offset
  point._bufferIndex = point._bufferIndex + offset
  return point
}

module.exports = movePoint

10
node_modules/micromark/lib/util/move-point.mjs generated vendored Normal file
View File

@@ -0,0 +1,10 @@
export default movePoint

// Note! `move` only works inside lines! It's not possible to move past other
// chunks (replacement characters, tabs, or line endings).
// Mutates and returns the given `point`.
function movePoint(point, offset) {
  point.column = point.column + offset
  point.offset = point.offset + offset
  point._bufferIndex = point._bufferIndex + offset
  return point
}

View File

@@ -0,0 +1,23 @@
'use strict'

var values = require('../character/values.js')

// Normalize a link/definition identifier: collapse markdown whitespace,
// trim, and case-fold.
function normalizeIdentifier(value) {
  // Collapse Markdown whitespace, then trim leading/trailing spaces.
  var collapsed = value
    .replace(/[\t\n\r ]+/g, values.space)
    .replace(/^ | $/g, '')

  // Some characters are considered “uppercase”, but if their lowercase
  // counterpart is uppercased will result in a different uppercase
  // character.
  // Hence, to get that form, we perform both lower- and uppercase.
  // Upper case makes sure keys will not interact with default prototypal
  // methods: no object method is uppercase.
  return collapsed.toLowerCase().toUpperCase()
}

module.exports = normalizeIdentifier

View File

@@ -0,0 +1,21 @@
export default normalizeIdentifier

import values from '../character/values.mjs'

// Normalize a link/definition identifier: collapse markdown whitespace,
// trim, and case-fold.
function normalizeIdentifier(value) {
  // Collapse Markdown whitespace, then trim leading/trailing spaces.
  var collapsed = value
    .replace(/[\t\n\r ]+/g, values.space)
    .replace(/^ | $/g, '')

  // Some characters are considered “uppercase”, but if their lowercase
  // counterpart is uppercased will result in a different uppercase
  // character.
  // Hence, to get that form, we perform both lower- and uppercase.
  // Upper case makes sure keys will not interact with default prototypal
  // methods: no object method is uppercase.
  return collapsed.toLowerCase().toUpperCase()
}

70
node_modules/micromark/lib/util/normalize-uri.js generated vendored Normal file
View File

@@ -0,0 +1,70 @@
'use strict'

var asciiAlphanumeric = require('../character/ascii-alphanumeric.js')
var codes = require('../character/codes.js')
var values = require('../character/values.js')
var fromCharCode = require('../constant/from-char-code.js')

// Encode unsafe characters with percent-encoding, skipping already
// encoded sequences.
function normalizeUri(value) {
  var parts = []
  var from = 0
  var jump = 0
  var position = -1
  var code
  var next
  var replacement

  while (++position < value.length) {
    code = value.charCodeAt(position)

    if (
      code === codes.percentSign &&
      asciiAlphanumeric(value.charCodeAt(position + 1)) &&
      asciiAlphanumeric(value.charCodeAt(position + 2))
    ) {
      // A complete, valid percent-encoded triple: keep it as-is.
      jump = 2
    } else if (code < 128) {
      // ASCII: encode everything outside the URL-safe set.
      if (!/[!#$&-;=?-Z_a-z~]/.test(fromCharCode(code))) {
        replacement = fromCharCode(code)
      }
    } else if (code > 55295 && code < 57344) {
      // Surrogate range.
      next = value.charCodeAt(position + 1)

      if (code < 56320 && next > 56319 && next < 57344) {
        // A correct surrogate pair: encode both halves together.
        replacement = fromCharCode(code, next)
        jump = 1
      } else {
        // A lone surrogate: substitute the replacement character.
        replacement = values.replacementCharacter
      }
    } else {
      // Other non-ASCII: always encode.
      replacement = fromCharCode(code)
    }

    if (replacement) {
      parts.push(value.slice(from, position), encodeURIComponent(replacement))
      from = position + jump + 1
      replacement = undefined
    }

    if (jump) {
      position += jump
      jump = 0
    }
  }

  return parts.join('') + value.slice(from)
}

module.exports = normalizeUri

68
node_modules/micromark/lib/util/normalize-uri.mjs generated vendored Normal file
View File

@@ -0,0 +1,68 @@
export default normalizeUri
import asciiAlphanumeric from '../character/ascii-alphanumeric.mjs'
import codes from '../character/codes.mjs'
import values from '../character/values.mjs'
import fromCharCode from '../constant/from-char-code.mjs'
// Encode unsafe characters with percent-encoding, skipping already
// encoded sequences.
/**
 * Encode unsafe characters of a URL-like value with percent-encoding,
 * skipping sequences that are already percent-encoded.
 *
 * Astral characters are encoded as full surrogate pairs; lone surrogates
 * are substituted with the replacement character before encoding.
 *
 * @param {string} value URL (or URL-like string) to normalize.
 * @returns {string} Normalized value.
 */
function normalizeUri(value) {
  var index = -1
  var result = [] // Output pieces, joined at the end.
  var start = 0 // Start of the pending not-yet-flushed slice of `value`.
  var skip = 0 // Extra code units to jump over after this iteration.
  var code
  var next
  var replace
  while (++index < value.length) {
    code = value.charCodeAt(index)
    // A correct percent encoded value.
    // NOTE(review): `asciiAlphanumeric` also accepts non-hex letters, so
    // e.g. `%zz` is treated as already encoded too — presumably intended;
    // confirm against the CommonMark/GFM URI handling if this matters.
    if (
      code === codes.percentSign &&
      asciiAlphanumeric(value.charCodeAt(index + 1)) &&
      asciiAlphanumeric(value.charCodeAt(index + 2))
    ) {
      skip = 2
    }
    // ASCII.
    else if (code < 128) {
      // Characters in this class are URL-safe and kept verbatim; any other
      // ASCII character is percent-encoded.
      if (!/[!#$&-;=?-Z_a-z~]/.test(fromCharCode(code))) {
        replace = fromCharCode(code)
      }
    }
    // Astral.
    else if (code > 55295 && code < 57344) {
      next = value.charCodeAt(index + 1)
      // A correct surrogate pair.
      if (code < 56320 && next > 56319 && next < 57344) {
        replace = fromCharCode(code, next)
        skip = 1
      }
      // Lone surrogate.
      else {
        replace = values.replacementCharacter
      }
    }
    // Unicode.
    else {
      replace = fromCharCode(code)
    }
    if (replace) {
      // Flush the safe run before this character, then its encoded form.
      result.push(value.slice(start, index), encodeURIComponent(replace))
      start = index + skip + 1
      replace = undefined
    }
    if (skip) {
      index += skip
      skip = 0
    }
  }
  // Remainder after the last replacement (or the whole string when none).
  return result.join('') + value.slice(start)
}

11
node_modules/micromark/lib/util/prefix-size.js generated vendored Normal file
View File

@@ -0,0 +1,11 @@
'use strict'
var sizeChunks = require('./size-chunks.js')
/**
 * Measure the size of the last event when it has the given token type.
 *
 * @param {Array} events Event list; only the final event is inspected.
 * @param {string} type Token type the final event must match.
 * @returns {number} Size of the matching tail token's stream, `0` when the
 *   list is empty or the tail has a different type.
 */
function prefixSize(events, type) {
  var last = events[events.length - 1]
  return last && last[1].type === type
    ? sizeChunks(last[2].sliceStream(last[1]))
    : 0
}
module.exports = prefixSize

9
node_modules/micromark/lib/util/prefix-size.mjs generated vendored Normal file
View File

@@ -0,0 +1,9 @@
export default prefixSize
import sizeChunks from './size-chunks.mjs'
/**
 * Measure the size of the last event when it has the given token type.
 *
 * @param {Array} events Event list; only the final event is inspected.
 * @param {string} type Token type the final event must match.
 * @returns {number} Size of the matching tail token's stream, `0` when the
 *   list is empty or the tail has a different type.
 */
function prefixSize(events, type) {
  var last = events[events.length - 1]
  return last && last[1].type === type
    ? sizeChunks(last[2].sliceStream(last[1]))
    : 0
}

12
node_modules/micromark/lib/util/regex-check.js generated vendored Normal file
View File

@@ -0,0 +1,12 @@
'use strict'
var fromCharCode = require('../constant/from-char-code.js')
/**
 * Create a predicate that tests whether a character code, converted to a
 * one-character string, matches the bound regular expression.
 *
 * @param {RegExp} regex Expression to test against.
 * @returns {function(number): boolean} Character-code predicate.
 */
function regexCheck(regex) {
  return function check(code) {
    return regex.test(fromCharCode(code))
  }
}
module.exports = regexCheck

10
node_modules/micromark/lib/util/regex-check.mjs generated vendored Normal file
View File

@@ -0,0 +1,10 @@
export default regexCheck
import fromCharCode from '../constant/from-char-code.mjs'
/**
 * Create a predicate that tests whether a character code, converted to a
 * one-character string, matches the bound regular expression.
 *
 * @param {RegExp} regex Expression to test against.
 * @returns {function(number): boolean} Character-code predicate.
 */
function regexCheck(regex) {
  return function check(code) {
    return regex.test(fromCharCode(code))
  }
}

20
node_modules/micromark/lib/util/resolve-all.js generated vendored Normal file
View File

@@ -0,0 +1,20 @@
'use strict'
/**
 * Run every distinct `resolveAll` handler found on `constructs` exactly
 * once, in order, threading the event list through each call.
 *
 * @param {Array} constructs Constructs, some of which carry a `resolveAll`.
 * @param {Array} events Events to resolve.
 * @param {Object} context Tokenizer context passed to each handler.
 * @returns {Array} The (possibly replaced) event list.
 */
function resolveAll(constructs, events, context) {
  var seen = []
  var position = -1
  var handler
  while (++position < constructs.length) {
    handler = constructs[position].resolveAll
    // Each unique handler runs only once, even if several constructs
    // share it.
    if (handler && seen.indexOf(handler) < 0) {
      events = handler(events, context)
      seen.push(handler)
    }
  }
  return events
}
module.exports = resolveAll

18
node_modules/micromark/lib/util/resolve-all.mjs generated vendored Normal file
View File

@@ -0,0 +1,18 @@
export default resolveAll
/**
 * Run every distinct `resolveAll` handler found on `constructs` exactly
 * once, in order, threading the event list through each call.
 *
 * @param {Array} constructs Constructs, some of which carry a `resolveAll`.
 * @param {Array} events Events to resolve.
 * @param {Object} context Tokenizer context passed to each handler.
 * @returns {Array} The (possibly replaced) event list.
 */
function resolveAll(constructs, events, context) {
  var seen = []
  var position = -1
  var handler
  while (++position < constructs.length) {
    handler = constructs[position].resolveAll
    // Each unique handler runs only once, even if several constructs
    // share it.
    if (handler && seen.indexOf(handler) < 0) {
      events = handler(events, context)
      seen.push(handler)
    }
  }
  return events
}

32
node_modules/micromark/lib/util/safe-from-int.js generated vendored Normal file
View File

@@ -0,0 +1,32 @@
'use strict'
var codes = require('../character/codes.js')
var values = require('../character/values.js')
var fromCharCode = require('../constant/from-char-code.js')
/**
 * Parse a numeric character reference and turn it into a string, replacing
 * disallowed code points with the replacement character.
 *
 * @param {string} value Digits to parse.
 * @param {number} base Radix (e.g. `10` or `16`).
 * @returns {string} The character, or the replacement character when the
 *   code point is a control, surrogate, noncharacter, or out of range.
 */
function safeFromInt(value, base) {
  var code = parseInt(value, base)
  var invalid =
    // C0 controls except for HT, LF, FF, CR, space.
    code < codes.ht ||
    code === codes.vt ||
    (code > codes.cr && code < codes.space) ||
    // DEL and the C1 control block.
    (code > codes.tilde && code < 160) ||
    // Lone high surrogates and low surrogates.
    (code > 55295 && code < 57344) ||
    // Noncharacters.
    (code > 64975 && code < 65008) ||
    (code & 65535) === 65534 ||
    (code & 65535) === 65535 ||
    // Beyond the last Unicode code point.
    code > 1114111
  return invalid ? values.replacementCharacter : fromCharCode(code)
}
module.exports = safeFromInt

30
node_modules/micromark/lib/util/safe-from-int.mjs generated vendored Normal file
View File

@@ -0,0 +1,30 @@
export default safeFromInt
import codes from '../character/codes.mjs'
import values from '../character/values.mjs'
import fromCharCode from '../constant/from-char-code.mjs'
/**
 * Parse a numeric character reference and turn it into a string, replacing
 * disallowed code points with the replacement character.
 *
 * @param {string} value Digits to parse.
 * @param {number} base Radix (e.g. `10` or `16`).
 * @returns {string} The character, or the replacement character when the
 *   code point is a control, surrogate, noncharacter, or out of range.
 */
function safeFromInt(value, base) {
  var code = parseInt(value, base)
  var invalid =
    // C0 controls except for HT, LF, FF, CR, space.
    code < codes.ht ||
    code === codes.vt ||
    (code > codes.cr && code < codes.space) ||
    // DEL and the C1 control block.
    (code > codes.tilde && code < 160) ||
    // Lone high surrogates and low surrogates.
    (code > 55295 && code < 57344) ||
    // Noncharacters.
    (code > 64975 && code < 65008) ||
    (code & 65535) === 65534 ||
    (code & 65535) === 65535 ||
    // Beyond the last Unicode code point.
    code > 1114111
  return invalid ? values.replacementCharacter : fromCharCode(code)
}

54
node_modules/micromark/lib/util/serialize-chunks.js generated vendored Normal file
View File

@@ -0,0 +1,54 @@
'use strict'
var assert = require('assert')
var codes = require('../character/codes.js')
var values = require('../character/values.js')
var fromCharCode = require('../constant/from-char-code.js')
function _interopDefaultLegacy(e) {
return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
/**
 * Turn a list of chunks (strings and special character codes) back into a
 * string.
 *
 * Virtual spaces directly following a horizontal tab are dropped, because
 * the tab itself already represents them.
 *
 * @param {Array} chunks Strings and character codes.
 * @returns {string} Serialized chunks.
 */
function serializeChunks(chunks) {
  var parts = []
  var position = -1
  var previousWasTab
  var chunk
  var text
  while (++position < chunks.length) {
    chunk = chunks[position]
    if (typeof chunk === 'string') {
      text = chunk
    } else if (chunk === codes.carriageReturnLineFeed) {
      text = values.cr + values.lf
    } else if (chunk === codes.carriageReturn) {
      text = values.cr
    } else if (chunk === codes.lineFeed) {
      text = values.lf
    } else if (chunk === codes.horizontalTab) {
      text = values.ht
    } else if (chunk === codes.virtualSpace) {
      // Skip the expansion of a tab; note the `continue` also keeps
      // `previousWasTab` set so every following virtual space is skipped.
      if (previousWasTab) continue
      text = values.space
    } else {
      assert__default['default'].equal(
        typeof chunk,
        'number',
        'expected number'
      )
      // Currently only the replacement character.
      text = fromCharCode(chunk)
    }
    previousWasTab = chunk === codes.horizontalTab
    parts.push(text)
  }
  return parts.join('')
}
module.exports = serializeChunks

42
node_modules/micromark/lib/util/serialize-chunks.mjs generated vendored Normal file
View File

@@ -0,0 +1,42 @@
export default serializeChunks
import assert from 'assert'
import codes from '../character/codes.mjs'
import values from '../character/values.mjs'
import fromCharCode from '../constant/from-char-code.mjs'
/**
 * Turn a list of chunks (strings and special character codes) back into a
 * string.
 *
 * Virtual spaces directly following a horizontal tab are dropped, because
 * the tab itself already represents them.
 *
 * @param {Array} chunks Strings and character codes.
 * @returns {string} Serialized chunks.
 */
function serializeChunks(chunks) {
  var parts = []
  var position = -1
  var previousWasTab
  var chunk
  var text
  while (++position < chunks.length) {
    chunk = chunks[position]
    if (typeof chunk === 'string') {
      text = chunk
    } else if (chunk === codes.carriageReturnLineFeed) {
      text = values.cr + values.lf
    } else if (chunk === codes.carriageReturn) {
      text = values.cr
    } else if (chunk === codes.lineFeed) {
      text = values.lf
    } else if (chunk === codes.horizontalTab) {
      text = values.ht
    } else if (chunk === codes.virtualSpace) {
      // Skip the expansion of a tab; note the `continue` also keeps
      // `previousWasTab` set so every following virtual space is skipped.
      if (previousWasTab) continue
      text = values.space
    } else {
      assert.equal(typeof chunk, 'number', 'expected number')
      // Currently only the replacement character.
      text = fromCharCode(chunk)
    }
    previousWasTab = chunk === codes.horizontalTab
    parts.push(text)
  }
  return parts.join('')
}

9
node_modules/micromark/lib/util/shallow.js generated vendored Normal file
View File

@@ -0,0 +1,9 @@
'use strict'
var assign = require('../constant/assign.js')
/**
 * Create a shallow copy of `object`: own enumerable properties are copied
 * into a fresh plain object; nested values are shared.
 *
 * @param {Object} object Object to copy.
 * @returns {Object} New object with the same top-level properties.
 */
function shallow(object) {
  var copy = assign({}, object)
  return copy
}
module.exports = shallow

7
node_modules/micromark/lib/util/shallow.mjs generated vendored Normal file
View File

@@ -0,0 +1,7 @@
export default shallow
import assign from '../constant/assign.mjs'
/**
 * Create a shallow copy of `object`: own enumerable properties are copied
 * into a fresh plain object; nested values are shared.
 *
 * @param {Object} object Object to copy.
 * @returns {Object} New object with the same top-level properties.
 */
function shallow(object) {
  var copy = assign({}, object)
  return copy
}

16
node_modules/micromark/lib/util/size-chunks.js generated vendored Normal file
View File

@@ -0,0 +1,16 @@
'use strict'
// Measure the number of character codes in chunks.
// Counts tabs based on their expanded size, and CR+LF as one character.
/**
 * Measure the number of character codes in a list of chunks.
 *
 * Strings contribute their length; every numeric code contributes one
 * (so tabs count via their expanded virtual spaces, and CR+LF as one).
 *
 * @param {Array} chunks Strings and character codes.
 * @returns {number} Total size.
 */
function sizeChunks(chunks) {
  return chunks.reduce(function (size, chunk) {
    return size + (typeof chunk === 'string' ? chunk.length : 1)
  }, 0)
}
module.exports = sizeChunks

14
node_modules/micromark/lib/util/size-chunks.mjs generated vendored Normal file
View File

@@ -0,0 +1,14 @@
export default sizeChunks
// Measure the number of character codes in chunks.
// Counts tabs based on their expanded size, and CR+LF as one character.
/**
 * Measure the number of character codes in a list of chunks.
 *
 * Strings contribute their length; every numeric code contributes one
 * (so tabs count via their expanded virtual spaces, and CR+LF as one).
 *
 * @param {Array} chunks Strings and character codes.
 * @returns {number} Total size.
 */
function sizeChunks(chunks) {
  return chunks.reduce(function (size, chunk) {
    return size + (typeof chunk === 'string' ? chunk.length : 1)
  }, 0)
}

43
node_modules/micromark/lib/util/slice-chunks.js generated vendored Normal file
View File

@@ -0,0 +1,43 @@
'use strict'
var assert = require('assert')
function _interopDefaultLegacy(e) {
return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
/**
 * Get the chunks covered by a token, trimming the first and last chunk to
 * the token's buffer offsets.
 *
 * @param {Array} chunks All chunks.
 * @param {Object} token Token with `start`/`end` points carrying `_index`
 *   (chunk index) and `_bufferIndex` (offset within that chunk, `-1` when
 *   the whole chunk is covered).
 * @returns {Array} The covered view.
 */
function sliceChunks(chunks, token) {
  var startIndex = token.start._index
  var startBufferIndex = token.start._bufferIndex
  var endIndex = token.end._index
  var endBufferIndex = token.end._bufferIndex

  // Start and end fall inside the same chunk: slice within it.
  if (startIndex === endIndex) {
    assert__default['default'](
      endBufferIndex > -1,
      'expected non-negative end buffer index'
    )
    assert__default['default'](
      startBufferIndex > -1,
      'expected non-negative start buffer index'
    )
    return [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
  }

  var view = chunks.slice(startIndex, endIndex)
  // Trim the first chunk when the token starts partway into it.
  if (startBufferIndex > -1) {
    view[0] = view[0].slice(startBufferIndex)
  }
  // Append the covered prefix of the end chunk, if any.
  if (endBufferIndex > 0) {
    view.push(chunks[endIndex].slice(0, endBufferIndex))
  }
  return view
}
module.exports = sliceChunks

29
node_modules/micromark/lib/util/slice-chunks.mjs generated vendored Normal file
View File

@@ -0,0 +1,29 @@
export default sliceChunks
import assert from 'assert'
/**
 * Get the chunks covered by a token, trimming the first and last chunk to
 * the token's buffer offsets.
 *
 * @param {Array} chunks All chunks.
 * @param {Object} token Token with `start`/`end` points carrying `_index`
 *   (chunk index) and `_bufferIndex` (offset within that chunk, `-1` when
 *   the whole chunk is covered).
 * @returns {Array} The covered view.
 */
function sliceChunks(chunks, token) {
  var startIndex = token.start._index
  var startBufferIndex = token.start._bufferIndex
  var endIndex = token.end._index
  var endBufferIndex = token.end._bufferIndex

  // Start and end fall inside the same chunk: slice within it.
  if (startIndex === endIndex) {
    assert(endBufferIndex > -1, 'expected non-negative end buffer index')
    assert(startBufferIndex > -1, 'expected non-negative start buffer index')
    return [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
  }

  var view = chunks.slice(startIndex, endIndex)
  // Trim the first chunk when the token starts partway into it.
  if (startBufferIndex > -1) {
    view[0] = view[0].slice(startBufferIndex)
  }
  // Append the covered prefix of the end chunk, if any.
  if (endBufferIndex > 0) {
    view.push(chunks[endIndex].slice(0, endBufferIndex))
  }
  return view
}

219
node_modules/micromark/lib/util/subtokenize.js generated vendored Normal file
View File

@@ -0,0 +1,219 @@
'use strict'
var assert = require('assert')
var codes = require('../character/codes.js')
var assign = require('../constant/assign.js')
var types = require('../constant/types.js')
var chunkedSplice = require('./chunked-splice.js')
var shallow = require('./shallow.js')
function _interopDefaultLegacy(e) {
return e && typeof e === 'object' && 'default' in e ? e : {default: e}
}
var assert__default = /*#__PURE__*/ _interopDefaultLegacy(assert)
/**
 * Expand embedded content: tokenize `contentType` tokens and splice the
 * resulting events into `events`, in place.
 *
 * Runs one expansion pass.  Whenever an `enter` event carries a token with
 * a `contentType`, that content is tokenized (see `subcontent`) and the
 * chunk events are replaced by the subtokenizer's events.  Container exits
 * preceded by line endings are also swapped in front of those endings.
 *
 * @param {Array} events Event list (mutated in place).
 * @returns {boolean} `true` when this pass expanded nothing (done);
 *   `false` when more content remains for a further pass.
 */
function subtokenize(events) {
  var jumps = {}
  var index = -1
  var event
  var lineIndex
  var otherIndex
  var otherEvent
  var parameters
  var subevents
  var more
  while (++index < events.length) {
    // Skip over regions already replaced by an earlier splice.
    while (index in jumps) {
      index = jumps[index]
    }
    event = events[index]
    // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.
    if (
      index &&
      event[1].type === types.chunkFlow &&
      events[index - 1][1].type === types.listItemPrefix
    ) {
      subevents = event[1]._tokenizer.events
      otherIndex = 0
      // Skip over an optional leading blank line (enter/exit pair).
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === types.lineEndingBlank
      ) {
        otherIndex += 2
      }
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === types.content
      ) {
        // Flag every text chunk until the content token closes.
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === types.content) {
            break
          }
          if (subevents[otherIndex][1].type === types.chunkText) {
            subevents[otherIndex][1].isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    }
    // Enter.
    if (event[0] === 'enter') {
      if (event[1].contentType) {
        // Expand the content, then jump past the spliced-in events.
        assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    }
    // Exit.
    else if (event[1]._container || event[1]._movePreviousLineEndings) {
      otherIndex = index
      lineIndex = undefined
      // Walk back over consecutive line-ending events directly before
      // this exit; `lineIndex` ends up at the first of them.
      while (otherIndex--) {
        otherEvent = events[otherIndex]
        if (
          otherEvent[1].type === types.lineEnding ||
          otherEvent[1].type === types.lineEndingBlank
        ) {
          if (otherEvent[0] === 'enter') {
            // Every line ending but the first becomes blank.
            if (lineIndex) {
              events[lineIndex][1].type = types.lineEndingBlank
            }
            otherEvent[1].type = types.lineEnding
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }
      if (lineIndex) {
        // Fix position.
        event[1].end = shallow(events[lineIndex][1].start)
        // Switch container exit w/ line endings.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        chunkedSplice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }
  return !more
}
/**
 * Tokenize the chain of linked tokens starting at `events[eventIndex]`
 * with the matching subtokenizer, then splice the produced events over
 * the original chunk events.
 *
 * @param {Array} events Whole event list (mutated in place).
 * @param {number} eventIndex Index of the `enter` event of the first token.
 * @returns {Object} Jump table mapping each splice's start index to the
 *   index of its last spliced event (the caller resumes after it).
 */
function subcontent(events, eventIndex) {
  var token = events[eventIndex][1]
  var context = events[eventIndex][2]
  var startPosition = eventIndex - 1
  var startPositions = []
  // Reuse a tokenizer left by a previous partial pass, or create one for
  // this content type.
  var tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  var childEvents = tokenizer.events
  var jumps = []
  var gaps = {}
  var stream
  var previous
  var index
  var entered
  var end
  var adjust
  // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.
  while (token) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== token) {
      // Empty.
    }
    startPositions.push(startPosition)
    if (!token._tokenizer) {
      stream = context.sliceStream(token)
      if (!token.next) {
        stream.push(codes.eof)
      }
      if (previous) {
        tokenizer.defineSkip(token.start)
      }
      // Hook for the GFM tasklist extension (see `subtokenize`).
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }
      tokenizer.write(stream)
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    }
    // Unravel the next token.
    previous = token
    token = token.next
  }
  // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.
  token = previous
  index = childEvents.length
  while (index--) {
    // Make sure we've at least seen something (final eol is part of the last
    // token).
    if (childEvents[index][0] === 'enter') {
      entered = true
    } else if (
      // Find a void token that includes a break.
      entered &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      add(childEvents.slice(index + 1, end))
      assert__default['default'](token.previous, 'expected a previous token')
      // Help GC.
      token._tokenizer = token.next = undefined
      token = token.previous
      end = index + 1
    }
  }
  assert__default['default'](!token.previous, 'expected no previous token')
  // Help GC.
  tokenizer.events = token._tokenizer = token.next = undefined
  // Do head:
  add(childEvents.slice(0, end))
  index = -1
  adjust = 0
  // Fold the per-splice jumps into one map, shifting each entry by the
  // size difference every earlier splice introduced.
  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }
  return gaps
  // Replace one enter/exit chunk pair at the most recent start position
  // with `slice`, and record the resulting [start, last] jump.
  function add(slice) {
    var start = startPositions.pop()
    jumps.unshift([start, start + slice.length - 1])
    chunkedSplice(events, start, 2, slice)
  }
}
module.exports = subtokenize

211
node_modules/micromark/lib/util/subtokenize.mjs generated vendored Normal file
View File

@@ -0,0 +1,211 @@
export default subtokenize
import assert from 'assert'
import codes from '../character/codes.mjs'
import assign from '../constant/assign.mjs'
import types from '../constant/types.mjs'
import chunkedSplice from './chunked-splice.mjs'
import shallow from './shallow.mjs'
/**
 * Expand embedded content: tokenize `contentType` tokens and splice the
 * resulting events into `events`, in place.
 *
 * Runs one expansion pass.  Whenever an `enter` event carries a token with
 * a `contentType`, that content is tokenized (see `subcontent`) and the
 * chunk events are replaced by the subtokenizer's events.  Container exits
 * preceded by line endings are also swapped in front of those endings.
 *
 * @param {Array} events Event list (mutated in place).
 * @returns {boolean} `true` when this pass expanded nothing (done);
 *   `false` when more content remains for a further pass.
 */
function subtokenize(events) {
  var jumps = {}
  var index = -1
  var event
  var lineIndex
  var otherIndex
  var otherEvent
  var parameters
  var subevents
  var more
  while (++index < events.length) {
    // Skip over regions already replaced by an earlier splice.
    while (index in jumps) {
      index = jumps[index]
    }
    event = events[index]
    // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.
    if (
      index &&
      event[1].type === types.chunkFlow &&
      events[index - 1][1].type === types.listItemPrefix
    ) {
      subevents = event[1]._tokenizer.events
      otherIndex = 0
      // Skip over an optional leading blank line (enter/exit pair).
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === types.lineEndingBlank
      ) {
        otherIndex += 2
      }
      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === types.content
      ) {
        // Flag every text chunk until the content token closes.
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === types.content) {
            break
          }
          if (subevents[otherIndex][1].type === types.chunkText) {
            subevents[otherIndex][1].isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    }
    // Enter.
    if (event[0] === 'enter') {
      if (event[1].contentType) {
        // Expand the content, then jump past the spliced-in events.
        assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    }
    // Exit.
    else if (event[1]._container || event[1]._movePreviousLineEndings) {
      otherIndex = index
      lineIndex = undefined
      // Walk back over consecutive line-ending events directly before
      // this exit; `lineIndex` ends up at the first of them.
      while (otherIndex--) {
        otherEvent = events[otherIndex]
        if (
          otherEvent[1].type === types.lineEnding ||
          otherEvent[1].type === types.lineEndingBlank
        ) {
          if (otherEvent[0] === 'enter') {
            // Every line ending but the first becomes blank.
            if (lineIndex) {
              events[lineIndex][1].type = types.lineEndingBlank
            }
            otherEvent[1].type = types.lineEnding
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }
      if (lineIndex) {
        // Fix position.
        event[1].end = shallow(events[lineIndex][1].start)
        // Switch container exit w/ line endings.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        chunkedSplice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }
  return !more
}
/**
 * Tokenize the chain of linked tokens starting at `events[eventIndex]`
 * with the matching subtokenizer, then splice the produced events over
 * the original chunk events.
 *
 * @param {Array} events Whole event list (mutated in place).
 * @param {number} eventIndex Index of the `enter` event of the first token.
 * @returns {Object} Jump table mapping each splice's start index to the
 *   index of its last spliced event (the caller resumes after it).
 */
function subcontent(events, eventIndex) {
  var token = events[eventIndex][1]
  var context = events[eventIndex][2]
  var startPosition = eventIndex - 1
  var startPositions = []
  // Reuse a tokenizer left by a previous partial pass, or create one for
  // this content type.
  var tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  var childEvents = tokenizer.events
  var jumps = []
  var gaps = {}
  var stream
  var previous
  var index
  var entered
  var end
  var adjust
  // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.
  while (token) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== token) {
      // Empty.
    }
    startPositions.push(startPosition)
    if (!token._tokenizer) {
      stream = context.sliceStream(token)
      if (!token.next) {
        stream.push(codes.eof)
      }
      if (previous) {
        tokenizer.defineSkip(token.start)
      }
      // Hook for the GFM tasklist extension (see `subtokenize`).
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }
      tokenizer.write(stream)
      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    }
    // Unravel the next token.
    previous = token
    token = token.next
  }
  // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.
  token = previous
  index = childEvents.length
  while (index--) {
    // Make sure we've at least seen something (final eol is part of the last
    // token).
    if (childEvents[index][0] === 'enter') {
      entered = true
    } else if (
      // Find a void token that includes a break.
      entered &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      add(childEvents.slice(index + 1, end))
      assert(token.previous, 'expected a previous token')
      // Help GC.
      token._tokenizer = token.next = undefined
      token = token.previous
      end = index + 1
    }
  }
  assert(!token.previous, 'expected no previous token')
  // Help GC.
  tokenizer.events = token._tokenizer = token.next = undefined
  // Do head:
  add(childEvents.slice(0, end))
  index = -1
  adjust = 0
  // Fold the per-splice jumps into one map, shifting each entry by the
  // size difference every earlier splice introduced.
  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }
  return gaps
  // Replace one enter/exit chunk pair at the most recent start position
  // with `slice`, and record the resulting [start, last] jump.
  function add(slice) {
    var start = startPositions.pop()
    jumps.unshift([start, start + slice.length - 1])
    chunkedSplice(events, start, 2, slice)
  }
}