// Source: jam-cloud/jam-ui/node_modules/prism-react-renderer/lib/utils/normalizeTokens.js
// (file-browser metadata: 106 lines, 3.0 KiB, JavaScript, executable file)
"use strict";
exports.__esModule = true;
exports.default = void 0;
var newlineRe = /\r\n|\r|\n/; // Empty lines need to contain a single empty token, denoted with { empty: true }
// Ensure a line renders with height even when it has no visible content:
// a line with no tokens gets a single empty "plain" token, and a line whose
// only token is the empty string is flagged with { empty: true }. Mutates
// `line` in place; returns nothing.
var normalizeEmptyLines = function normalizeEmptyLines(line) {
if (line.length === 0) {
line.push({
types: ["plain"],
content: "",
empty: true
});
return;
}
var soleToken = line[0];
if (line.length === 1 && soleToken.content === "") {
soleToken.empty = true;
}
};
// Append a type (string, or array of types via Array#concat flattening) to a
// type list, skipping the append when it would duplicate the last entry.
// Returns the original array when nothing is added, otherwise a new array —
// never mutates `types`.
var appendTypes = function appendTypes(types, add) {
var lastIndex = types.length - 1;
if (lastIndex >= 0 && types[lastIndex] === add) {
return types;
}
return types.concat(add);
}; // Takes an array of Prism's tokens and groups them by line, turning plain
// strings into tokens as well. Tokens can become recursive in some cases,
// which means that their types are concatenated. Plain-string tokens however
// are always of type "plain".
// This is not recursive to avoid exceeding the call-stack limit, since it's unclear
// how nested Prism's tokens can become
// Flattens Prism's (possibly nested) token stream into an array of lines,
// where each line is an array of { types: string[], content: string } tokens.
// Bare strings become tokens of type "plain"; nested tokens inherit and
// extend their parent's type list via appendTypes. Implemented iteratively
// with parallel explicit stacks (one slot per nesting depth) instead of
// recursion, to avoid call-stack overflow on deeply nested token trees.
var normalizeTokens = function normalizeTokens(tokens) {
// Parallel stacks, indexed by nesting depth (stackIndex):
var typeArrStack = [[]]; // accumulated types at each depth
var tokenArrStack = [tokens]; // the token array being iterated at each depth
var tokenArrIndexStack = [0]; // read cursor into that array
var tokenArrSizeStack = [tokens.length]; // cached length of that array
var i = 0;
var stackIndex = 0;
var currentLine = [];
var acc = [currentLine]; // result: array of lines, starts with one open line
while (stackIndex > -1) {
// Consume the token array at the current depth; note the post-increment
// advances the cursor so re-entering this depth resumes where it left off.
while ((i = tokenArrIndexStack[stackIndex]++) < tokenArrSizeStack[stackIndex]) {
var content = void 0;
var types = typeArrStack[stackIndex];
var tokenArr = tokenArrStack[stackIndex];
var token = tokenArr[i]; // Determine content and append type to types if necessary
if (typeof token === "string") {
// Bare strings are "plain" only at the top level; inside a token they
// keep the enclosing token's types.
types = stackIndex > 0 ? types : ["plain"];
content = token;
} else {
types = appendTypes(types, token.type);
if (token.alias) {
// alias may be a string or an array; appendTypes concats either.
types = appendTypes(types, token.alias);
}
content = token.content;
} // If token.content is an array, increase the stack depth and repeat this while-loop
if (typeof content !== "string") {
stackIndex++;
typeArrStack.push(types);
tokenArrStack.push(content);
tokenArrIndexStack.push(0);
tokenArrSizeStack.push(content.length);
continue;
} // Split by newlines
var splitByNewlines = content.split(newlineRe);
var newlineCount = splitByNewlines.length;
// First segment continues the currently open line.
currentLine.push({
types: types,
content: splitByNewlines[0]
}); // Create a new line for each string on a new line
for (var _i = 1; _i < newlineCount; _i++) {
// Close out the previous line (marking it empty if needed), then open
// a new one; `acc.push(currentLine = [])` rebinds and records it.
normalizeEmptyLines(currentLine);
acc.push(currentLine = []);
currentLine.push({
types: types,
content: splitByNewlines[_i]
});
}
} // Decrease the stack depth: this level's tokens are exhausted
stackIndex--;
typeArrStack.pop();
tokenArrStack.pop();
tokenArrIndexStack.pop();
tokenArrSizeStack.pop();
}
// Close out the final line before returning.
normalizeEmptyLines(currentLine);
return acc;
};
// Expose the normalizer as the module's default export (Babel interop shape).
var _default = normalizeTokens;
exports.default = _default;