Initial commit
This commit is contained in:
13
node_modules/@maplibre/mlt/dist/decoding/bigEndianDecode.d.ts
generated
vendored
Normal file
13
node_modules/@maplibre/mlt/dist/decoding/bigEndianDecode.d.ts
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
/**
|
||||
* Decodes big-endian bytes into `out` without allocating the output buffer.
|
||||
*
|
||||
* This function does not copy `bytes`; it writes decoded words into the provided `out` array.
|
||||
* For aligned inputs it may create a temporary typed-array view (`Uint32Array`) over `bytes.buffer`
|
||||
* to speed up decoding.
|
||||
*
|
||||
* If `byteLength` is not a multiple of 4, the final word is padded with zeros.
|
||||
*
|
||||
* @returns Number of int32 words written.
|
||||
* @throws RangeError If `(offset, byteLength)` is out of bounds, or if `out` is too small.
|
||||
*/
|
||||
export declare function decodeBigEndianInt32sInto(bytes: Uint8Array, offset: number, byteLength: number, out: Uint32Array): number;
|
||||
50
node_modules/@maplibre/mlt/dist/decoding/bigEndianDecode.js
generated
vendored
Normal file
50
node_modules/@maplibre/mlt/dist/decoding/bigEndianDecode.js
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
import { bswap32 } from "./fastPforShared";
|
||||
/**
 * Decodes big-endian bytes into `out` without allocating the output buffer.
 *
 * This function does not copy `bytes`; it writes decoded words into the provided `out` array.
 * For aligned inputs it may create a temporary typed-array view (`Uint32Array`) over `bytes.buffer`
 * to speed up decoding.
 *
 * If `byteLength` is not a multiple of 4, the final word is padded with zeros.
 *
 * @returns Number of int32 words written.
 * @throws RangeError If `(offset, byteLength)` is out of bounds, or if `out` is too small.
 */
export function decodeBigEndianInt32sInto(bytes, offset, byteLength, out) {
    // Reject windows that fall outside the source view.
    if (offset < 0 || byteLength < 0 || offset + byteLength > bytes.length) {
        throw new RangeError(`decodeBigEndianInt32sInto: out of bounds (offset=${offset}, byteLength=${byteLength}, bytes.length=${bytes.length})`);
    }
    const wholeWords = byteLength >>> 2;
    const trailingBytes = byteLength & 3;
    const wordCount = trailingBytes !== 0 ? wholeWords + 1 : wholeWords;
    if (out.length < wordCount) {
        throw new RangeError(`decodeBigEndianInt32sInto: out.length=${out.length} < ${wordCount}`);
    }
    if (wholeWords > 0) {
        const start = bytes.byteOffset + offset;
        if ((start & 3) === 0) {
            // Word-aligned fast path: read through a Uint32Array view and byte-swap.
            // NOTE(review): applies bswap32 unconditionally, which presumes a
            // little-endian host for the typed-array read — confirm.
            const words = new Uint32Array(bytes.buffer, start, wholeWords);
            for (let w = 0; w < wholeWords; w++) {
                out[w] = bswap32(words[w]) | 0;
            }
        }
        else {
            // Unaligned: assemble each word from individual bytes, MSB first.
            for (let w = 0; w < wholeWords; w++) {
                const p = offset + w * 4;
                out[w] = (bytes[p] << 24) | (bytes[p + 1] << 16) | (bytes[p + 2] << 8) | bytes[p + 3] | 0;
            }
        }
    }
    if (trailingBytes !== 0) {
        // Partial final word: high bytes come from the input, low bytes stay zero.
        const p = offset + wholeWords * 4;
        let word = 0;
        for (let b = 0; b < trailingBytes; b++) {
            word |= bytes[p + b] << (24 - b * 8);
        }
        out[wholeWords] = word | 0;
    }
    return wordCount;
}
|
||||
//# sourceMappingURL=bigEndianDecode.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/bigEndianDecode.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/bigEndianDecode.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"bigEndianDecode.js","sourceRoot":"","sources":["../../src/decoding/bigEndianDecode.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAE3C;;;;;;;;;;;GAWG;AACH,MAAM,UAAU,yBAAyB,CACrC,KAAiB,EACjB,MAAc,EACd,UAAkB,EAClB,GAAgB;IAEhB,IAAI,MAAM,GAAG,CAAC,IAAI,UAAU,GAAG,CAAC,IAAI,MAAM,GAAG,UAAU,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QACrE,MAAM,IAAI,UAAU,CAChB,oDAAoD,MAAM,gBAAgB,UAAU,kBAAkB,KAAK,CAAC,MAAM,GAAG,CACxH,CAAC;IACN,CAAC;IAED,MAAM,eAAe,GAAG,IAAI,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;IACnD,MAAM,gBAAgB,GAAG,UAAU,GAAG,CAAC,KAAK,CAAC,CAAC;IAC9C,MAAM,OAAO,GAAG,gBAAgB,CAAC,CAAC,CAAC,eAAe,GAAG,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC;IAEzE,IAAI,GAAG,CAAC,MAAM,GAAG,OAAO,EAAE,CAAC;QACvB,MAAM,IAAI,UAAU,CAAC,yCAAyC,GAAG,CAAC,MAAM,MAAM,OAAO,EAAE,CAAC,CAAC;IAC7F,CAAC;IAED,IAAI,eAAe,GAAG,CAAC,EAAE,CAAC;QACtB,MAAM,cAAc,GAAG,KAAK,CAAC,UAAU,GAAG,MAAM,CAAC;QACjD,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC;YAC7B,MAAM,GAAG,GAAG,IAAI,WAAW,CAAC,KAAK,CAAC,MAAM,EAAE,cAAc,EAAE,eAAe,CAAC,CAAC;YAC3E,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,eAAe,EAAE,CAAC,EAAE,EAAE,CAAC;gBACvC,GAAG,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;YACjC,CAAC;QACL,CAAC;aAAM,CAAC;YACJ,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,eAAe,EAAE,CAAC,EAAE,EAAE,CAAC;gBACvC,MAAM,IAAI,GAAG,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC;gBAC5B,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC;YAC1G,CAAC;QACL,CAAC;IACL,CAAC;IAED,IAAI,gBAAgB,EAAE,CAAC;QACnB,MAAM,IAAI,GAAG,MAAM,GAAG,eAAe,GAAG,CAAC,CAAC;QAC1C,MAAM,SAAS,GAAG,UAAU,GAAG,eAAe,GAAG,CAAC,CAAC;QACnD,IAAI,CAAC,GAAG,CAAC,CAAC;QACV,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,EAAE,CAAC,EAAE,EAAE,CAAC;YACjC,CAAC,IAAI,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;QACzC,CAAC;QACD,GAAG,CAAC,eAAe,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;IACjC,CAAC;IAED,OAAO,O
AAO,CAAC;AACnB,CAAC","sourcesContent":["import { bswap32 } from \"./fastPforShared\";\n\n/**\n * Decodes big-endian bytes into `out` without allocating the output buffer.\n *\n * This function does not copy `bytes`; it writes decoded words into the provided `out` array.\n * For aligned inputs it may create a temporary typed-array view (`Uint32Array`) over `bytes.buffer`\n * to speed up decoding.\n *\n * If `byteLength` is not a multiple of 4, the final word is padded with zeros.\n *\n * @returns Number of int32 words written.\n * @throws RangeError If `(offset, byteLength)` is out of bounds, or if `out` is too small.\n */\nexport function decodeBigEndianInt32sInto(\n bytes: Uint8Array,\n offset: number,\n byteLength: number,\n out: Uint32Array,\n): number {\n if (offset < 0 || byteLength < 0 || offset + byteLength > bytes.length) {\n throw new RangeError(\n `decodeBigEndianInt32sInto: out of bounds (offset=${offset}, byteLength=${byteLength}, bytes.length=${bytes.length})`,\n );\n }\n\n const numCompleteInts = Math.floor(byteLength / 4);\n const hasTrailingBytes = byteLength % 4 !== 0;\n const numInts = hasTrailingBytes ? 
numCompleteInts + 1 : numCompleteInts;\n\n if (out.length < numInts) {\n throw new RangeError(`decodeBigEndianInt32sInto: out.length=${out.length} < ${numInts}`);\n }\n\n if (numCompleteInts > 0) {\n const absoluteOffset = bytes.byteOffset + offset;\n if ((absoluteOffset & 3) === 0) {\n const u32 = new Uint32Array(bytes.buffer, absoluteOffset, numCompleteInts);\n for (let i = 0; i < numCompleteInts; i++) {\n out[i] = bswap32(u32[i]) | 0;\n }\n } else {\n for (let i = 0; i < numCompleteInts; i++) {\n const base = offset + i * 4;\n out[i] = (bytes[base] << 24) | (bytes[base + 1] << 16) | (bytes[base + 2] << 8) | bytes[base + 3] | 0;\n }\n }\n }\n\n if (hasTrailingBytes) {\n const base = offset + numCompleteInts * 4;\n const remaining = byteLength - numCompleteInts * 4;\n let v = 0;\n for (let i = 0; i < remaining; i++) {\n v |= bytes[base + i] << (24 - i * 8);\n }\n out[numCompleteInts] = v | 0;\n }\n\n return numInts;\n}\n"]}
|
||||
75
node_modules/@maplibre/mlt/dist/decoding/decodingTestUtils.d.ts
generated
vendored
Normal file
75
node_modules/@maplibre/mlt/dist/decoding/decodingTestUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
|
||||
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
|
||||
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
|
||||
import { DictionaryType } from "../metadata/tile/dictionaryType";
|
||||
import { type Column } from "../metadata/tileset/tilesetMetadata";
|
||||
import type { RleEncodedStreamMetadata, StreamMetadata } from "../metadata/tile/streamMetadataDecoder";
|
||||
import type { LogicalStreamType } from "../metadata/tile/logicalStreamType";
|
||||
/**
|
||||
* Creates basic stream metadata with logical techniques.
|
||||
*/
|
||||
export declare function createStreamMetadata(logicalTechnique1: LogicalLevelTechnique, logicalTechnique2?: LogicalLevelTechnique, numValues?: number): StreamMetadata;
|
||||
/**
|
||||
* Creates RLE-encoded stream metadata.
|
||||
*/
|
||||
export declare function createRleMetadata(logicalTechnique1: LogicalLevelTechnique, logicalTechnique2: LogicalLevelTechnique, runs: number, numRleValues: number): RleEncodedStreamMetadata;
|
||||
/**
|
||||
* Creates column metadata for STRUCT type columns.
|
||||
*/
|
||||
export declare function createColumnMetadataForStruct(columnName: string, childFields: Array<{
|
||||
name: string;
|
||||
type?: number;
|
||||
}>): Column;
|
||||
/**
|
||||
* Creates a single stream with metadata and data.
|
||||
*/
|
||||
export declare function createStream(physicalType: PhysicalStreamType, data: Uint8Array, options?: {
|
||||
logical?: LogicalStreamType;
|
||||
technique?: PhysicalLevelTechnique;
|
||||
count?: number;
|
||||
}): Uint8Array;
|
||||
/**
|
||||
* Encodes FSST-compressed strings into a complete stream.
|
||||
* This uses hardcoded test data: ["cat", "dog", "cat"]
|
||||
* @returns Encoded Uint8Array that can be passed to decodeString
|
||||
*/
|
||||
export declare function encodeFsstStrings(): Uint8Array;
|
||||
/**
|
||||
* Encodes a shared dictionary for struct fields.
|
||||
* @param dictionaryStrings - Array of unique strings in the dictionary
|
||||
* @param options - Encoding options
|
||||
* @returns Object containing length and data streams
|
||||
*/
|
||||
export declare function encodeSharedDictionary(dictionaryStrings: string[], options?: {
|
||||
useFsst?: boolean;
|
||||
dictionaryType?: DictionaryType;
|
||||
}): {
|
||||
lengthStream: Uint8Array;
|
||||
dataStream: Uint8Array;
|
||||
symbolLengthStream?: Uint8Array;
|
||||
symbolDataStream?: Uint8Array;
|
||||
};
|
||||
/**
|
||||
* Encodes streams for a struct field.
|
||||
* @param offsetIndices - Indices into the shared dictionary
|
||||
* @param presentValues - Boolean array indicating which values are present
|
||||
* @param isPresent - Whether the field itself is present
|
||||
* @returns Encoded streams for the field
|
||||
*/
|
||||
export declare function encodeStructField(offsetIndices: number[], presentValues: boolean[], isPresent?: boolean): Uint8Array;
|
||||
/**
|
||||
* Builds a complete encoded stream by combining metadata and data.
|
||||
*/
|
||||
export declare function buildEncodedStream(streamMetadata: StreamMetadata | RleEncodedStreamMetadata, encodedData: Uint8Array): Uint8Array;
|
||||
/**
|
||||
* Encodes stream metadata into binary format.
|
||||
* - Byte 1: Stream type (physical type in upper 4 bits, logical subtype in lower 4 bits)
|
||||
* - Byte 2: Encodings (llt1[5-7], llt2[2-4], plt[0-1])
|
||||
* - Varints: numValues, byteLength
|
||||
* - If RLE: Varints: runs, numRleValues
|
||||
*/
|
||||
export declare function encodeStreamMetadata(metadata: StreamMetadata | RleEncodedStreamMetadata): Uint8Array;
|
||||
/**
|
||||
* Concatenates multiple Uint8Array buffers into a single buffer.
|
||||
*/
|
||||
export declare function concatenateBuffers(...buffers: Uint8Array[]): Uint8Array;
|
||||
284
node_modules/@maplibre/mlt/dist/decoding/decodingTestUtils.js
generated
vendored
Normal file
284
node_modules/@maplibre/mlt/dist/decoding/decodingTestUtils.js
generated
vendored
Normal file
@@ -0,0 +1,284 @@
|
||||
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
|
||||
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
|
||||
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
|
||||
import { DictionaryType } from "../metadata/tile/dictionaryType";
|
||||
import { LengthType } from "../metadata/tile/lengthType";
|
||||
import { OffsetType } from "../metadata/tile/offsetType";
|
||||
import IntWrapper from "./intWrapper";
|
||||
import { ComplexType, ScalarType } from "../metadata/tileset/tilesetMetadata";
|
||||
import { encodeBooleanRle, encodeStrings, createStringLengths } from "../encoding/encodingUtils";
|
||||
import { encodeVarintInt32Value, encodeVarintInt32 } from "../encoding/integerEncodingUtils";
|
||||
/**
 * Common fields shared by the stream-metadata test factories below.
 * All factories produce DATA streams with a NONE dictionary and VARINT
 * physical encoding; only the counts differ.
 */
function baseStreamMetadata(llt1, llt2) {
    return {
        physicalStreamType: PhysicalStreamType.DATA,
        logicalStreamType: { dictionaryType: DictionaryType.NONE },
        logicalLevelTechnique1: llt1,
        logicalLevelTechnique2: llt2,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
    };
}
/**
 * Creates basic stream metadata with logical techniques.
 *
 * @param logicalTechnique1 - First logical-level technique.
 * @param logicalTechnique2 - Second logical-level technique (defaults to NONE).
 * @param numValues - Value count used for both numValues and decompressedCount.
 */
export function createStreamMetadata(logicalTechnique1, logicalTechnique2 = LogicalLevelTechnique.NONE, numValues = 3) {
    return {
        ...baseStreamMetadata(logicalTechnique1, logicalTechnique2),
        numValues,
        byteLength: 10, // placeholder; buildEncodedStream replaces it with the real length
        decompressedCount: numValues,
    };
}
/**
 * Creates RLE-encoded stream metadata.
 *
 * @param runs - Number of RLE runs; numValues is runs * 2 (run length + value pairs).
 * @param numRleValues - Total decoded value count after RLE expansion.
 */
export function createRleMetadata(logicalTechnique1, logicalTechnique2, runs, numRleValues) {
    return {
        ...baseStreamMetadata(logicalTechnique1, logicalTechnique2),
        numValues: runs * 2,
        byteLength: 10, // placeholder; buildEncodedStream replaces it with the real length
        decompressedCount: numRleValues,
        runs,
        numRleValues,
    };
}
|
||||
/**
 * Creates column metadata for STRUCT type columns.
 *
 * @param columnName - Name of the struct column.
 * @param childFields - Child field descriptors; `type` defaults to STRING.
 * @returns A non-nullable STRUCT column whose children are nullable scalar fields.
 */
export function createColumnMetadataForStruct(columnName, childFields) {
    const children = [];
    for (const field of childFields) {
        children.push({
            name: field.name,
            nullable: true,
            scalarField: {
                physicalType: field.type ?? ScalarType.STRING,
                type: "physicalType",
            },
            type: "scalarField",
        });
    }
    return {
        name: columnName,
        nullable: false,
        complexType: {
            physicalType: ComplexType.STRUCT,
            children,
            type: "physicalType",
        },
        type: "complexType",
    };
}
|
||||
/**
 * Creates a single stream with metadata and data.
 *
 * @param physicalType - Physical stream type (DATA, LENGTH, OFFSET, PRESENT).
 * @param data - Raw payload bytes.
 * @param options - Optional logical stream type, physical technique, and value count.
 * @returns Encoded metadata header followed by `data`.
 */
export function createStream(physicalType, data, options = {}) {
    const valueCount = options.count ?? 0;
    const metadata = {
        physicalStreamType: physicalType,
        logicalStreamType: options.logical ?? {},
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: options.technique ?? PhysicalLevelTechnique.NONE,
        numValues: valueCount,
        byteLength: data.length,
        decompressedCount: valueCount,
    };
    return buildEncodedStream(metadata, data);
}
|
||||
/**
 * Encodes FSST-compressed strings into a complete stream.
 * This uses hardcoded test data: ["cat", "dog", "cat"]
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export function encodeFsstStrings() {
    const symbolTable = new Uint8Array([99, 97, 116, 100, 111, 103]); // "catdog"
    const symbolLengths = new Uint32Array([3, 3]);
    const compressedDictionary = new Uint8Array([0, 1]); // symbol indices for "cat", "dog"
    const dictionaryLengths = new Uint32Array([3, 3]);
    const offsets = new Uint32Array([0, 1, 0]); // "cat", "dog", "cat"
    const numValues = 3;
    // Stream order matters to the decoder: present, FSST symbols (data + lengths),
    // string offsets, then the compressed dictionary (lengths + data).
    const presentStream = createStream(PhysicalStreamType.PRESENT, encodeBooleanRle(new Array(numValues).fill(true)), {
        technique: PhysicalLevelTechnique.VARINT,
        count: numValues,
    });
    const symbolDataStream = createStream(PhysicalStreamType.DATA, symbolTable, {
        logical: { dictionaryType: DictionaryType.FSST },
    });
    const symbolLengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(symbolLengths), {
        logical: { lengthType: LengthType.SYMBOL },
        technique: PhysicalLevelTechnique.VARINT,
        count: symbolLengths.length,
    });
    const offsetStream = createStream(PhysicalStreamType.OFFSET, encodeVarintInt32(offsets), {
        logical: { offsetType: OffsetType.STRING },
        technique: PhysicalLevelTechnique.VARINT,
        count: offsets.length,
    });
    const dictionaryLengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(dictionaryLengths), {
        logical: { lengthType: LengthType.DICTIONARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: dictionaryLengths.length,
    });
    const dictionaryDataStream = createStream(PhysicalStreamType.DATA, compressedDictionary, {
        logical: { dictionaryType: DictionaryType.SINGLE },
    });
    return concatenateBuffers(presentStream, symbolDataStream, symbolLengthStream, offsetStream, dictionaryLengthStream, dictionaryDataStream);
}
|
||||
/**
 * Encodes a shared dictionary for struct fields.
 * @param dictionaryStrings - Array of unique strings in the dictionary
 * @param options - Encoding options
 * @returns Object containing length and data streams
 */
export function encodeSharedDictionary(dictionaryStrings, options = {}) {
    const { useFsst = false, dictionaryType = DictionaryType.SHARED } = options;
    const encodedDictionary = encodeStrings(dictionaryStrings);
    const dictionaryLengths = createStringLengths(dictionaryStrings);
    const lengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(new Uint32Array(dictionaryLengths)), {
        logical: { lengthType: LengthType.DICTIONARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: dictionaryLengths.length,
    });
    const dataStream = createStream(PhysicalStreamType.DATA, encodedDictionary, {
        logical: { dictionaryType },
        count: encodedDictionary.length,
    });
    if (!useFsst) {
        return { lengthStream, dataStream };
    }
    // FSST variant: additionally emit a fixed "catdog" symbol table.
    const symbolTable = new Uint8Array([99, 97, 116, 100, 111, 103]); // "catdog"
    const symbolLengths = new Uint32Array([3, 3]);
    const symbolLengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(symbolLengths), {
        logical: { lengthType: LengthType.SYMBOL },
        technique: PhysicalLevelTechnique.VARINT,
        count: symbolLengths.length,
    });
    const symbolDataStream = createStream(PhysicalStreamType.DATA, symbolTable, {
        logical: { dictionaryType: DictionaryType.FSST },
        count: symbolTable.length,
    });
    return { lengthStream, dataStream, symbolLengthStream, symbolDataStream };
}
|
||||
/**
 * Encodes streams for a struct field.
 * @param offsetIndices - Indices into the shared dictionary
 * @param presentValues - Boolean array indicating which values are present
 * @param isPresent - Whether the field itself is present
 * @returns Encoded streams for the field
 */
export function encodeStructField(offsetIndices, presentValues, isPresent = true) {
    // An absent field encodes as a bare stream count of zero.
    if (!isPresent) {
        return encodeNumStreams(0);
    }
    return concatenateBuffers(encodeNumStreams(2), createPresentStream(presentValues), createOffsetStream(offsetIndices));
}
/** Encodes a stream count as a single varint. */
function encodeNumStreams(numStreams) {
    const scratch = new Uint8Array(5); // max varint32 size
    const cursor = new IntWrapper(0);
    encodeVarintInt32Value(numStreams, scratch, cursor);
    return scratch.slice(0, cursor.get());
}
/** Builds a PRESENT stream backed by boolean-RLE data. */
function createPresentStream(presentValues) {
    const metadata = {
        physicalStreamType: PhysicalStreamType.PRESENT,
        logicalStreamType: { dictionaryType: DictionaryType.NONE },
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
        numValues: presentValues.length,
        byteLength: 0, // buildEncodedStream overwrites this with the real payload length
        decompressedCount: presentValues.length,
    };
    return buildEncodedStream(metadata, encodeBooleanRle(presentValues));
}
/** Builds an OFFSET stream of varint-encoded dictionary indices. */
function createOffsetStream(offsetIndices) {
    const metadata = {
        physicalStreamType: PhysicalStreamType.OFFSET,
        logicalStreamType: { offsetType: OffsetType.STRING },
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
        numValues: offsetIndices.length,
        byteLength: 0, // buildEncodedStream overwrites this with the real payload length
        decompressedCount: offsetIndices.length,
    };
    return buildEncodedStream(metadata, encodeVarintInt32(new Uint32Array(offsetIndices)));
}
|
||||
/**
 * Builds a complete encoded stream by combining metadata and data.
 *
 * The metadata's `byteLength` is replaced with the actual payload length
 * before encoding, so callers may pass a placeholder.
 */
export function buildEncodedStream(streamMetadata, encodedData) {
    const metadataBuffer = encodeStreamMetadata({
        ...streamMetadata,
        byteLength: encodedData.length,
    });
    return concatenateBuffers(metadataBuffer, encodedData);
}
|
||||
/**
 * Encodes stream metadata into binary format.
 * - Byte 1: Stream type (physical type in upper 4 bits, logical subtype in lower 4 bits)
 * - Byte 2: Encodings (llt1[5-7], llt2[2-4], plt[0-1])
 * - Varints: numValues, byteLength
 * - If RLE: Varints: runs, numRleValues
 */
export function encodeStreamMetadata(metadata) {
    const buffer = new Uint8Array(100); // generous scratch; trimmed before return
    buffer[0] = encodeStreamTypeByte(metadata);
    buffer[1] = encodeEncodingsByte(metadata);
    const cursor = new IntWrapper(2);
    encodeVarintInt32Value(metadata.numValues, buffer, cursor);
    encodeVarintInt32Value(metadata.byteLength, buffer, cursor);
    if (isRleMetadata(metadata)) {
        encodeVarintInt32Value(metadata.runs, buffer, cursor);
        encodeVarintInt32Value(metadata.numRleValues, buffer, cursor);
    }
    return buffer.slice(0, cursor.get());
}
/** Packs the physical stream type (high nibble) and logical subtype (low nibble). */
function encodeStreamTypeByte(metadata) {
    const physicalTypeIndex = Object.values(PhysicalStreamType).indexOf(metadata.physicalStreamType);
    return (physicalTypeIndex << 4) | getLogicalSubtypeValue(metadata);
}
/**
 * Maps the logical stream subtype to its enum ordinal; 0 when the subtype
 * is absent or the physical type carries no subtype.
 */
function getLogicalSubtypeValue(metadata) {
    const { physicalStreamType, logicalStreamType } = metadata;
    let enumValues;
    let subtype;
    switch (physicalStreamType) {
        case PhysicalStreamType.DATA:
            enumValues = Object.values(DictionaryType);
            subtype = logicalStreamType.dictionaryType;
            break;
        case PhysicalStreamType.OFFSET:
            enumValues = Object.values(OffsetType);
            subtype = logicalStreamType.offsetType;
            break;
        case PhysicalStreamType.LENGTH:
            enumValues = Object.values(LengthType);
            subtype = logicalStreamType.lengthType;
            break;
        default:
            return 0;
    }
    return subtype !== undefined ? enumValues.indexOf(subtype) : 0;
}
/** Packs llt1 (bits 5-7), llt2 (bits 2-4), and plt (bits 0-1) into one byte. */
function encodeEncodingsByte(metadata) {
    const logicalTechniques = Object.values(LogicalLevelTechnique);
    const llt1Index = logicalTechniques.indexOf(metadata.logicalLevelTechnique1);
    const llt2Index = logicalTechniques.indexOf(metadata.logicalLevelTechnique2);
    const pltIndex = Object.values(PhysicalLevelTechnique).indexOf(metadata.physicalLevelTechnique);
    return (llt1Index << 5) | (llt2Index << 2) | pltIndex;
}
/** True when the metadata carries the RLE-specific fields. */
function isRleMetadata(metadata) {
    return "runs" in metadata && "numRleValues" in metadata;
}
|
||||
/**
 * Concatenates multiple Uint8Array buffers into a single buffer.
 *
 * @param buffers - Buffers to join, in order.
 * @returns A newly allocated buffer containing all inputs back-to-back.
 */
export function concatenateBuffers(...buffers) {
    let total = 0;
    for (const chunk of buffers) {
        total += chunk.length;
    }
    const merged = new Uint8Array(total);
    let cursor = 0;
    for (const chunk of buffers) {
        merged.set(chunk, cursor);
        cursor += chunk.length;
    }
    return merged;
}
|
||||
//# sourceMappingURL=decodingTestUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/decodingTestUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/decodingTestUtils.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
10
node_modules/@maplibre/mlt/dist/decoding/decodingUtils.d.ts
generated
vendored
Normal file
10
node_modules/@maplibre/mlt/dist/decoding/decodingUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
import type IntWrapper from "./intWrapper";
|
||||
import { VectorType } from "../vector/vectorType";
|
||||
import type BitVector from "../vector/flat/bitVector";
|
||||
export declare function skipColumn(numStreams: number, tile: Uint8Array, offset: IntWrapper): void;
|
||||
export declare function decodeBooleanRle(buffer: Uint8Array, numBooleans: number, byteLength: number, pos: IntWrapper, nullabilityBuffer?: BitVector): Uint8Array;
|
||||
export declare function decodeByteRle(buffer: Uint8Array, numBytes: number, byteLength: number, pos: IntWrapper): Uint8Array;
|
||||
export declare function decodeFloatsLE(encodedValues: Uint8Array, pos: IntWrapper, numValues: number, nullabilityBuffer?: BitVector): Float32Array;
|
||||
export declare function decodeDoublesLE(encodedValues: Uint8Array, pos: IntWrapper, numValues: number, nullabilityBuffer?: BitVector): Float64Array;
|
||||
export declare function decodeString(buf: Uint8Array, pos: number, end: number): string;
|
||||
export declare function getVectorTypeBooleanStream(numFeatures: number, byteLength: number, data: Uint8Array, offset: IntWrapper): VectorType;
|
||||
154
node_modules/@maplibre/mlt/dist/decoding/decodingUtils.js
generated
vendored
Normal file
154
node_modules/@maplibre/mlt/dist/decoding/decodingUtils.js
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
import { VectorType } from "../vector/vectorType";
|
||||
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
|
||||
import { unpackNullableBoolean, unpackNullable } from "./unpackNullableUtils";
|
||||
/**
 * Skips past all streams of a column by decoding each stream's metadata
 * header and advancing the cursor over its payload.
 */
export function skipColumn(numStreams, tile, offset) {
    //TODO: add size of column in Mlt for fast skipping
    let remaining = numStreams;
    while (remaining-- > 0) {
        const streamMetadata = decodeStreamMetadata(tile, offset);
        offset.add(streamMetadata.byteLength);
    }
}
|
||||
/**
 * Decodes an RLE-compressed boolean stream into its packed byte form
 * (one bit per boolean, 8 booleans per byte).
 *
 * When `nullabilityBuffer` is provided, the packed bits are expanded
 * against it via unpackNullableBoolean.
 */
export function decodeBooleanRle(buffer, numBooleans, byteLength, pos, nullabilityBuffer) {
    const packedByteCount = Math.ceil(numBooleans / 8.0);
    const packed = decodeByteRle(buffer, packedByteCount, byteLength, pos);
    return nullabilityBuffer ? unpackNullableBoolean(packed, numBooleans, nullabilityBuffer) : packed;
}
|
||||
export function decodeByteRle(buffer, numBytes, byteLength, pos) {
|
||||
const values = new Uint8Array(numBytes);
|
||||
let valueOffset = 0;
|
||||
const streamEndPos = pos.get() + byteLength;
|
||||
while (valueOffset < numBytes) {
|
||||
if (pos.get() >= streamEndPos) {
|
||||
break;
|
||||
}
|
||||
const header = buffer[pos.increment()];
|
||||
/* Runs */
|
||||
if (header <= 0x7f) {
|
||||
const numRuns = header + 3;
|
||||
const value = buffer[pos.increment()];
|
||||
const endValueOffset = Math.min(valueOffset + numRuns, numBytes);
|
||||
values.fill(value, valueOffset, endValueOffset);
|
||||
valueOffset = endValueOffset;
|
||||
}
|
||||
else {
|
||||
/* Literals */
|
||||
const numLiterals = 256 - header;
|
||||
for (let i = 0; i < numLiterals && valueOffset < numBytes; i++) {
|
||||
values[valueOffset++] = buffer[pos.increment()];
|
||||
}
|
||||
}
|
||||
}
|
||||
pos.set(streamEndPos);
|
||||
return values;
|
||||
}
|
||||
/**
 * Reads `numValues` float32 values starting at `pos` and advances the cursor.
 *
 * The byte range is copied into a fresh buffer before reinterpretation, so
 * the resulting view is always aligned and detached from `encodedValues`.
 * NOTE(review): reinterprets bytes with host endianness — presumably
 * little-endian input on a little-endian host; confirm.
 */
export function decodeFloatsLE(encodedValues, pos, numValues, nullabilityBuffer) {
    const start = pos.get();
    const end = start + numValues * Float32Array.BYTES_PER_ELEMENT;
    const copied = new Uint8Array(encodedValues.subarray(start, end)).buffer;
    const values = new Float32Array(copied);
    pos.set(end);
    return nullabilityBuffer ? unpackNullable(values, nullabilityBuffer, 0) : values;
}
/**
 * Reads `numValues` float64 values starting at `pos` and advances the cursor.
 *
 * Mirrors decodeFloatsLE but for 8-byte doubles; see the notes there.
 */
export function decodeDoublesLE(encodedValues, pos, numValues, nullabilityBuffer) {
    const start = pos.get();
    const end = start + numValues * Float64Array.BYTES_PER_ELEMENT;
    const copied = new Uint8Array(encodedValues.subarray(start, end)).buffer;
    const values = new Float64Array(copied);
    pos.set(end);
    return nullabilityBuffer ? unpackNullable(values, nullabilityBuffer, 0) : values;
}
|
||||
// Slices at least this long decode faster via the built-in TextDecoder.
const TEXT_DECODER_MIN_LENGTH = 12;
const utf8TextDecoder = new TextDecoder();
// Source: https://github.com/mapbox/pbf/issues/106
/**
 * Decodes the UTF-8 bytes buf[pos..end) into a string.
 *
 * Long slices go through the built-in TextDecoder; short slices use the
 * hand-rolled decoder below, which is faster for small strings.
 */
export function decodeString(buf, pos, end) {
    return end - pos >= TEXT_DECODER_MIN_LENGTH
        ? utf8TextDecoder.decode(buf.subarray(pos, end))
        : readUtf8(buf, pos, end);
}
/**
 * Minimal UTF-8 decoder for short slices.
 *
 * Invalid sequences (bad continuation bytes, overlong encodings, surrogate
 * halves, code points past U+10FFFF) emit U+FFFD and resync on the next byte.
 * A sequence truncated by `end` terminates the loop.
 */
function readUtf8(buf, pos, end) {
    let out = "";
    let i = pos;
    while (i < end) {
        const first = buf[i];
        // Sequence length from the lead byte's range.
        let seqLen = first > 0xef ? 4 : first > 0xdf ? 3 : first > 0xbf ? 2 : 1;
        if (i + seqLen > end) {
            break; // truncated sequence at the end of the slice
        }
        let codePoint = null;
        if (seqLen === 1) {
            if (first < 0x80) {
                codePoint = first;
            }
        }
        else if (seqLen === 2) {
            const second = buf[i + 1];
            if ((second & 0xc0) === 0x80) {
                codePoint = ((first & 0x1f) << 6) | (second & 0x3f);
                if (codePoint <= 0x7f) {
                    codePoint = null; // overlong encoding
                }
            }
        }
        else if (seqLen === 3) {
            const second = buf[i + 1];
            const third = buf[i + 2];
            if ((second & 0xc0) === 0x80 && (third & 0xc0) === 0x80) {
                codePoint = ((first & 0xf) << 12) | ((second & 0x3f) << 6) | (third & 0x3f);
                if (codePoint <= 0x7ff || (codePoint >= 0xd800 && codePoint <= 0xdfff)) {
                    codePoint = null; // overlong encoding or UTF-16 surrogate half
                }
            }
        }
        else {
            const second = buf[i + 1];
            const third = buf[i + 2];
            const fourth = buf[i + 3];
            if ((second & 0xc0) === 0x80 && (third & 0xc0) === 0x80 && (fourth & 0xc0) === 0x80) {
                codePoint = ((first & 0xf) << 18) | ((second & 0x3f) << 12) | ((third & 0x3f) << 6) | (fourth & 0x3f);
                if (codePoint <= 0xffff || codePoint >= 0x110000) {
                    codePoint = null; // overlong encoding or beyond the Unicode range
                }
            }
        }
        if (codePoint === null) {
            codePoint = 0xfffd; // U+FFFD replacement character
            seqLen = 1; // resync on the next byte
        }
        else if (codePoint > 0xffff) {
            // Emit astral code points as a UTF-16 surrogate pair.
            codePoint -= 0x10000;
            out += String.fromCharCode(((codePoint >>> 10) & 0x3ff) | 0xd800);
            codePoint = 0xdc00 | (codePoint & 0x3ff);
        }
        out += String.fromCharCode(codePoint);
        i += seqLen;
    }
    return out;
}
|
||||
/**
 * Heuristically classifies a boolean stream as CONST or FLAT from its
 * encoded size and first value byte.
 */
export function getVectorTypeBooleanStream(numFeatures, byteLength, data, offset) {
    const valuesPerRun = 0x83;
    // TODO: use VectorType metadata field for to test which VectorType is used
    if (Math.ceil(numFeatures / valuesPerRun) * 2 !== byteLength) {
        return VectorType.FLAT;
    }
    /* Test the first value byte if all bits are set to true */
    const firstValueByte = data[offset.get() + 1] & 0xff;
    return firstValueByte === (bitCount(numFeatures) << 2) - 1 ? VectorType.CONST : VectorType.FLAT;
}
|
||||
/**
 * Number of bits needed to represent `number`, with 0 treated as needing 1 bit.
 *
 * @param number - A non-negative integer.
 * @returns floor(log2(number)) + 1, or 1 for an input of 0.
 */
function bitCount(number) {
    //TODO: refactor to get rid of special case handling
    if (number === 0) {
        return 1;
    }
    return Math.floor(Math.log2(number) + 1);
}
|
||||
//# sourceMappingURL=decodingUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/decodingUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/decodingUtils.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
47
node_modules/@maplibre/mlt/dist/decoding/fastPforDecoder.d.ts
generated
vendored
Normal file
47
node_modules/@maplibre/mlt/dist/decoding/fastPforDecoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
/**
|
||||
* FastPFOR decoding implementation.
|
||||
*
|
||||
* @remarks
|
||||
* Terminology note: "exceptions" in FastPFOR refer to **outlier values** within a block that do not fit in the
|
||||
* chosen base bit-width for that block. These are stored in separate "exception streams" and later applied back
|
||||
* to the unpacked base values. This is unrelated to JavaScript/TypeScript runtime exceptions.
|
||||
*/
|
||||
/**
|
||||
* Workspace for the FastPFOR decoder.
|
||||
*/
|
||||
export type FastPforDecoderWorkspace = {
    // Per-bit-width scratch buffers holding the unpacked exception (outlier)
    // values for the current page; indexed by bit width.
    dataToBePacked: Array<Uint32Array>;
    // Per-bit-width read cursors into `dataToBePacked`; reset for every page.
    dataPointers: Int32Array;
    // Per-page byte metadata: block headers ([bitWidth, exceptionCount]),
    // per-block maxBits, and exception positions.
    byteContainer: Uint8Array;
    // Optional word-aligned int32 view over `byteContainer`, used for bulk copies.
    byteContainerI32?: Int32Array;
    // Per-bit-width count of valid values stored in each `dataToBePacked` stream.
    exceptionSizes: Int32Array;
};
|
||||
/**
|
||||
* Workspace for decoding the FastPFOR *wire format* (big-endian int32 words).
|
||||
*
|
||||
* @remarks
|
||||
* This workspace owns:
|
||||
* - a scratch `encodedWords` buffer to materialize big-endian words
|
||||
* - the reusable `FastPforDecoderWorkspace` used by `decodeFastPforInt32`
|
||||
*
|
||||
* The caller is responsible for creating and reusing this object.
|
||||
*/
|
||||
export type FastPforWireDecodeWorkspace = {
|
||||
encodedWords: Uint32Array;
|
||||
decoderWorkspace: FastPforDecoderWorkspace;
|
||||
};
|
||||
/**
|
||||
* Creates an isolated workspace for decoding.
|
||||
* Reusing a workspace across calls avoids repeated allocations.
|
||||
*/
|
||||
export declare function createDecoderWorkspace(): FastPforDecoderWorkspace;
|
||||
export declare function createFastPforWireDecodeWorkspace(initialEncodedWordCapacity?: number): FastPforWireDecodeWorkspace;
|
||||
export declare function ensureFastPforWireEncodedWordsCapacity(workspace: FastPforWireDecodeWorkspace, requiredWordCount: number): Uint32Array;
|
||||
/**
|
||||
* Decodes a sequence of FastPFOR-encoded integers.
|
||||
*
|
||||
* @param encoded The input buffer containing FastPFOR encoded data.
|
||||
* @param numValues The number of integers expected to be decoded.
|
||||
* @param workspace Optional workspace for reuse across calls. If omitted, a new workspace is created per call.
|
||||
*/
|
||||
export declare function decodeFastPforInt32(encoded: Uint32Array, numValues: number, workspace?: FastPforDecoderWorkspace): Uint32Array;
|
||||
482
node_modules/@maplibre/mlt/dist/decoding/fastPforDecoder.js
generated
vendored
Normal file
482
node_modules/@maplibre/mlt/dist/decoding/fastPforDecoder.js
generated
vendored
Normal file
@@ -0,0 +1,482 @@
|
||||
import { MASKS, DEFAULT_PAGE_SIZE, BLOCK_SIZE, greatestMultiple, roundUpToMultipleOf32, normalizePageSize, } from "./fastPforShared";
|
||||
import { fastUnpack32_2, fastUnpack32_3, fastUnpack32_4, fastUnpack32_5, fastUnpack32_6, fastUnpack32_7, fastUnpack32_8, fastUnpack32_9, fastUnpack32_10, fastUnpack32_11, fastUnpack32_12, fastUnpack32_16, fastUnpack256_1, fastUnpack256_2, fastUnpack256_3, fastUnpack256_4, fastUnpack256_5, fastUnpack256_6, fastUnpack256_7, fastUnpack256_8, fastUnpack256_16, fastUnpack256_Generic, } from "./fastPforUnpack";
|
||||
// Maximum bit width of a packed value (values are 32-bit integers).
const MAX_BIT_WIDTH = 32;
// One slot per possible bit width (0..32 inclusive) for per-width arrays.
const BIT_WIDTH_SLOTS = MAX_BIT_WIDTH + 1;
// Page size (values per page) aligned down to a multiple of BLOCK_SIZE.
const PAGE_SIZE = normalizePageSize(DEFAULT_PAGE_SIZE);
// Capacity of the per-page byte-metadata container: up to 3 header bytes per
// block plus one exception-position byte per value.
const BYTE_CONTAINER_SIZE = ((3 * PAGE_SIZE) / BLOCK_SIZE + PAGE_SIZE) | 0;
|
||||
/**
 * Creates an isolated workspace for decoding.
 * Reusing a workspace across calls avoids repeated allocations.
 */
export function createDecoderWorkspace() {
    const metadataBytes = new Uint8Array(BYTE_CONTAINER_SIZE);
    // Aligned int32 view over the same storage for fast bulk copies.
    const metadataWords = new Int32Array(metadataBytes.buffer, metadataBytes.byteOffset, metadataBytes.byteLength >>> 2);
    const workspace = {
        dataToBePacked: new Array(BIT_WIDTH_SLOTS),
        dataPointers: new Int32Array(BIT_WIDTH_SLOTS),
        byteContainer: metadataBytes,
        byteContainerI32: metadataWords,
        exceptionSizes: new Int32Array(BIT_WIDTH_SLOTS),
    };
    return workspace;
}
|
||||
/**
 * Creates a workspace for decoding the FastPFOR wire format.
 *
 * @param initialEncodedWordCapacity - Requested initial capacity (in 32-bit
 *   words) of the scratch `encodedWords` buffer; clamped to at least 16.
 * @throws RangeError If the requested capacity is negative.
 */
export function createFastPforWireDecodeWorkspace(initialEncodedWordCapacity = 16) {
    if (initialEncodedWordCapacity < 0) {
        throw new RangeError(`initialEncodedWordCapacity must be >= 0, got ${initialEncodedWordCapacity}`);
    }
    const wordCapacity = Math.max(16, initialEncodedWordCapacity | 0);
    const workspace = {
        encodedWords: new Uint32Array(wordCapacity),
        decoderWorkspace: createDecoderWorkspace(),
    };
    return workspace;
}
|
||||
/**
 * Ensures the workspace's `encodedWords` scratch buffer can hold at least
 * `requiredWordCount` 32-bit words, growing it when too small. Existing
 * contents are NOT preserved across a grow.
 *
 * @returns The (possibly reallocated) `encodedWords` buffer.
 */
export function ensureFastPforWireEncodedWordsCapacity(workspace, requiredWordCount) {
    const current = workspace.encodedWords;
    if (requiredWordCount <= current.length) {
        return current;
    }
    // Grow geometrically (2x) to amortize reallocation cost, never below 16.
    const grown = new Uint32Array(Math.max(16, requiredWordCount * 2));
    workspace.encodedWords = grown;
    return grown;
}
|
||||
/**
 * Copies the page's byte metadata — packed into the 32-bit words of
 * `inValues` — into the workspace's `byteContainer` as raw bytes.
 *
 * Bytes are extracted least-significant-first from each word; the aligned
 * fast path relies on this matching the platform's typed-array byte order
 * (little-endian on all common JS engines).
 *
 * @param inValues - Encoded input (32-bit words).
 * @param byteContainerStart - Word index in `inValues` where the metadata begins.
 * @param byteSize - Number of metadata bytes to materialize.
 * @param workspace - Decoder workspace owning the reusable byteContainer.
 * @returns The byteContainer holding `byteSize` valid bytes at the front.
 */
function materializeByteContainer(inValues, byteContainerStart, byteSize, workspace) {
    // Grow the scratch buffer (2x); contents need not be preserved.
    if (workspace.byteContainer.length < byteSize) {
        workspace.byteContainer = new Uint8Array(byteSize * 2);
        workspace.byteContainerI32 = undefined; // the cached view is now stale
    }
    const byteContainer = workspace.byteContainer;
    const numFullInts = byteSize >>> 2;
    if ((byteContainer.byteOffset & 3) === 0) {
        // Aligned fast path: bulk-copy whole words via an Int32Array view.
        let intView = workspace.byteContainerI32;
        // Rebuild the view if it is missing or no longer matches the buffer.
        if (!intView ||
            intView.buffer !== byteContainer.buffer ||
            intView.byteOffset !== byteContainer.byteOffset ||
            intView.length < numFullInts) {
            intView = workspace.byteContainerI32 = new Int32Array(byteContainer.buffer, byteContainer.byteOffset, byteContainer.byteLength >>> 2);
        }
        intView.set(inValues.subarray(byteContainerStart, byteContainerStart + numFullInts));
    }
    else {
        // Unaligned fallback: write each word out byte by byte, LSB first.
        for (let i = 0; i < numFullInts; i = (i + 1) | 0) {
            const val = inValues[(byteContainerStart + i) | 0] | 0;
            const base = i << 2;
            byteContainer[base] = val & 0xff;
            byteContainer[(base + 1) | 0] = (val >>> 8) & 0xff;
            byteContainer[(base + 2) | 0] = (val >>> 16) & 0xff;
            byteContainer[(base + 3) | 0] = (val >>> 24) & 0xff;
        }
    }
    // Tail: 1-3 leftover bytes from the final, partially-used word.
    const remainder = byteSize & 3;
    if (remainder > 0) {
        const lastIntIdx = (byteContainerStart + numFullInts) | 0;
        const lastVal = inValues[lastIntIdx] | 0;
        const base = numFullInts << 2;
        for (let r = 0; r < remainder; r = (r + 1) | 0) {
            byteContainer[(base + r) | 0] = (lastVal >>> (r << 3)) & 0xff;
        }
    }
    return byteContainer;
}
|
||||
/**
|
||||
* Unpacks the per-bitWidth "exception streams" described by the page's bitmap.
|
||||
*
|
||||
* @remarks
|
||||
* For each bit-width present in the bitmap, a stream header gives the count of outlier values for that
|
||||
* bit-width, followed by packed bits representing those values.
|
||||
*
|
||||
* @param inValues - Packed input (32-bit words).
|
||||
* @param inExcept - Offset (32-bit word index) where the exception bitmap starts.
|
||||
* @param workspace - Decoder workspace used to store the unpacked exception streams.
|
||||
* @returns The new input offset (32-bit word index) after consuming all exception streams.
|
||||
*/
|
||||
function unpackExceptionStreams(inValues, inExcept, workspace) {
    // Bit (w - 1) of the bitmap word flags the presence of a stream of width w.
    const bitmap = inValues[inExcept++] | 0;
    const dataToBePacked = workspace.dataToBePacked;
    // Exception widths start at 2: a 1-bit exception is handled inline by the
    // block fast path (see applyBlockExceptions) and never gets a stream here.
    for (let bitWidth = 2; bitWidth <= MAX_BIT_WIDTH; bitWidth = (bitWidth + 1) | 0) {
        if (((bitmap >>> (bitWidth - 1)) & 1) === 0)
            continue;
        // One header word per stream: the number of exception values.
        if (inExcept >= inValues.length) {
            throw new Error(`FastPFOR decode: truncated exception stream header (bitWidth=${bitWidth}, streamWordIndex=${inExcept}, needWords=1, availableWords=${inValues.length - inExcept}, encodedWords=${inValues.length})`);
        }
        const size = inValues[inExcept++] >>> 0;
        // Values are unpacked 32 at a time, so size the buffer to a multiple of 32.
        const roundedUp = roundUpToMultipleOf32(size);
        // ceil(size * bitWidth / 32) words actually hold packed data.
        const wordsNeeded = (size * bitWidth + 31) >>> 5;
        if (inExcept + wordsNeeded > inValues.length) {
            throw new Error(`FastPFOR decode: truncated exception stream (bitWidth=${bitWidth}, size=${size}, streamWordIndex=${inExcept}, needWords=${wordsNeeded}, availableWords=${inValues.length - inExcept}, encodedWords=${inValues.length})`);
        }
        // Reuse (or grow) the per-width scratch buffer across pages.
        let exceptionStream = dataToBePacked[bitWidth];
        if (!exceptionStream || exceptionStream.length < roundedUp) {
            exceptionStream = dataToBePacked[bitWidth] = new Uint32Array(roundedUp);
        }
        let j = 0;
        // Each fastUnpack32 call consumes exactly `bitWidth` input words and
        // produces 32 output values.
        for (; j < size; j = (j + 32) | 0) {
            fastUnpack32(inValues, inExcept, exceptionStream, j, bitWidth);
            inExcept = (inExcept + bitWidth) | 0;
        }
        // The final 32-value chunk may overshoot `size`; rewind the words the
        // overshoot did not actually occupy: floor((j - size) * bitWidth / 32).
        const overflow = (j - size) | 0;
        inExcept = (inExcept - ((overflow * bitWidth) >>> 5)) | 0;
        workspace.exceptionSizes[bitWidth] = size;
    }
    return inExcept;
}
|
||||
/**
|
||||
* Unpacks one 256-value block from the packed bitstream using a specialized implementation for common widths.
|
||||
*
|
||||
* @param inValues - Packed input (32-bit words).
|
||||
* @param inPos - Input offset (32-bit word index) where the packed block starts.
|
||||
* @param out - Output buffer.
|
||||
* @param outPos - Output offset where the 256 values will be written.
|
||||
* @param bitWidth - Base bit-width used for this block.
|
||||
* @returns The new input offset (32-bit word index) right after the packed block data.
|
||||
*/
|
||||
function unpackBlock256(inValues, inPos, out, outPos, bitWidth) {
    // 256 values at `bitWidth` bits each occupy 256 * bitWidth / 32
    // = bitWidth * 8 input words, regardless of which unpacker runs.
    const nextInPos = (inPos + (bitWidth << 3)) | 0;
    switch (bitWidth) {
        case 1:
            fastUnpack256_1(inValues, inPos, out, outPos);
            return nextInPos;
        case 2:
            fastUnpack256_2(inValues, inPos, out, outPos);
            return nextInPos;
        case 3:
            fastUnpack256_3(inValues, inPos, out, outPos);
            return nextInPos;
        case 4:
            fastUnpack256_4(inValues, inPos, out, outPos);
            return nextInPos;
        case 5:
            fastUnpack256_5(inValues, inPos, out, outPos);
            return nextInPos;
        case 6:
            fastUnpack256_6(inValues, inPos, out, outPos);
            return nextInPos;
        case 7:
            fastUnpack256_7(inValues, inPos, out, outPos);
            return nextInPos;
        case 8:
            fastUnpack256_8(inValues, inPos, out, outPos);
            return nextInPos;
        case 16:
            fastUnpack256_16(inValues, inPos, out, outPos);
            return nextInPos;
        default:
            // Widths without a specialized unpacker fall back to the generic one.
            fastUnpack256_Generic(inValues, inPos, out, outPos, bitWidth);
            return nextInPos;
    }
}
|
||||
/**
|
||||
* Reads and validates the 2-byte block header from the byteContainer.
|
||||
*
|
||||
* @remarks
|
||||
* The header is `[bitWidth, exceptionCount]`, both stored as single bytes.
|
||||
*
|
||||
* @param byteContainer - Byte metadata buffer for the page.
|
||||
* @param byteContainerLen - The valid byte length in `byteContainer` for this page.
|
||||
* @param bytePosIn - Current offset in `byteContainer`.
|
||||
* @param block - Block index within the page (for error messages).
|
||||
* @returns The parsed header and the updated `bytePosIn`.
|
||||
*/
|
||||
function readBlockHeader(byteContainer, byteContainerLen, bytePosIn, block) {
    // Two header bytes per block: [bitWidth, exceptionCount].
    if (byteContainerLen - bytePosIn < 2) {
        throw new Error(`FastPFOR decode: byteContainer underflow at block=${block} (need 2 bytes for [bitWidth, exceptionCount], bytePos=${bytePosIn}, byteSize=${byteContainerLen})`);
    }
    const bitWidth = byteContainer[bytePosIn];
    const exceptionCount = byteContainer[bytePosIn + 1];
    // A width above 32 cannot come from a well-formed encoder.
    if (bitWidth > MAX_BIT_WIDTH) {
        throw new Error(`FastPFOR decode: invalid bitWidth=${bitWidth} at block=${block} (expected 0..${MAX_BIT_WIDTH}). This likely indicates corrupted or truncated input.`);
    }
    return { bitWidth, exceptionCount, bytePosIn: bytePosIn + 2 };
}
|
||||
/**
|
||||
* Reads and validates the exception header for a block.
|
||||
*
|
||||
* @remarks
|
||||
* The header contains `maxBits` (1 byte), which defines the width of the outlier values as
|
||||
* `exceptionBitWidth = maxBits - bitWidth`.
|
||||
*
|
||||
* @param byteContainer - Byte metadata buffer for the page.
|
||||
* @param byteContainerLen - The valid byte length in `byteContainer` for this page.
|
||||
* @param bytePosIn - Current offset in `byteContainer`.
|
||||
* @param bitWidth - Base bit-width for the block.
|
||||
* @param exceptionCount - Number of exceptions/outliers in this block.
|
||||
* @param block - Block index within the page (for error messages).
|
||||
* @returns Parsed `maxBits`, `exceptionBitWidth`, and the updated `bytePosIn`.
|
||||
*/
|
||||
function readBlockExceptionHeader(byteContainer, byteContainerLen, bytePosIn, bitWidth, exceptionCount, block) {
    // One byte: the maximum bit width of any value in this block.
    if (byteContainerLen < bytePosIn + 1) {
        throw new Error(`FastPFOR decode: exception header underflow at block=${block} (need 1 byte for maxBits, bytePos=${bytePosIn}, byteSize=${byteContainerLen})`);
    }
    const maxBits = byteContainer[bytePosIn];
    const afterHeaderPos = bytePosIn + 1;
    if (maxBits < bitWidth || maxBits > MAX_BIT_WIDTH) {
        throw new Error(`FastPFOR decode: invalid maxBits=${maxBits} at block=${block} (bitWidth=${bitWidth}, expected ${bitWidth}..${MAX_BIT_WIDTH})`);
    }
    // Outliers only store the high bits beyond the block's base width.
    const exceptionBitWidth = (maxBits - bitWidth) | 0;
    if (exceptionBitWidth < 1 || exceptionBitWidth > MAX_BIT_WIDTH) {
        throw new Error(`FastPFOR decode: invalid exceptionBitWidth=${exceptionBitWidth} at block=${block} (bitWidth=${bitWidth}, maxBits=${maxBits})`);
    }
    // The per-exception position bytes must also fit in the container.
    if (afterHeaderPos + exceptionCount > byteContainerLen) {
        throw new Error(`FastPFOR decode: exception positions underflow at block=${block} (need=${exceptionCount}, have=${byteContainerLen - afterHeaderPos})`);
    }
    return { maxBits, exceptionBitWidth, bytePosIn: afterHeaderPos };
}
|
||||
/**
|
||||
* Applies (block-local) FastPFOR "exceptions" (outliers) to an already-unpacked base 256-value block.
|
||||
*
|
||||
* @param out - Output buffer containing the base unpacked values for the block.
|
||||
* @param blockOutPos - Offset in `out` where the 256-value block starts.
|
||||
* @param bitWidth - Base bit-width for the block.
|
||||
* @param exceptionCount - Number of exceptions/outliers in this block.
|
||||
* @param byteContainer - Byte metadata buffer for the page.
|
||||
* @param byteContainerLen - The valid byte length in `byteContainer` for this page.
|
||||
* @param bytePosIn - Current offset in `byteContainer` (right after `[bitWidth, exceptionCount]`).
|
||||
* @param workspace - Decoder workspace holding the unpacked exception streams.
|
||||
* @param block - Block index within the page (for error messages).
|
||||
* @returns The updated `bytePosIn` after consuming the exception metadata bytes.
|
||||
*
|
||||
* The exception metadata is stored in `byteContainer`:
|
||||
* - `maxBits` (1 byte): the maximum bit-width of any value in the block
|
||||
* - `exceptionCount` exception positions (1 byte each, 0..255)
|
||||
*
|
||||
* The exception values themselves are read from the pre-unpacked exception streams stored in `workspace`.
|
||||
* Returns the new position in the byteContainer after consuming the exception metadata bytes.
|
||||
*/
|
||||
function applyBlockExceptions(out, blockOutPos, bitWidth, exceptionCount, byteContainer, byteContainerLen, bytePosIn, workspace, block) {
    // Read and validate [maxBits]; derives exceptionBitWidth = maxBits - bitWidth.
    const { maxBits, exceptionBitWidth, bytePosIn: afterHeaderPos, } = readBlockExceptionHeader(byteContainer, byteContainerLen, bytePosIn, bitWidth, exceptionCount, block);
    bytePosIn = afterHeaderPos;
    if (exceptionBitWidth === 1) {
        // Fast path: a 1-bit exception value is always 1, so each outlier just
        // sets bit `bitWidth` of its base value. No exception stream exists.
        const shift = 1 << bitWidth;
        for (let k = 0; k < exceptionCount; k = (k + 1) | 0) {
            const pos = byteContainer[bytePosIn++]; // position within the 256-value block
            out[(pos + blockOutPos) | 0] |= shift;
        }
        return bytePosIn;
    }
    // General path: outlier high bits come from the pre-unpacked per-width stream.
    const exceptionValues = workspace.dataToBePacked[exceptionBitWidth];
    if (!exceptionValues) {
        throw new Error(`FastPFOR decode: missing exception stream for exceptionBitWidth=${exceptionBitWidth} (bitWidth=${bitWidth}, maxBits=${maxBits}) at block ${block}`);
    }
    const exceptionPointers = workspace.dataPointers;
    let exPtr = exceptionPointers[exceptionBitWidth] | 0;
    const exSize = workspace.exceptionSizes[exceptionBitWidth] | 0;
    // The stream is shared by all blocks of the page; guard the read cursor.
    if (exPtr + exceptionCount > exSize) {
        throw new Error(`FastPFOR decode: exception stream overflow for exceptionBitWidth=${exceptionBitWidth} (ptr=${exPtr}, need ${exceptionCount}, size=${exSize}) at block ${block}`);
    }
    for (let k = 0; k < exceptionCount; k = (k + 1) | 0) {
        const pos = byteContainer[bytePosIn++]; // position within the 256-value block
        const val = exceptionValues[exPtr++] | 0;
        // OR the outlier's high bits above the block's base bits.
        out[(pos + blockOutPos) | 0] |= val << bitWidth;
    }
    // Persist the advanced cursor for the next block using this width.
    exceptionPointers[exceptionBitWidth] = exPtr;
    return bytePosIn;
}
|
||||
/**
 * Decodes every 256-value block of a page: unpacks the base values, then
 * patches in the per-block exceptions (outliers).
 *
 * @param inValues - Encoded input (32-bit words).
 * @param pageStart - Word index of the page header (for error messages).
 * @param inPos - Word index where the packed block data starts.
 * @param packedEnd - Word index one past the packed region; must be consumed exactly.
 * @param out - Output buffer.
 * @param outPos - Output offset of the first block.
 * @param blocks - Number of 256-value blocks in this page.
 * @param byteContainer - Materialized per-page byte metadata.
 * @param byteContainerLen - Valid byte length of `byteContainer`.
 * @param workspace - Decoder workspace holding the unpacked exception streams.
 */
function decodePageBlocks(inValues, pageStart, inPos, packedEnd, out, outPos, blocks, byteContainer, byteContainerLen, workspace) {
    let tmpInPos = inPos | 0;
    let bytePosIn = 0;
    for (let run = 0; run < blocks; run = (run + 1) | 0) {
        // Per-block header: [bitWidth, exceptionCount].
        const header = readBlockHeader(byteContainer, byteContainerLen, bytePosIn, run);
        bytePosIn = header.bytePosIn;
        const bitWidth = header.bitWidth;
        const exceptionCount = header.exceptionCount;
        const blockOutPos = (outPos + run * BLOCK_SIZE) | 0;
        switch (bitWidth) {
            case 0:
                // Width 0: all base values are zero, nothing packed.
                out.fill(0, blockOutPos, blockOutPos + BLOCK_SIZE);
                break;
            case 32:
                // Width 32: values are stored verbatim, one word each.
                for (let i = 0; i < BLOCK_SIZE; i = (i + 1) | 0) {
                    out[(blockOutPos + i) | 0] = inValues[(tmpInPos + i) | 0] | 0;
                }
                tmpInPos = (tmpInPos + BLOCK_SIZE) | 0;
                break;
            default:
                tmpInPos = unpackBlock256(inValues, tmpInPos, out, blockOutPos, bitWidth);
                break;
        }
        if (exceptionCount > 0) {
            bytePosIn = applyBlockExceptions(out, blockOutPos, bitWidth, exceptionCount, byteContainer, byteContainerLen, bytePosIn, workspace, run);
        }
    }
    // Sanity check: the blocks must consume the packed region exactly.
    if (tmpInPos !== packedEnd) {
        throw new Error(`FastPFOR decode: packed region mismatch (pageStart=${pageStart}, packedStart=${inPos}, consumedPackedEnd=${tmpInPos}, expectedPackedEnd=${packedEnd}, packedWords=${packedEnd - inPos}, encoded.length=${inValues.length})`);
    }
    return;
}
|
||||
/**
|
||||
* Decodes one FastPFOR page (aligned to 256-value blocks).
|
||||
*/
|
||||
function decodePage(inValues, out, inPos, outPos, thisSize, workspace) {
    const pageStart = inPos | 0;
    // First word: relative offset from pageStart to the page's metadata section.
    const whereMeta = inValues[pageStart] | 0;
    if (whereMeta <= 0 || pageStart + whereMeta > inValues.length - 1) {
        throw new Error(`FastPFOR decode: invalid whereMeta=${whereMeta} at pageStart=${pageStart} (expected > 0 and pageStart+whereMeta < encoded.length=${inValues.length})`);
    }
    // Packed base-value words occupy [packedStart, packedEnd).
    const packedStart = (pageStart + 1) | 0;
    const packedEnd = (pageStart + whereMeta) | 0;
    // Metadata layout: [byteSize word][byte metadata, word-padded][exception bitmap + streams].
    const byteSize = inValues[packedEnd] >>> 0;
    const metaInts = (byteSize + 3) >>> 2; // byte metadata rounded up to whole words
    const byteContainerStart = packedEnd + 1;
    const bitmapPos = byteContainerStart + metaInts;
    if (bitmapPos >= inValues.length) {
        throw new Error(`FastPFOR decode: invalid byteSize=${byteSize} (metaInts=${metaInts}, pageStart=${pageStart}, packedEnd=${packedEnd}, byteContainerStart=${byteContainerStart}) causes bitmapPos=${bitmapPos} out of bounds (encoded.length=${inValues.length})`);
    }
    const byteContainer = materializeByteContainer(inValues, byteContainerStart, byteSize, workspace);
    const byteContainerLen = byteSize;
    // Pre-unpack all per-width exception streams for this page.
    const inExcept = unpackExceptionStreams(inValues, bitmapPos, workspace);
    // Reset the per-width read cursors before the blocks consume the streams.
    const exceptionPointers = workspace.dataPointers;
    exceptionPointers.fill(0);
    const startOutPos = outPos | 0;
    // thisSize is always a multiple of BLOCK_SIZE for aligned pages.
    const blocks = (thisSize / BLOCK_SIZE) | 0;
    decodePageBlocks(inValues, pageStart, packedStart, packedEnd, out, startOutPos, blocks, byteContainer, byteContainerLen, workspace);
    // Return the word index just past this page (end of the exception streams).
    return inExcept;
}
|
||||
/**
 * Decodes all block-aligned values, one page at a time.
 *
 * @param outLength - Number of values to decode; truncated down to a multiple
 *   of BLOCK_SIZE (the tail is handled by the VByte decoder).
 * @returns The input word index just past the last decoded page.
 */
function decodeAlignedPages(inValues, out, inPos, outPos, outLength, workspace) {
    const endOut = outPos + greatestMultiple(outLength, BLOCK_SIZE);
    let readPos = inPos;
    let writePos = outPos;
    while (writePos !== endOut) {
        // Each page covers at most PAGE_SIZE values; the final page may be shorter.
        const pageValues = Math.min(PAGE_SIZE, endOut - writePos);
        readPos = decodePage(inValues, out, readPos, writePos, pageValues, workspace);
        writePos = (writePos + pageValues) | 0;
    }
    return readPos;
}
|
||||
/**
 * Decodes the VariableByte tail of a FastPFOR stream.
 *
 * Values are stored as 7-bit groups, little-endian, with bytes packed
 * LSB-first into 32-bit words. A set MSB *terminates* a value — the opposite
 * convention of Protobuf varints.
 *
 * @param inValues - Encoded input (32-bit words).
 * @param inPos - Word index where the VByte region starts.
 * @param inLength - Number of words in the VByte region.
 * @param out - Output buffer.
 * @param outPos - Output offset of the first decoded value.
 * @param expectedCount - Exact number of values to decode.
 * @returns The word index reached after decoding (points at the word holding
 *   the last consumed byte, or `inPos` when nothing was decoded).
 * @throws Error On an unterminated 5-byte value or a truncated stream.
 */
function decodeVByte(inValues, inPos, inLength, out, outPos, expectedCount) {
    if (expectedCount === 0)
        return inPos;
    const inEnd = inPos + inLength;
    const outStart = outPos;
    const outEnd = outPos + expectedCount;
    let word = inPos;       // current input word index
    let byteShift = 0;      // bit offset of the next byte within the word
    let writePos = outPos;
    let value = 0;          // value being assembled from 7-bit groups
    let groupShift = 0;     // bit position of the next 7-bit group
    while (word < inEnd && writePos < outEnd) {
        // Consume the next byte, LSB-first within the current word.
        const b = (inValues[word] >>> byteShift) & 0xff;
        byteShift += 8;
        word += byteShift >>> 5;
        byteShift &= 31;
        value |= (b & 0x7f) << groupShift;
        if ((b & 0x80) !== 0) {
            // MSB set terminates the value.
            out[writePos++] = value | 0;
            value = 0;
            groupShift = 0;
        }
        else {
            groupShift += 7;
            if (groupShift > 28) {
                throw new Error(`FastPFOR VByte: unterminated value (expected MSB=1 terminator within 5 bytes; shift=${groupShift}, partial=${value}, decoded=${writePos - outStart}/${expectedCount}, inPos=${word}, inEnd=${inEnd})`);
            }
        }
    }
    if (writePos !== outEnd) {
        throw new Error(`FastPFOR VByte: truncated stream (decoded=${writePos - outStart}, expected=${expectedCount}, consumedWords=${word - inPos}/${inLength}, vbyteStart=${inPos}, vbyteEnd=${inEnd})`);
    }
    return word;
}
|
||||
/**
|
||||
* Decodes a sequence of FastPFOR-encoded integers.
|
||||
*
|
||||
* @param encoded The input buffer containing FastPFOR encoded data.
|
||||
* @param numValues The number of integers expected to be decoded.
|
||||
* @param workspace Optional workspace for reuse across calls. If omitted, a new workspace is created per call.
|
||||
*/
|
||||
export function decodeFastPforInt32(encoded, numValues, workspace) {
    let inPos = 0;
    let outPos = 0;
    const decoded = new Uint32Array(numValues);
    // Without a caller-supplied workspace, allocate a fresh one per call.
    const decoderWorkspace = workspace ?? createDecoderWorkspace();
    if (encoded.length > 0) {
        // First word: number of values encoded in block-aligned pages;
        // the remainder (< BLOCK_SIZE values) follows as a VByte tail.
        const alignedLength = encoded[inPos] | 0;
        inPos = (inPos + 1) | 0;
        if ((alignedLength & (BLOCK_SIZE - 1)) !== 0) {
            throw new Error(`FastPFOR decode: invalid alignedLength=${alignedLength} (expected multiple of ${BLOCK_SIZE})`);
        }
        if (outPos + alignedLength > decoded.length) {
            throw new Error(`FastPFOR decode: output buffer too small (outPos=${outPos}, alignedLength=${alignedLength}, out.length=${decoded.length})`);
        }
        inPos = decodeAlignedPages(encoded, decoded, inPos, outPos, alignedLength, decoderWorkspace);
        outPos = (outPos + alignedLength) | 0;
    }
    // Decode the VByte tail directly into the remaining output slots.
    const remainingLength = (encoded.length - inPos) | 0;
    const expectedTail = (numValues - outPos) | 0;
    decodeVByte(encoded, inPos, remainingLength, decoded, outPos, expectedTail);
    return decoded;
}
|
||||
/**
 * Unpacks 32 values of `bitWidth` bits each from `inValues` into `out`,
 * consuming exactly `bitWidth` input words. Dispatches to specialized
 * unpackers for common widths and falls back to a generic bit extractor.
 *
 * @param inValues - Packed input (32-bit words).
 * @param inPos - Input word index where the packed group starts.
 * @param out - Output buffer.
 * @param outPos - Output offset for the 32 values.
 * @param bitWidth - Bits per value (expected 1..32; widths without a
 *   specialized case use the generic path below).
 */
function fastUnpack32(inValues, inPos, out, outPos, bitWidth) {
    switch (bitWidth) {
        case 2:
            fastUnpack32_2(inValues, inPos, out, outPos);
            return;
        case 3:
            fastUnpack32_3(inValues, inPos, out, outPos);
            return;
        case 4:
            fastUnpack32_4(inValues, inPos, out, outPos);
            return;
        case 5:
            fastUnpack32_5(inValues, inPos, out, outPos);
            return;
        case 6:
            fastUnpack32_6(inValues, inPos, out, outPos);
            return;
        case 7:
            fastUnpack32_7(inValues, inPos, out, outPos);
            return;
        case 8:
            fastUnpack32_8(inValues, inPos, out, outPos);
            return;
        case 9:
            fastUnpack32_9(inValues, inPos, out, outPos);
            return;
        case 10:
            fastUnpack32_10(inValues, inPos, out, outPos);
            return;
        case 11:
            fastUnpack32_11(inValues, inPos, out, outPos);
            return;
        case 12:
            fastUnpack32_12(inValues, inPos, out, outPos);
            return;
        case 16:
            fastUnpack32_16(inValues, inPos, out, outPos);
            return;
        case 32:
            // Width 32: values are stored verbatim, one word each.
            for (let i = 0; i < 32; i = (i + 1) | 0) {
                out[(outPos + i) | 0] = inValues[(inPos + i) | 0] | 0;
            }
            return;
        default:
            break;
    }
    // Generic path: extract `bitWidth`-bit fields, little-endian within words.
    const valueMask = MASKS[bitWidth] >>> 0;
    let inputWordIndex = inPos;
    let bitOffset = 0;
    let currentWord = inValues[inputWordIndex] >>> 0;
    for (let i = 0; i < 32; i++) {
        if (bitOffset + bitWidth <= 32) {
            // The whole value lies within the current word.
            const value = (currentWord >>> bitOffset) & valueMask;
            out[outPos + i] = value | 0;
            bitOffset += bitWidth;
            if (bitOffset === 32) {
                bitOffset = 0;
                inputWordIndex++;
                // Skip the fetch after the last value so we never read past
                // the `bitWidth` words this call owns.
                if (i !== 31)
                    currentWord = inValues[inputWordIndex] >>> 0;
            }
        }
        else {
            // The value straddles a word boundary: low bits from the current
            // word, high bits from the next one.
            const lowBits = 32 - bitOffset;
            const low = currentWord >>> bitOffset;
            inputWordIndex++;
            currentWord = inValues[inputWordIndex] >>> 0;
            const highMask = MASKS[bitWidth - lowBits] >>> 0;
            const high = currentWord & highMask;
            const value = (low | (high << lowBits)) & valueMask;
            out[outPos + i] = value | 0;
            bitOffset = bitWidth - lowBits;
        }
    }
}
|
||||
//# sourceMappingURL=fastPforDecoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/fastPforDecoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/fastPforDecoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
7
node_modules/@maplibre/mlt/dist/decoding/fastPforShared.d.ts
generated
vendored
Normal file
7
node_modules/@maplibre/mlt/dist/decoding/fastPforShared.d.ts
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
/** Bit masks for widths 0..32: MASKS[w] has the low w bits set. Do not mutate. */
export declare const MASKS: Readonly<Uint32Array>;
/** Default number of values per FastPFOR page. */
export declare const DEFAULT_PAGE_SIZE = 65536;
/** Number of values per FastPFOR block. */
export declare const BLOCK_SIZE = 256;
/** Largest multiple of `factor` that is <= `value`. */
export declare function greatestMultiple(value: number, factor: number): number;
/** Smallest multiple of 32 that is >= `value`. */
export declare function roundUpToMultipleOf32(value: number): number;
/** Clamps a page size to a positive multiple of BLOCK_SIZE (default for invalid input). */
export declare function normalizePageSize(pageSize: number): number;
/** Reverses the byte order of a 32-bit unsigned integer. */
export declare function bswap32(value: number): number;
|
||||
29
node_modules/@maplibre/mlt/dist/decoding/fastPforShared.js
generated
vendored
Normal file
29
node_modules/@maplibre/mlt/dist/decoding/fastPforShared.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
/**
 * Bit masks for each bitwidth 0-32.
 * DO NOT MUTATE - this is a shared constant.
 */
const masks = new Uint32Array(33);
// masks[0] stays 0 (typed arrays are zero-initialized).
for (let width = 1; width <= 32; width++) {
    // Low `width` bits set; width 32 is special-cased since `>>> 0` is a no-op shift.
    masks[width] = width === 32 ? 0xffffffff : 0xffffffff >>> (32 - width);
}
export const MASKS = masks;
export const DEFAULT_PAGE_SIZE = 65536;
export const BLOCK_SIZE = 256;
/** Largest multiple of `factor` that is <= `value`. */
export function greatestMultiple(value, factor) {
    const remainder = value % factor;
    return value - remainder;
}
/** Smallest multiple of 32 that is >= `value`. */
export function roundUpToMultipleOf32(value) {
    return greatestMultiple(value + 31, 32);
}
/**
 * Clamps a requested page size to a positive multiple of BLOCK_SIZE,
 * falling back to DEFAULT_PAGE_SIZE for non-finite or non-positive input.
 */
export function normalizePageSize(pageSize) {
    if (!Number.isFinite(pageSize) || pageSize <= 0) {
        return DEFAULT_PAGE_SIZE;
    }
    const aligned = greatestMultiple(Math.floor(pageSize), BLOCK_SIZE);
    return aligned > 0 ? aligned : BLOCK_SIZE;
}
/** Reverses the byte order of a 32-bit unsigned integer. */
export function bswap32(value) {
    const u = value >>> 0;
    const b0 = (u >>> 24) & 0xff;
    const b1 = (u >>> 16) & 0xff;
    const b2 = (u >>> 8) & 0xff;
    const b3 = u & 0xff;
    return ((b3 << 24) | (b2 << 16) | (b1 << 8) | b0) >>> 0;
}
|
||||
//# sourceMappingURL=fastPforShared.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/fastPforShared.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/fastPforShared.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"fastPforShared.js","sourceRoot":"","sources":["../../src/decoding/fastPforShared.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,MAAM,KAAK,GAAG,IAAI,WAAW,CAAC,EAAE,CAAC,CAAC;AAClC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AACb,KAAK,IAAI,QAAQ,GAAG,CAAC,EAAE,QAAQ,IAAI,EAAE,EAAE,QAAQ,EAAE,EAAE,CAAC;IAChD,KAAK,CAAC,QAAQ,CAAC,GAAG,QAAQ,KAAK,EAAE,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,UAAU,KAAK,CAAC,EAAE,GAAG,QAAQ,CAAC,CAAC;AACpF,CAAC;AACD,MAAM,CAAC,MAAM,KAAK,GAA0B,KAAK,CAAC;AAElD,MAAM,CAAC,MAAM,iBAAiB,GAAG,KAAK,CAAC;AACvC,MAAM,CAAC,MAAM,UAAU,GAAG,GAAG,CAAC;AAE9B,MAAM,UAAU,gBAAgB,CAAC,KAAa,EAAE,MAAc;IAC1D,OAAO,KAAK,GAAG,CAAC,KAAK,GAAG,MAAM,CAAC,CAAC;AACpC,CAAC;AAED,MAAM,UAAU,qBAAqB,CAAC,KAAa;IAC/C,OAAO,gBAAgB,CAAC,KAAK,GAAG,EAAE,EAAE,EAAE,CAAC,CAAC;AAC5C,CAAC;AAED,MAAM,UAAU,iBAAiB,CAAC,QAAgB;IAC9C,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,QAAQ,CAAC,IAAI,QAAQ,IAAI,CAAC;QAAE,OAAO,iBAAiB,CAAC;IAE1E,MAAM,OAAO,GAAG,gBAAgB,CAAC,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,UAAU,CAAC,CAAC;IACnE,OAAO,OAAO,KAAK,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,OAAO,CAAC;AAChD,CAAC;AAED,MAAM,UAAU,OAAO,CAAC,KAAa;IACjC,MAAM,CAAC,GAAG,KAAK,KAAK,CAAC,CAAC;IACtB,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,GAAG,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC;AACzG,CAAC","sourcesContent":["/**\n * Bit masks for each bitwidth 0-32.\n * DO NOT MUTATE - this is a shared constant.\n */\nconst masks = new Uint32Array(33);\nmasks[0] = 0;\nfor (let bitWidth = 1; bitWidth <= 32; bitWidth++) {\n masks[bitWidth] = bitWidth === 32 ? 
0xffffffff : 0xffffffff >>> (32 - bitWidth);\n}\nexport const MASKS: Readonly<Uint32Array> = masks;\n\nexport const DEFAULT_PAGE_SIZE = 65536;\nexport const BLOCK_SIZE = 256;\n\nexport function greatestMultiple(value: number, factor: number): number {\n return value - (value % factor);\n}\n\nexport function roundUpToMultipleOf32(value: number): number {\n return greatestMultiple(value + 31, 32);\n}\n\nexport function normalizePageSize(pageSize: number): number {\n if (!Number.isFinite(pageSize) || pageSize <= 0) return DEFAULT_PAGE_SIZE;\n\n const aligned = greatestMultiple(Math.floor(pageSize), BLOCK_SIZE);\n return aligned === 0 ? BLOCK_SIZE : aligned;\n}\n\nexport function bswap32(value: number): number {\n const x = value >>> 0;\n return (((x & 0xff) << 24) | ((x & 0xff00) << 8) | ((x >>> 8) & 0xff00) | ((x >>> 24) & 0xff)) >>> 0;\n}\n"]}
|
||||
23
node_modules/@maplibre/mlt/dist/decoding/fastPforUnpack.d.ts
generated
vendored
Normal file
23
node_modules/@maplibre/mlt/dist/decoding/fastPforUnpack.d.ts
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
export declare function fastUnpack32_1(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_2(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_3(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_4(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_5(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_6(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_7(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_8(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_9(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_10(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_11(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_12(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack32_16(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_1(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_2(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_3(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_4(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_5(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_6(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_7(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_8(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_16(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
|
||||
export declare function fastUnpack256_Generic(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number, bitWidth: number): void;
|
||||
910
node_modules/@maplibre/mlt/dist/decoding/fastPforUnpack.js
generated
vendored
Normal file
910
node_modules/@maplibre/mlt/dist/decoding/fastPforUnpack.js
generated
vendored
Normal file
@@ -0,0 +1,910 @@
|
||||
import { MASKS } from "./fastPforShared";
|
||||
/**
 * Unpacks 32 one-bit values from the single word `inValues[inPos]` into
 * `out[outPos..outPos+31]`, lowest bit first.
 */
export function fastUnpack32_1(inValues, inPos, out, outPos) {
    // All 32 flags live in one word; peel them off one bit at a time.
    let word = inValues[inPos] >>> 0;
    for (let i = 0; i < 32; i++) {
        out[outPos + i] = word & 0x1;
        word >>>= 1;
    }
}
|
||||
/**
 * Unpacks 32 two-bit values from `inValues[inPos..inPos+1]` into
 * `out[outPos..outPos+31]`. Two bits divide a 32-bit word evenly, so no
 * value ever crosses a word boundary.
 */
export function fastUnpack32_2(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        // 16 values per word; shift is twice the in-word index.
        const word = inValues[inPos + (i >>> 4)];
        out[outPos + i] = (word >>> ((i & 15) << 1)) & 0x3;
    }
}
|
||||
/**
 * Unpacks 32 three-bit values from `inValues[inPos..inPos+2]` into
 * `out[outPos..outPos+31]`. The stream is little-endian within each word;
 * a value that straddles a word boundary borrows its high bits from the
 * next word.
 */
export function fastUnpack32_3(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 3;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 29) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x7;
    }
}
|
||||
/**
 * Unpacks 32 four-bit values from `inValues[inPos..inPos+3]` into
 * `out[outPos..outPos+31]`. Four bits divide a 32-bit word evenly, so no
 * value ever crosses a word boundary.
 */
export function fastUnpack32_4(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        // 8 nibbles per word.
        const word = inValues[inPos + (i >>> 3)];
        out[outPos + i] = (word >>> ((i & 7) << 2)) & 0xf;
    }
}
|
||||
/**
 * Unpacks 32 five-bit values from `inValues[inPos..inPos+4]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_5(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 5;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 27) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x1f;
    }
}
|
||||
/**
 * Unpacks 32 six-bit values from `inValues[inPos..inPos+5]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_6(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 6;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 26) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x3f;
    }
}
|
||||
/**
 * Unpacks 32 seven-bit values from `inValues[inPos..inPos+6]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_7(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 7;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 25) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x7f;
    }
}
|
||||
/**
 * Unpacks 32 eight-bit values from `inValues[inPos..inPos+7]` into
 * `out[outPos..outPos+31]`. Eight bits divide a 32-bit word evenly, so no
 * value ever crosses a word boundary.
 */
export function fastUnpack32_8(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        // 4 bytes per word.
        const word = inValues[inPos + (i >>> 2)];
        out[outPos + i] = (word >>> ((i & 3) << 3)) & 0xff;
    }
}
|
||||
/**
 * Unpacks 32 nine-bit values from `inValues[inPos..inPos+8]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_9(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 9;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 23) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x1ff;
    }
}
|
||||
/**
 * Unpacks 32 ten-bit values from `inValues[inPos..inPos+9]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_10(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 10;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 22) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x3ff;
    }
}
|
||||
/**
 * Unpacks 32 eleven-bit values from `inValues[inPos..inPos+10]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_11(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 11;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 21) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x7ff;
    }
}
|
||||
/**
 * Unpacks 32 twelve-bit values from `inValues[inPos..inPos+11]` into
 * `out[outPos..outPos+31]`. A value that straddles a word boundary borrows
 * its high bits from the next word.
 */
export function fastUnpack32_12(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        const bit = i * 12;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 20) {
            // Value crosses into the next word; pull in its low bits.
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0xfff;
    }
}
|
||||
/**
 * Unpacks 32 sixteen-bit values from `inValues[inPos..inPos+15]` into
 * `out[outPos..outPos+31]`. Each word holds exactly two halfwords, so no
 * value ever crosses a word boundary.
 */
export function fastUnpack32_16(inValues, inPos, out, outPos) {
    for (let i = 0; i < 32; i++) {
        // 2 halfwords per word: even index -> low half, odd -> high half.
        const word = inValues[inPos + (i >>> 1)];
        out[outPos + i] = (word >>> ((i & 1) << 4)) & 0xffff;
    }
}
|
||||
/**
 * Unpacks 256 one-bit values from `inValues[inPos..inPos+7]` into
 * `out[outPos..outPos+255]`. Each 32-value chunk occupies exactly one
 * word, so a single flat loop over the bit stream is equivalent to the
 * eight-chunk layout.
 */
export function fastUnpack256_1(inValues, inPos, out, outPos) {
    for (let i = 0; i < 256; i++) {
        out[outPos + i] = (inValues[inPos + (i >>> 5)] >>> (i & 31)) & 0x1;
    }
}
|
||||
/**
 * Unpacks 256 two-bit values from `inValues[inPos..inPos+15]` into
 * `out[outPos..outPos+255]`. Two bits divide a word evenly, so no value
 * crosses a word boundary and a single flat loop matches the eight-chunk
 * layout.
 */
export function fastUnpack256_2(inValues, inPos, out, outPos) {
    for (let i = 0; i < 256; i++) {
        const word = inValues[inPos + (i >>> 4)];
        out[outPos + i] = (word >>> ((i & 15) << 1)) & 0x3;
    }
}
|
||||
/**
 * Unpacks 256 three-bit values from `inValues[inPos..inPos+23]` into
 * `out[outPos..outPos+255]`. Each 32-value chunk occupies exactly 3 words,
 * so the bit stream is contiguous across chunks and one flat loop matches
 * the eight-chunk layout. Straddling values borrow high bits from the
 * next word.
 */
export function fastUnpack256_3(inValues, inPos, out, outPos) {
    for (let i = 0; i < 256; i++) {
        const bit = i * 3;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 29) {
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x7;
    }
}
|
||||
/**
 * Unpacks 256 four-bit values from `inValues[inPos..inPos+31]` into
 * `out[outPos..outPos+255]`. Four bits divide a word evenly, so no value
 * crosses a word boundary and a single flat loop matches the eight-chunk
 * layout.
 */
export function fastUnpack256_4(inValues, inPos, out, outPos) {
    for (let i = 0; i < 256; i++) {
        const word = inValues[inPos + (i >>> 3)];
        out[outPos + i] = (word >>> ((i & 7) << 2)) & 0xf;
    }
}
|
||||
/**
 * Unpacks 256 five-bit values from `inValues[inPos..inPos+39]` into
 * `out[outPos..outPos+255]`. Each 32-value chunk occupies exactly 5 words,
 * so the bit stream is contiguous across chunks and one flat loop matches
 * the eight-chunk layout. Straddling values borrow high bits from the
 * next word.
 */
export function fastUnpack256_5(inValues, inPos, out, outPos) {
    for (let i = 0; i < 256; i++) {
        const bit = i * 5;
        const w = inPos + (bit >>> 5);
        const shift = bit & 31;
        let v = inValues[w] >>> shift;
        if (shift > 27) {
            v |= inValues[w + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x1f;
    }
}
|
||||
/**
 * Unpacks 256 values of 6 bits each from 48 consecutive 32-bit input words
 * (8 chunks of 6 words / 32 values). Boundary-straddling values are stitched
 * from adjacent words, matching the unrolled form exactly.
 */
export function fastUnpack256_6(inValues, inPos, out, outPos) {
    let op = outPos;
    let base = inPos;
    for (let chunk = 0; chunk < 8; chunk++) {
        for (let k = 0; k < 32; k++) {
            const bitPos = k * 6;
            const wordIdx = base + (bitPos >>> 5);
            const shift = bitPos & 31;
            let v = inValues[wordIdx] >>> shift;
            if (shift > 26) {
                // Fewer than 6 bits remain in this word: take the rest from the next one.
                v |= inValues[wordIdx + 1] << (32 - shift);
            }
            out[op++] = v & 0x3f;
        }
        base += 6;
    }
}
/**
 * Unpacks 256 values of 7 bits each from 56 consecutive 32-bit input words
 * (8 chunks of 7 words / 32 values). Boundary-straddling values are stitched
 * from adjacent words, matching the unrolled form exactly.
 */
export function fastUnpack256_7(inValues, inPos, out, outPos) {
    let op = outPos;
    let base = inPos;
    for (let chunk = 0; chunk < 8; chunk++) {
        for (let k = 0; k < 32; k++) {
            const bitPos = k * 7;
            const wordIdx = base + (bitPos >>> 5);
            const shift = bitPos & 31;
            let v = inValues[wordIdx] >>> shift;
            if (shift > 25) {
                // Fewer than 7 bits remain in this word: take the rest from the next one.
                v |= inValues[wordIdx + 1] << (32 - shift);
            }
            out[op++] = v & 0x7f;
        }
        base += 7;
    }
}
/**
 * Unpacks 256 byte-sized values from 64 consecutive 32-bit input words.
 * Each word contributes four bytes, least-significant byte first.
 */
export function fastUnpack256_8(inValues, inPos, out, outPos) {
    let writeIndex = outPos;
    const end = inPos + 64;
    for (let readIndex = inPos; readIndex < end; readIndex++) {
        const word = inValues[readIndex] >>> 0;
        out[writeIndex++] = word & 0xff;
        out[writeIndex++] = (word >>> 8) & 0xff;
        out[writeIndex++] = (word >>> 16) & 0xff;
        out[writeIndex++] = word >>> 24;
    }
}
/**
 * Unpacks 256 halfword (16-bit) values from 128 consecutive 32-bit input
 * words: low halfword first, then high halfword of each word.
 */
export function fastUnpack256_16(inValues, inPos, out, outPos) {
    let writeIndex = outPos;
    for (let i = 0; i < 128; i++) {
        const word = inValues[inPos + i] >>> 0;
        out[writeIndex++] = word & 0xffff;
        out[writeIndex++] = word >>> 16;
    }
}
/**
 * Generic fallback unpacker: extracts 256 `bitWidth`-bit values from a packed
 * stream of 32-bit words into `out`, for bit widths that have no specialized
 * unrolled routine.
 *
 * NOTE(review): relies on the module-level MASKS lookup table (defined
 * elsewhere in this file); presumably MASKS[bitWidth] === (1 << bitWidth) - 1
 * — verify against its definition.
 */
export function fastUnpack256_Generic(inValues, inPos, out, outPos, bitWidth) {
    const mask = MASKS[bitWidth] >>> 0;
    let inputWordIndex = inPos;
    let bitOffset = 0;
    let currentWord = inValues[inputWordIndex] >>> 0;
    let op = outPos;
    for (let c = 0; c < 8; c++) {
        for (let i = 0; i < 32; i++) {
            if (bitOffset + bitWidth <= 32) {
                // Value lies entirely within the current word.
                const value = (currentWord >>> bitOffset) & mask;
                out[op + i] = value | 0;
                bitOffset += bitWidth;
                if (bitOffset === 32) {
                    bitOffset = 0;
                    inputWordIndex++;
                    if (i !== 31) {
                        // Skip the reload on the last value of a chunk so we never
                        // read one word past the input for the final chunk.
                        currentWord = inValues[inputWordIndex] >>> 0;
                    }
                }
            }
            else {
                // Value straddles a word boundary: combine the remaining high bits
                // of the current word with the low bits of the next word.
                const lowBits = 32 - bitOffset;
                const low = currentWord >>> bitOffset;
                inputWordIndex++;
                currentWord = inValues[inputWordIndex] >>> 0;
                const highBits = bitWidth - lowBits;
                const highMask = (-1 >>> (32 - highBits)) >>> 0;
                const high = currentWord & highMask;
                const value = (low | (high << lowBits)) & mask;
                out[op + i] = value | 0;
                bitOffset = highBits;
            }
        }
        op += 32;
        // 32 * bitWidth bits is a whole number of 32-bit words, so every chunk
        // starts word-aligned; reset the offset and reload for the next chunk.
        bitOffset = 0;
        if (c < 7) {
            currentWord = inValues[inputWordIndex] >>> 0;
        }
    }
}
//# sourceMappingURL=fastPforUnpack.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/fastPforUnpack.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/fastPforUnpack.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
9
node_modules/@maplibre/mlt/dist/decoding/fsstDecoder.d.ts
generated
vendored
Normal file
9
node_modules/@maplibre/mlt/dist/decoding/fsstDecoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
/**
|
||||
* Decode FSST compressed data
|
||||
*
|
||||
* @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
|
||||
* @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
|
||||
* @param compressedData FSST Compressed data, where each entry is an index to the symbols array
|
||||
* @returns Decoded data as Uint8Array
|
||||
*/
|
||||
export declare function decodeFsst(symbols: Uint8Array, symbolLengths: Uint32Array, compressedData: Uint8Array): Uint8Array;
|
||||
31
node_modules/@maplibre/mlt/dist/decoding/fsstDecoder.js
generated
vendored
Normal file
31
node_modules/@maplibre/mlt/dist/decoding/fsstDecoder.js
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
/**
|
||||
* Decode FSST compressed data
|
||||
*
|
||||
* @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
|
||||
* @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
|
||||
* @param compressedData FSST Compressed data, where each entry is an index to the symbols array
|
||||
* @returns Decoded data as Uint8Array
|
||||
*/
|
||||
//TODO: improve -> quick and dirty implementation
|
||||
/**
 * Decode FSST compressed data
 *
 * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
 * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
 * @param compressedData FSST Compressed data, where each entry is an index to the symbols array
 * @returns Decoded data as Uint8Array
 */
//TODO: improve -> quick and dirty implementation
export function decodeFsst(symbols, symbolLengths, compressedData) {
    //TODO: use typed array directly
    const decodedData = [];
    /* Prefix-sum the symbol lengths so each symbol's start offset in `symbols` is O(1). */
    const symbolOffsets = new Array(symbolLengths.length).fill(0);
    for (let i = 1; i < symbolLengths.length; i++) {
        symbolOffsets[i] = symbolOffsets[i - 1] + symbolLengths[i - 1];
    }
    for (let i = 0; i < compressedData.length; i++) {
        if (compressedData[i] === 255) {
            /* 255 is the escape marker: the following byte is emitted literally.
             * Guard against a truncated stream that ends on the escape byte —
             * previously this read past the buffer and emitted a spurious 0. */
            if (++i < compressedData.length) {
                decodedData.push(compressedData[i]);
            }
        }
        else {
            const symbolLength = symbolLengths[compressedData[i]];
            const symbolOffset = symbolOffsets[compressedData[i]];
            for (let j = 0; j < symbolLength; j++) {
                decodedData.push(symbols[symbolOffset + j]);
            }
        }
    }
    return new Uint8Array(decodedData);
}
//# sourceMappingURL=fsstDecoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/fsstDecoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/fsstDecoder.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"fsstDecoder.js","sourceRoot":"","sources":["../../src/decoding/fsstDecoder.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AACH,iDAAiD;AACjD,MAAM,UAAU,UAAU,CAAC,OAAmB,EAAE,aAA0B,EAAE,cAA0B;IAClG,gCAAgC;IAChC,MAAM,WAAW,GAAa,EAAE,CAAC;IACjC,MAAM,aAAa,GAAa,IAAI,KAAK,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAExE,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5C,aAAa,CAAC,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;IACnE,CAAC;IAED,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,cAAc,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QAC7C,IAAI,cAAc,CAAC,CAAC,CAAC,KAAK,GAAG,EAAE,CAAC;YAC5B,WAAW,CAAC,IAAI,CAAC,cAAc,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QAC1C,CAAC;aAAM,CAAC;YACJ,MAAM,YAAY,GAAG,aAAa,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;YACtD,MAAM,YAAY,GAAG,aAAa,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;YACtD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,YAAY,EAAE,CAAC,EAAE,EAAE,CAAC;gBACpC,WAAW,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC;YAChD,CAAC;QACL,CAAC;IACL,CAAC;IACD,OAAO,IAAI,UAAU,CAAC,WAAW,CAAC,CAAC;AACvC,CAAC","sourcesContent":["/**\n * Decode FSST compressed data\n *\n * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes\n * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array\n * @param compressedData FSST Compressed data, where each entry is an index to the symbols array\n * @returns Decoded data as Uint8Array\n */\n//TODO: improve -> quick and dirty implementation\nexport function decodeFsst(symbols: Uint8Array, symbolLengths: Uint32Array, compressedData: Uint8Array): Uint8Array {\n //TODO: use typed array directly\n const decodedData: number[] = [];\n const symbolOffsets: number[] = new Array(symbolLengths.length).fill(0);\n\n for (let i = 1; i < symbolLengths.length; i++) {\n symbolOffsets[i] = symbolOffsets[i - 1] + symbolLengths[i - 1];\n }\n\n for (let i = 0; i < compressedData.length; i++) {\n if (compressedData[i] === 255) 
{\n decodedData.push(compressedData[++i]);\n } else {\n const symbolLength = symbolLengths[compressedData[i]];\n const symbolOffset = symbolOffsets[compressedData[i]];\n for (let j = 0; j < symbolLength; j++) {\n decodedData.push(symbols[symbolOffset + j]);\n }\n }\n }\n return new Uint8Array(decodedData);\n}\n"]}
|
||||
5
node_modules/@maplibre/mlt/dist/decoding/geometryDecoder.d.ts
generated
vendored
Normal file
5
node_modules/@maplibre/mlt/dist/decoding/geometryDecoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
import type IntWrapper from "./intWrapper";
|
||||
import type { GeometryVector } from "../vector/geometry/geometryVector";
|
||||
import type { GpuVector } from "../vector/geometry/gpuVector";
|
||||
import type GeometryScaling from "./geometryScaling";
|
||||
export declare function decodeGeometryColumn(tile: Uint8Array, numStreams: number, offset: IntWrapper, numFeatures: number, scalingData?: GeometryScaling): GeometryVector | GpuVector;
|
||||
289
node_modules/@maplibre/mlt/dist/decoding/geometryDecoder.js
generated
vendored
Normal file
289
node_modules/@maplibre/mlt/dist/decoding/geometryDecoder.js
generated
vendored
Normal file
@@ -0,0 +1,289 @@
|
||||
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
|
||||
import { decodeSignedInt32Stream, decodeLengthStreamToOffsetBuffer, decodeUnsignedConstInt32Stream, decodeUnsignedInt32Stream, getVectorType, } from "./integerStreamDecoder";
|
||||
import { VectorType } from "../vector/vectorType";
|
||||
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
|
||||
import { LengthType } from "../metadata/tile/lengthType";
|
||||
import { DictionaryType } from "../metadata/tile/dictionaryType";
|
||||
import { createConstGeometryVector, createMortonEncodedConstGeometryVector, } from "../vector/geometry/constGeometryVector";
|
||||
import { createFlatGeometryVector, createFlatGeometryVectorMortonEncoded } from "../vector/geometry/flatGeometryVector";
|
||||
import { OffsetType } from "../metadata/tile/offsetType";
|
||||
import { createConstGpuVector } from "../vector/geometry/constGpuVector";
|
||||
import { createFlatGpuVector } from "../vector/geometry/flatGpuVector";
|
||||
// TODO: get rid of numFeatures parameter
|
||||
/**
 * Decodes a geometry column from an MLT tile buffer.
 *
 * Reads `numStreams` streams starting at `offset` (advanced in place) and
 * returns either a CPU-side GeometryVector or, when an index buffer is
 * present, a GpuVector with pre-tessellated triangles.
 *
 * Two paths exist:
 *  - CONST: every feature shares one geometry type; length streams are
 *    decoded directly to offset buffers.
 *  - mixed: a per-feature geometry-type vector is decoded first; length
 *    streams are decoded as lengths and converted to offsets afterwards.
 *
 * @param tile Raw tile bytes.
 * @param numStreams Number of streams in this column (including the geometry-type stream).
 * @param offset Read cursor into `tile`; mutated as streams are consumed.
 * @param numFeatures Number of features in the column.
 * @param scalingData Optional vertex rescaling parameters forwarded to the integer stream decoders.
 */
export function decodeGeometryColumn(tile, numStreams, offset, numFeatures, scalingData) {
    const geometryTypeMetadata = decodeStreamMetadata(tile, offset);
    const geometryTypesVectorType = getVectorType(geometryTypeMetadata, numFeatures, tile, offset);
    let vertexOffsets;
    let vertexBuffer;
    let mortonSettings;
    let indexBuffer;
    if (geometryTypesVectorType === VectorType.CONST) {
        /* All geometries in the column have the same geometry type */
        const geometryType = decodeUnsignedConstInt32Stream(tile, offset, geometryTypeMetadata);
        // Variables for const geometry path (directly decoded as offsets)
        let geometryOffsets;
        let partOffsets;
        let ringOffsets;
        //TODO: use geometryOffsets for that? -> but then tessellated polygons can't be used with normal polygons
        // in one FeatureTable?
        let triangleOffsets;
        for (let i = 0; i < numStreams - 1; i++) {
            const geometryStreamMetadata = decodeStreamMetadata(tile, offset);
            switch (geometryStreamMetadata.physicalStreamType) {
                case PhysicalStreamType.LENGTH:
                    // Length streams are decoded straight to offset (prefix-sum) buffers.
                    switch (geometryStreamMetadata.logicalStreamType.lengthType) {
                        case LengthType.GEOMETRIES:
                            geometryOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
                            break;
                        case LengthType.PARTS:
                            partOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
                            break;
                        case LengthType.RINGS:
                            ringOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
                            break;
                        case LengthType.TRIANGLES:
                            triangleOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
                    }
                    break;
                case PhysicalStreamType.OFFSET: {
                    switch (geometryStreamMetadata.logicalStreamType.offsetType) {
                        case OffsetType.VERTEX:
                            vertexOffsets = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                            break;
                        case OffsetType.INDEX:
                            indexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                            break;
                    }
                    break;
                }
                case PhysicalStreamType.DATA: {
                    if (DictionaryType.VERTEX === geometryStreamMetadata.logicalStreamType.dictionaryType) {
                        vertexBuffer = decodeSignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
                    }
                    else {
                        // Non-VERTEX dictionary: the stream carries Morton-encoded
                        // coordinates; capture the Morton parameters from its metadata.
                        const mortonMetadata = geometryStreamMetadata;
                        mortonSettings = {
                            numBits: mortonMetadata.numBits,
                            coordinateShift: mortonMetadata.coordinateShift,
                        };
                        vertexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
                    }
                    break;
                }
            }
        }
        if (indexBuffer) {
            if (geometryOffsets !== undefined || partOffsets !== undefined) {
                /* Case when the indices of a Polygon outline are encoded in the tile */
                const topologyVector = { geometryOffsets, partOffsets, ringOffsets };
                return createConstGpuVector(numFeatures, geometryType, triangleOffsets, indexBuffer, vertexBuffer, topologyVector);
            }
            /* Case when the no Polygon outlines are encoded in the tile */
            return createConstGpuVector(numFeatures, geometryType, triangleOffsets, indexBuffer, vertexBuffer);
        }
        return mortonSettings === undefined
            ? /* Currently only 2D coordinates (Vec2) are implemented in the encoder */
                createConstGeometryVector(numFeatures, geometryType, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer)
            : createMortonEncodedConstGeometryVector(numFeatures, geometryType, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer, mortonSettings);
    }
    /* Different geometry types are mixed in the geometry column */
    const geometryTypeVector = decodeUnsignedInt32Stream(tile, offset, geometryTypeMetadata);
    // Variables for flat geometry path (decoded as lengths, then converted to offsets)
    let geometryLengths;
    let partLengths;
    let ringLengths;
    //TODO: use geometryOffsets for that? -> but then tessellated polygons can't be used with normal polygons
    // in one FeatureTable?
    let triangleOffsets;
    for (let i = 0; i < numStreams - 1; i++) {
        const geometryStreamMetadata = decodeStreamMetadata(tile, offset);
        switch (geometryStreamMetadata.physicalStreamType) {
            case PhysicalStreamType.LENGTH:
                switch (geometryStreamMetadata.logicalStreamType.lengthType) {
                    case LengthType.GEOMETRIES:
                        geometryLengths = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                        break;
                    case LengthType.PARTS:
                        partLengths = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                        break;
                    case LengthType.RINGS:
                        ringLengths = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                        break;
                    case LengthType.TRIANGLES:
                        triangleOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
                }
                break;
            case PhysicalStreamType.OFFSET:
                switch (geometryStreamMetadata.logicalStreamType.offsetType) {
                    case OffsetType.VERTEX:
                        vertexOffsets = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                        break;
                    case OffsetType.INDEX:
                        indexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
                        break;
                }
                break;
            case PhysicalStreamType.DATA:
                if (DictionaryType.VERTEX === geometryStreamMetadata.logicalStreamType.dictionaryType) {
                    vertexBuffer = decodeSignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
                }
                else {
                    const mortonMetadata = geometryStreamMetadata;
                    mortonSettings = {
                        numBits: mortonMetadata.numBits,
                        coordinateShift: mortonMetadata.coordinateShift,
                    };
                    vertexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
                }
                break;
        }
    }
    // TODO: refactor the following instructions -> decode in one pass for performance reasons
    /* Calculate the offsets from the length buffer for util access */
    let geometryOffsets;
    let partOffsets;
    let ringOffsets;
    if (geometryLengths) {
        geometryOffsets = decodeRootLengthStream(geometryTypeVector, geometryLengths, 2);
        if (partLengths && ringLengths) {
            partOffsets = decodeLevel1LengthStream(geometryTypeVector, geometryOffsets, partLengths, false);
            ringOffsets = decodeLevel2LengthStream(geometryTypeVector, geometryOffsets, partOffsets, ringLengths);
        }
        else if (partLengths) {
            partOffsets = decodeLevel1WithoutRingBufferLengthStream(geometryTypeVector, geometryOffsets, partLengths);
        }
    }
    else if (partLengths && ringLengths) {
        partOffsets = decodeRootLengthStream(geometryTypeVector, partLengths, 1);
        ringOffsets = decodeLevel1LengthStream(geometryTypeVector, partOffsets, ringLengths, true);
    }
    else if (partLengths) {
        partOffsets = decodeRootLengthStream(geometryTypeVector, partLengths, 0);
    }
    if (indexBuffer && !partOffsets) {
        /* Case when the indices of a Polygon outline are not encoded in the data so no
         * topology data are present in the tile */
        return createFlatGpuVector(geometryTypeVector, triangleOffsets, indexBuffer, vertexBuffer);
    }
    if (indexBuffer) {
        /* Case when the indices of a Polygon outline are encoded in the tile */
        return createFlatGpuVector(geometryTypeVector, triangleOffsets, indexBuffer, vertexBuffer, {
            geometryOffsets,
            partOffsets,
            ringOffsets,
        });
    }
    return mortonSettings === undefined /* Currently only 2D coordinates (Vec2) are implemented in the encoder */
        ? createFlatGeometryVector(geometryTypeVector, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer)
        : createFlatGeometryVectorMortonEncoded(geometryTypeVector, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer, mortonSettings);
}
/*
|
||||
* Handle the parsing of the different topology length buffers separate not generic to reduce the
|
||||
* branching and improve the performance
|
||||
*/
|
||||
/*
 * Converts a root-level length stream into an offset (prefix-sum) buffer.
 * A geometry only has an entry in the root length stream when its type is
 * greater than `bufferId`:
 *   bufferId 2: GeometryOffsets -> MultiPolygon, MultiLineString, MultiPoint
 *   bufferId 1: PartOffsets     -> Polygon
 *   bufferId 0: PartOffsets, RingOffsets -> LineString
 * All other types implicitly contribute a single element.
 */
function decodeRootLengthStream(geometryTypes, rootLengthStream, bufferId) {
    const offsets = new Uint32Array(geometryTypes.length + 1);
    offsets[0] = 0;
    let runningTotal = 0;
    let lengthIndex = 0;
    for (let typeIndex = 0; typeIndex < geometryTypes.length; typeIndex++) {
        const count = geometryTypes[typeIndex] > bufferId ? rootLengthStream[lengthIndex++] : 1;
        runningTotal += count;
        offsets[typeIndex + 1] = runningTotal;
    }
    return offsets;
}
/*
 * Converts the level-1 length stream into an offset buffer. MultiPolygon (5)
 * and Polygon (2) always consume a length entry per geometry; MultiLineString
 * (4) and LineString (1) only do so when `isLineStringPresent` is set. All
 * remaining types implicitly contribute one element per geometry.
 */
function decodeLevel1LengthStream(geometryTypes, rootOffsetBuffer, level1LengthBuffer, isLineStringPresent) {
    const totalGeometries = rootOffsetBuffer[rootOffsetBuffer.length - 1];
    const offsets = new Uint32Array(totalGeometries + 1);
    offsets[0] = 0;
    let runningTotal = 0;
    let writeIndex = 1;
    let lengthIndex = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        const type = geometryTypes[i];
        const geometryCount = rootOffsetBuffer[i + 1] - rootOffsetBuffer[i];
        const hasLengthEntry =
            type === 5 || type === 2 || (isLineStringPresent && (type === 4 || type === 1));
        for (let j = 0; j < geometryCount; j++) {
            runningTotal += hasLengthEntry ? level1LengthBuffer[lengthIndex++] : 1;
            offsets[writeIndex++] = runningTotal;
        }
    }
    return offsets;
}
/*
|
||||
* Case where no ring buffer exists so no MultiPolygon or Polygon geometry is part of the buffer
|
||||
*/
|
||||
/*
 * Level-1 length-to-offset conversion for columns without a ring buffer
 * (i.e. no Polygon/MultiPolygon geometries present). Only MultiLineString (4)
 * and LineString (1) consume a length entry per geometry; Point/MultiPoint
 * implicitly contribute one element each.
 */
function decodeLevel1WithoutRingBufferLengthStream(geometryTypes, rootOffsetBuffer, level1LengthBuffer) {
    const totalGeometries = rootOffsetBuffer[rootOffsetBuffer.length - 1];
    const offsets = new Uint32Array(totalGeometries + 1);
    offsets[0] = 0;
    let runningTotal = 0;
    let writeIndex = 1;
    let lengthIndex = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        const type = geometryTypes[i];
        const geometryCount = rootOffsetBuffer[i + 1] - rootOffsetBuffer[i];
        const hasLengthEntry = type === 4 || type === 1;
        for (let j = 0; j < geometryCount; j++) {
            runningTotal += hasLengthEntry ? level1LengthBuffer[lengthIndex++] : 1;
            offsets[writeIndex++] = runningTotal;
        }
    }
    return offsets;
}
/*
 * Converts the level-2 (ring) length stream into an offset buffer. For
 * Point (0) and MultiPoint (3) geometries no level-2 length entry exists —
 * each geometry implicitly contributes one element; every other type consumes
 * one length entry per part recorded in the level-1 offset buffer.
 */
function decodeLevel2LengthStream(geometryTypes, rootOffsetBuffer, level1OffsetBuffer, level2LengthBuffer) {
    const offsets = new Uint32Array(level1OffsetBuffer[level1OffsetBuffer.length - 1] + 1);
    offsets[0] = 0;
    let runningTotal = 0;
    let level1Index = 1;
    let writeIndex = 1;
    let lengthIndex = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        const type = geometryTypes[i];
        const geometryCount = rootOffsetBuffer[i + 1] - rootOffsetBuffer[i];
        if (type === 0 || type === 3) {
            /* Point / MultiPoint: one implicit element per geometry. */
            for (let j = 0; j < geometryCount; j++) {
                runningTotal += 1;
                offsets[writeIndex++] = runningTotal;
                level1Index++;
            }
        }
        else {
            /* MultiPolygon, MultiLineString, Polygon, LineString: one length
             * entry per part of each geometry. */
            for (let j = 0; j < geometryCount; j++) {
                const partCount = level1OffsetBuffer[level1Index] - level1OffsetBuffer[level1Index - 1];
                level1Index++;
                for (let k = 0; k < partCount; k++) {
                    runningTotal += level2LengthBuffer[lengthIndex++];
                    offsets[writeIndex++] = runningTotal;
                }
            }
        }
    }
    return offsets;
}
//# sourceMappingURL=geometryDecoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/geometryDecoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/geometryDecoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
6
node_modules/@maplibre/mlt/dist/decoding/geometryScaling.d.ts
generated
vendored
Normal file
6
node_modules/@maplibre/mlt/dist/decoding/geometryScaling.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
/**
 * Parameters used to rescale decoded vertex coordinates.
 * NOTE(review): field semantics inferred from usage as `scalingData` in the
 * integer stream decoders — confirm against the encoder.
 */
export default interface GeometryScaling {
    // Target extent of the coordinate space.
    extent: number;
    // Minimum source coordinate value.
    min: number;
    // Maximum source coordinate value.
    max: number;
    // Optional precomputed scale factor.
    scale?: number;
}
2
node_modules/@maplibre/mlt/dist/decoding/geometryScaling.js
generated
vendored
Normal file
2
node_modules/@maplibre/mlt/dist/decoding/geometryScaling.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
export {};
|
||||
//# sourceMappingURL=geometryScaling.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/geometryScaling.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/geometryScaling.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"geometryScaling.js","sourceRoot":"","sources":["../../src/decoding/geometryScaling.ts"],"names":[],"mappings":"","sourcesContent":["export default interface GeometryScaling {\n extent: number;\n min: number;\n max: number;\n scale?: number;\n}\n"]}
|
||||
8
node_modules/@maplibre/mlt/dist/decoding/intWrapper.d.ts
generated
vendored
Normal file
8
node_modules/@maplibre/mlt/dist/decoding/intWrapper.d.ts
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
export default class IntWrapper {
|
||||
private value;
|
||||
constructor(value: number);
|
||||
get(): number;
|
||||
set(v: number): void;
|
||||
increment(): number;
|
||||
add(v: number): void;
|
||||
}
|
||||
19
node_modules/@maplibre/mlt/dist/decoding/intWrapper.js
generated
vendored
Normal file
19
node_modules/@maplibre/mlt/dist/decoding/intWrapper.js
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Ported from https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/IntWrapper.java
|
||||
// Ported from https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/IntWrapper.java
/**
 * Mutable integer box used as a shared read cursor across decoder calls.
 */
export default class IntWrapper {
    constructor(value) {
        this.value = value;
    }
    /** Current value. */
    get() {
        return this.value;
    }
    /** Replaces the current value. */
    set(v) {
        this.value = v;
    }
    /** Post-increments: returns the value before adding one. */
    increment() {
        const previous = this.value;
        this.value = previous + 1;
        return previous;
    }
    /** Advances the value by `v`. */
    add(v) {
        this.value = this.value + v;
    }
}
//# sourceMappingURL=intWrapper.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/intWrapper.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/intWrapper.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"intWrapper.js","sourceRoot":"","sources":["../../src/decoding/intWrapper.ts"],"names":[],"mappings":"AAAA,4HAA4H;AAE5H,MAAM,CAAC,OAAO,OAAO,UAAU;IAC3B,YAAoB,KAAa;QAAb,UAAK,GAAL,KAAK,CAAQ;IAAG,CAAC;IAE9B,GAAG;QACN,OAAO,IAAI,CAAC,KAAK,CAAC;IACtB,CAAC;IAEM,GAAG,CAAC,CAAS;QAChB,IAAI,CAAC,KAAK,GAAG,CAAC,CAAC;IACnB,CAAC;IAEM,SAAS;QACZ,OAAO,IAAI,CAAC,KAAK,EAAE,CAAC;IACxB,CAAC;IAEM,GAAG,CAAC,CAAS;QAChB,IAAI,CAAC,KAAK,IAAI,CAAC,CAAC;IACpB,CAAC;CACJ","sourcesContent":["// Ported from https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/IntWrapper.java\n\nexport default class IntWrapper {\n constructor(private value: number) {}\n\n public get(): number {\n return this.value;\n }\n\n public set(v: number): void {\n this.value = v;\n }\n\n public increment(): number {\n return this.value++;\n }\n\n public add(v: number): void {\n this.value += v;\n }\n}\n"]}
|
||||
54
node_modules/@maplibre/mlt/dist/decoding/integerDecodingUtils.d.ts
generated
vendored
Normal file
54
node_modules/@maplibre/mlt/dist/decoding/integerDecodingUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
import type IntWrapper from "./intWrapper";
import { type FastPforWireDecodeWorkspace } from "./fastPforDecoder";
export type { FastPforWireDecodeWorkspace } from "./fastPforDecoder";
export { createFastPforWireDecodeWorkspace } from "./fastPforDecoder";
/* Varint (LEB128) decoding. Offsets are advanced in place via IntWrapper. */
export declare function decodeVarintInt32(buf: Uint8Array, bufferOffset: IntWrapper, numValues: number): Uint32Array;
export declare function decodeVarintInt64(src: Uint8Array, offset: IntWrapper, numValues: number): BigUint64Array;
/* Decodes 64-bit varints as numbers; values are limited to 53 bits (see js impl). */
export declare function decodeVarintFloat64(src: Uint8Array, offset: IntWrapper, numValues: number): Float64Array;
/* FastPFOR block decoding of a big-endian int32 word stream. */
export declare function decodeFastPfor(encodedBytes: Uint8Array, expectedValueCount: number, encodedByteLength: number, offset: IntWrapper): Uint32Array;
export declare function decodeFastPforWithWorkspace(encodedBytes: Uint8Array, expectedValueCount: number, encodedByteLength: number, offset: IntWrapper, workspace: FastPforWireDecodeWorkspace): Uint32Array;
/* ZigZag decoding of single values. */
export declare function decodeZigZagInt32Value(encoded: number): number;
export declare function decodeZigZagInt64Value(encoded: bigint): bigint;
export declare function decodeZigZagFloat64Value(encoded: number): number;
/* ZigZag decoding of whole arrays; the Float64 variant decodes in place. */
export declare function decodeZigZagInt32(encodedData: Uint32Array): Int32Array;
export declare function decodeZigZagInt64(encodedData: BigUint64Array): BigInt64Array;
export declare function decodeZigZagFloat64(encodedData: Float64Array): void;
/* Run-length decoding: [run1..runN, value1..valueN]. When numTotalValues is
 * omitted (nullable columns), it is derived from the run lengths. */
export declare function decodeUnsignedRleInt32(encodedData: Uint32Array, numRuns: number, numTotalValues?: number): Uint32Array;
export declare function decodeUnsignedRleInt64(encodedData: BigUint64Array, numRuns: number, numTotalValues?: number): BigUint64Array;
export declare function decodeUnsignedRleFloat64(encodedData: Float64Array, numRuns: number, numTotalValues: number): Float64Array;
/* ZigZag + delta decoding; the Float64 variant decodes in place. */
export declare function decodeZigZagDeltaInt32(data: Uint32Array): Int32Array;
export declare function decodeZigZagDeltaInt64(data: BigInt64Array | BigUint64Array): BigInt64Array;
export declare function decodeZigZagDeltaFloat64(data: Float64Array): void;
export declare function decodeZigZagRleInt32(data: Uint32Array, numRuns: number, numTotalValues?: number): Int32Array;
export declare function decodeZigZagRleInt64(data: BigUint64Array, numRuns: number, numTotalValues?: number): BigInt64Array;
export declare function decodeZigZagRleFloat64(data: Float64Array, numRuns: number, numTotalValues: number): Float64Array;
/* In-place prefix sums (inverse delta). */
export declare function fastInverseDelta(data: Uint32Array | Int32Array): void;
export declare function inverseDelta(data: Uint32Array): void;
/* Componentwise (x/y interleaved) delta decoding of Vec2 streams. */
export declare function decodeComponentwiseDeltaVec2(data: Uint32Array): Int32Array;
export declare function decodeComponentwiseDeltaVec2Scaled(data: Uint32Array, scale: number, min: number, max: number): Int32Array;
/* Offset-buffer reconstruction helpers (result has a leading 0 sentinel). */
export declare function decodeZigZagDeltaOfDeltaInt32(data: Uint32Array): Uint32Array;
export declare function decodeZigZagRleDeltaInt32(data: Uint32Array, numRuns: number, numTotalValues: number): Int32Array;
export declare function decodeRleDeltaInt32(data: Uint32Array, numRuns: number, numTotalValues: number): Uint32Array;
/**
 * Decode Delta-RLE with multiple runs by fully reconstructing values.
 *
 * @param data RLE encoded data: [run1, run2, ..., value1, value2, ...]
 * @param numRuns Number of runs in the RLE encoding
 * @param numValues Total number of values to reconstruct
 * @returns Reconstructed values with deltas applied
 */
export declare function decodeDeltaRleInt32(data: Uint32Array, numRuns: number, numValues: number): Int32Array;
/**
 * Decode Delta-RLE with multiple runs for 64-bit integers.
 */
export declare function decodeDeltaRleInt64(data: BigUint64Array, numRuns: number, numValues: number): BigInt64Array;
/* Unsigned-result variants of the decoders above. */
export declare function decodeUnsignedZigZagDeltaInt32(data: Uint32Array): Uint32Array;
export declare function decodeUnsignedZigZagDeltaInt64(data: BigUint64Array): BigUint64Array;
export declare function decodeUnsignedComponentwiseDeltaVec2(data: Uint32Array): Uint32Array;
export declare function decodeUnsignedComponentwiseDeltaVec2Scaled(data: Uint32Array, scale: number, min: number, max: number): Uint32Array;
/* Constant / arithmetic-sequence RLE scalar decoders. */
export declare function decodeUnsignedConstRleInt32(data: Int32Array | Uint32Array): number;
export declare function decodeZigZagConstRleInt32(data: Int32Array | Uint32Array): number;
export declare function decodeZigZagSequenceRleInt32(data: Int32Array | Uint32Array): [baseValue: number, delta: number];
export declare function decodeUnsignedConstRleInt64(data: BigInt64Array | BigUint64Array): bigint;
export declare function decodeZigZagConstRleInt64(data: BigInt64Array | BigUint64Array): bigint;
export declare function decodeZigZagSequenceRleInt64(data: BigInt64Array | BigUint64Array): [baseValue: bigint, delta: bigint];
|
||||
598
node_modules/@maplibre/mlt/dist/decoding/integerDecodingUtils.js
generated
vendored
Normal file
598
node_modules/@maplibre/mlt/dist/decoding/integerDecodingUtils.js
generated
vendored
Normal file
@@ -0,0 +1,598 @@
|
||||
import { createFastPforWireDecodeWorkspace, decodeFastPforInt32, ensureFastPforWireEncodedWordsCapacity, } from "./fastPforDecoder";
|
||||
import { decodeBigEndianInt32sInto } from "./bigEndianDecode";
|
||||
export { createFastPforWireDecodeWorkspace } from "./fastPforDecoder";
|
||||
//based on https://github.com/mapbox/pbf/blob/main/index.js
|
||||
/**
 * Decodes `numValues` unsigned LEB128 varints (low 32 bits each) from `buf`.
 *
 * @param buf Source bytes.
 * @param bufferOffset Read position (IntWrapper-like); advanced past the consumed bytes.
 * @param numValues Number of varints to decode.
 * @returns Decoded values as a Uint32Array.
 */
export function decodeVarintInt32(buf, bufferOffset, numValues) {
    const dst = new Uint32Array(numValues);
    let offset = bufferOffset.get();
    for (let i = 0; i < numValues; i++) {
        let b = buf[offset++];
        let val = b & 0x7f;
        if (b >= 0x80) {
            b = buf[offset++];
            val |= (b & 0x7f) << 7;
            if (b >= 0x80) {
                b = buf[offset++];
                val |= (b & 0x7f) << 14;
                if (b >= 0x80) {
                    b = buf[offset++];
                    val |= (b & 0x7f) << 21;
                    if (b >= 0x80) {
                        b = buf[offset++];
                        // Only 4 more bits fit into an int32.
                        val |= (b & 0x0f) << 28;
                        // Fix: an over-long varint (e.g. a negative int64 encoded
                        // in 10 bytes) previously left its continuation bytes
                        // unconsumed, desynchronizing every subsequent read.
                        // Skip them so the stream stays aligned; the decoded
                        // value is still truncated to the low 32 bits.
                        while (b >= 0x80) {
                            b = buf[offset++];
                        }
                    }
                }
            }
        }
        dst[i] = val;
    }
    bufferOffset.set(offset);
    return dst;
}
|
||||
/**
 * Decodes `numValues` LEB128 varints into 64-bit unsigned BigInts.
 *
 * @param src Source bytes.
 * @param offset Read position; advanced past the consumed bytes.
 * @param numValues Number of varints to decode.
 */
export function decodeVarintInt64(src, offset, numValues) {
    const out = new BigUint64Array(numValues);
    for (let i = 0; i < numValues; i++) {
        out[i] = decodeVarintInt64Value(src, offset);
    }
    return out;
}
|
||||
// Source: https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/util/VarInt.java
|
||||
/**
 * Reads one varint at `pos` as an unsigned BigInt.
 * Advances `pos` past the consumed bytes; throws if the varint claims
 * more than 64 bits of payload.
 */
function decodeVarintInt64Value(bytes, pos) {
    let index = pos.get();
    let result = 0n;
    for (let shift = 0; index < bytes.length; shift += 7) {
        const byte = bytes[index++];
        result |= BigInt(byte & 0x7f) << BigInt(shift);
        if ((byte & 0x80) === 0) {
            break;
        }
        if (shift + 7 >= 64) {
            throw new Error("Varint too long");
        }
    }
    pos.set(index);
    return result;
}
|
||||
/*
|
||||
* Since decoding Int64 values to BigInt is more than an order of magnitude slower in the tests then using a Float64,
|
||||
* this decoding method limits the max size of a Long value to 53 bits
|
||||
*/
|
||||
/**
 * Decodes `numValues` varints into a Float64Array. BigInt decoding is an
 * order of magnitude slower, so 64-bit values here are limited to 53 bits.
 */
export function decodeVarintFloat64(src, offset, numValues) {
    const out = new Float64Array(numValues);
    for (let i = 0; i < out.length; i++) {
        out[i] = decodeVarintFloat64Value(src, offset);
    }
    return out;
}
|
||||
//based on https://github.com/mapbox/pbf/blob/main/index.js
|
||||
/**
 * Reads one varint at `offset` as a JS number.
 *
 * The first four bytes (28 bits) are decoded inline; wider values fall
 * through to `decodeVarintRemainder`, which folds in the high bits.
 * NOTE: the fifth byte is read here WITHOUT advancing the offset —
 * `decodeVarintRemainder` deliberately re-reads that same byte to take
 * its bits 4-6 as the low bits of the high word.
 *
 * @param buf Source bytes.
 * @param offset Read position (IntWrapper); advanced past the varint.
 * @returns Decoded value (exact only up to 53 bits).
 */
function decodeVarintFloat64Value(buf, offset) {
    let val;
    let b;
    b = buf[offset.get()];
    offset.increment();
    val = b & 0x7f;
    if (b < 0x80)
        return val;
    b = buf[offset.get()];
    offset.increment();
    val |= (b & 0x7f) << 7;
    if (b < 0x80)
        return val;
    b = buf[offset.get()];
    offset.increment();
    val |= (b & 0x7f) << 14;
    if (b < 0x80)
        return val;
    b = buf[offset.get()];
    offset.increment();
    val |= (b & 0x7f) << 21;
    if (b < 0x80)
        return val;
    // Fifth byte: low 4 bits complete the 32-bit word; the offset is NOT
    // advanced so the remainder decoder can re-read this byte for its high bits.
    b = buf[offset.get()];
    val |= (b & 0x0f) << 28;
    return decodeVarintRemainder(val, buf, offset);
}
|
||||
/**
 * Folds the high-order bits of a varint longer than 4 bytes into a number.
 *
 * `l` already holds the low 32 bits (low 28 plus bits 28-31 from the current
 * byte). The caller did not advance past that byte, so the first read here
 * re-reads it and extracts its bits 4-6 as the low bits of the high word `h`.
 * The final value is assembled as `h * 2^32 + (l >>> 0)` — exact only up
 * to 53 bits of magnitude.
 *
 * @throws Error if the varint exceeds 10 bytes.
 */
function decodeVarintRemainder(l, buf, offset) {
    let h;
    let b;
    // Re-read byte 5: bits 0-3 went into `l`, bits 4-6 start the high word.
    b = buf[offset.get()];
    offset.increment();
    h = (b & 0x70) >> 4;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 3;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 10;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 17;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 24;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x01) << 31;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    throw new Error("Expected varint not more than 10 bytes");
}
|
||||
/**
 * Decodes a FastPFOR-encoded int32 stream, allocating a fresh scratch
 * workspace sized for the encoded word count.
 */
export function decodeFastPfor(encodedBytes, expectedValueCount, encodedByteLength, offset) {
    const wordCount = encodedByteLength >>> 2;
    const scratch = createFastPforWireDecodeWorkspace(wordCount);
    return decodeFastPforWithWorkspace(encodedBytes, expectedValueCount, encodedByteLength, offset, scratch);
}
|
||||
/**
 * Decodes a FastPFOR-encoded int32 stream using a caller-supplied reusable
 * workspace (avoids per-call scratch allocation).
 *
 * @param encodedBytes Source bytes containing a big-endian int32 word stream.
 * @param expectedValueCount Number of int32 values to decode.
 * @param encodedByteLength Length of the encoded region; must be a multiple of 4.
 * @param offset Read position; advanced by `encodedByteLength` on success.
 * @param workspace Reusable scratch buffers (see fastPforDecoder).
 * @throws Error if `encodedByteLength` is not a multiple of 4.
 */
export function decodeFastPforWithWorkspace(encodedBytes, expectedValueCount, encodedByteLength, offset, workspace) {
    const inputByteOffset = offset.get();
    if ((encodedByteLength & 3) !== 0) {
        throw new Error(`FastPFOR: invalid encodedByteLength=${encodedByteLength} at offset=${inputByteOffset} (encodedBytes.length=${encodedBytes.length}; expected a multiple of 4 bytes for an int32 big-endian word stream)`);
    }
    const encodedWordCount = encodedByteLength >>> 2;
    // Grow (if needed) and reuse the workspace's word buffer.
    const encodedWordBuffer = ensureFastPforWireEncodedWordsCapacity(workspace, encodedWordCount);
    decodeBigEndianInt32sInto(encodedBytes, inputByteOffset, encodedByteLength, encodedWordBuffer);
    const decodedValues = decodeFastPforInt32(encodedWordBuffer.subarray(0, encodedWordCount), expectedValueCount, workspace.decoderWorkspace);
    offset.add(encodedByteLength);
    return decodedValues;
}
|
||||
/** Decodes one zigzag-encoded 32-bit value to a signed int32. */
export function decodeZigZagInt32Value(encoded) {
    const magnitude = encoded >>> 1;
    // Odd encodings are negative: x ^ -1 === ~x.
    return (encoded & 1) === 0 ? magnitude : ~magnitude;
}
|
||||
/** Decodes one zigzag-encoded 64-bit value to a signed BigInt. */
export function decodeZigZagInt64Value(encoded) {
    const magnitude = encoded >> 1n;
    // Odd encodings are negative: x ^ -1n === ~x.
    return (encoded & 1n) === 0n ? magnitude : ~magnitude;
}
|
||||
/** Decodes one zigzag-encoded value using plain number arithmetic. */
export function decodeZigZagFloat64Value(encoded) {
    if (encoded % 2 === 1) {
        return -(encoded + 1) / 2;
    }
    return encoded / 2;
}
|
||||
/** Decodes a zigzag-encoded array into signed int32 values. */
export function decodeZigZagInt32(encodedData) {
    const out = new Int32Array(encodedData.length);
    for (let i = 0; i < out.length; i++) {
        const v = encodedData[i];
        out[i] = (v >>> 1) ^ -(v & 1);
    }
    return out;
}
|
||||
/** Decodes a zigzag-encoded array into signed 64-bit BigInt values. */
export function decodeZigZagInt64(encodedData) {
    const out = new BigInt64Array(encodedData.length);
    for (let i = 0; i < out.length; i++) {
        const v = encodedData[i];
        out[i] = (v >> 1n) ^ -(v & 1n);
    }
    return out;
}
|
||||
/** Decodes a zigzag-encoded Float64Array in place. */
export function decodeZigZagFloat64(encodedData) {
    for (let i = 0; i < encodedData.length; i++) {
        const v = encodedData[i];
        encodedData[i] = v % 2 === 1 ? (v + 1) / -2 : v / 2;
    }
}
|
||||
/**
 * Expands RLE data laid out as [run1..runN, value1..valueN].
 * When `numTotalValues` is omitted (nullable columns) it is derived
 * by summing the run lengths.
 */
export function decodeUnsignedRleInt32(encodedData, numRuns, numTotalValues) {
    let total = numTotalValues;
    if (total === undefined) {
        total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += encodedData[run];
        }
    }
    const out = new Uint32Array(total);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const length = encodedData[run];
        out.fill(encodedData[numRuns + run], writePos, writePos + length);
        writePos += length;
    }
    return out;
}
|
||||
/**
 * Expands 64-bit RLE data laid out as [run1..runN, value1..valueN].
 * When `numTotalValues` is omitted (nullable columns) it is derived
 * by summing the run lengths.
 */
export function decodeUnsignedRleInt64(encodedData, numRuns, numTotalValues) {
    let total = numTotalValues;
    if (total === undefined) {
        total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += Number(encodedData[run]);
        }
    }
    const out = new BigUint64Array(total);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const length = Number(encodedData[run]);
        out.fill(encodedData[numRuns + run], writePos, writePos + length);
        writePos += length;
    }
    return out;
}
|
||||
/** Expands Float64 RLE data laid out as [run1..runN, value1..valueN]. */
export function decodeUnsignedRleFloat64(encodedData, numRuns, numTotalValues) {
    const out = new Float64Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const length = encodedData[run];
        out.fill(encodedData[numRuns + run], writePos, writePos + length);
        writePos += length;
    }
    return out;
}
|
||||
/*
|
||||
* In place decoding of the zigzag encoded delta values.
|
||||
* Inspired by https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/differential/Delta.java
|
||||
*/
|
||||
/**
 * Decodes zigzag-encoded deltas into absolute int32 values
 * (running prefix sum, int32 wraparound preserved by the output array).
 */
export function decodeZigZagDeltaInt32(data) {
    const out = new Int32Array(data.length);
    let previous = 0;
    for (let i = 0; i < data.length; i++) {
        const v = data[i];
        out[i] = previous + ((v >>> 1) ^ -(v & 1));
        previous = out[i];
    }
    return out;
}
|
||||
/**
 * Decodes zigzag-encoded 64-bit deltas into absolute values
 * (running prefix sum, int64 wraparound preserved by the output array).
 */
export function decodeZigZagDeltaInt64(data) {
    const out = new BigInt64Array(data.length);
    let previous = 0n;
    for (let i = 0; i < data.length; i++) {
        const v = data[i];
        out[i] = previous + ((v >> 1n) ^ -(v & 1n));
        previous = out[i];
    }
    return out;
}
|
||||
/** Decodes zigzag-encoded deltas in place (running prefix sum). */
export function decodeZigZagDeltaFloat64(data) {
    let previous = 0;
    for (let i = 0; i < data.length; i++) {
        const v = data[i];
        data[i] = previous + (v % 2 === 1 ? (v + 1) / -2 : v / 2);
        previous = data[i];
    }
}
|
||||
/**
 * Expands zigzag RLE data laid out as [run1..runN, value1..valueN].
 * When `numTotalValues` is omitted (nullable columns) it is derived
 * by summing the run lengths.
 */
export function decodeZigZagRleInt32(data, numRuns, numTotalValues) {
    let total = numTotalValues;
    if (total === undefined) {
        total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += data[run];
        }
    }
    const out = new Int32Array(total);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const length = data[run];
        const z = data[run + numRuns];
        out.fill((z >>> 1) ^ -(z & 1), writePos, writePos + length);
        writePos += length;
    }
    return out;
}
|
||||
/**
 * Expands 64-bit zigzag RLE data laid out as [run1..runN, value1..valueN].
 * When `numTotalValues` is omitted (nullable columns) it is derived
 * by summing the run lengths.
 */
export function decodeZigZagRleInt64(data, numRuns, numTotalValues) {
    let total = numTotalValues;
    if (total === undefined) {
        total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += Number(data[run]);
        }
    }
    const out = new BigInt64Array(total);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const length = Number(data[run]);
        const z = data[run + numRuns];
        out.fill((z >> 1n) ^ -(z & 1n), writePos, writePos + length);
        writePos += length;
    }
    return out;
}
|
||||
/** Expands Float64 zigzag RLE data laid out as [run1..runN, value1..valueN]. */
export function decodeZigZagRleFloat64(data, numRuns, numTotalValues) {
    const out = new Float64Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const length = data[run];
        const z = data[run + numRuns];
        out.fill(z % 2 === 1 ? (z + 1) / -2 : z / 2, writePos, writePos + length);
        writePos += length;
    }
    return out;
}
|
||||
/*
|
||||
* Inspired by https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/differential/Delta.java
|
||||
*/
|
||||
/**
 * In-place inverse delta: replaces each element with the running prefix sum.
 * (The original's manual 4x unrolling is a JavaFastPFOR port artifact; a
 * plain loop produces the same typed-array-coerced results.)
 */
export function fastInverseDelta(data) {
    for (let i = 1; i < data.length; i++) {
        data[i] += data[i - 1];
    }
}
|
||||
/** In-place inverse delta: replaces each element with the running prefix sum. */
export function inverseDelta(data) {
    for (let i = 1; i < data.length; i++) {
        data[i] += data[i - 1];
    }
}
|
||||
/*
|
||||
* In place decoding of the zigzag delta encoded Vec2.
|
||||
* Inspired by https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/differential/Delta.java
|
||||
*/
|
||||
/**
 * Decodes a zigzag-delta-encoded interleaved Vec2 stream [x0,y0,x1,y1,...];
 * deltas apply per component (x against previous x, y against previous y).
 * Expects an even-length input.
 */
export function decodeComponentwiseDeltaVec2(data) {
    if (data.length < 2)
        return new Int32Array(data);
    const out = new Int32Array(data.length);
    out[0] = (data[0] >>> 1) ^ -(data[0] & 1);
    out[1] = (data[1] >>> 1) ^ -(data[1] & 1);
    for (let i = 2; i !== data.length; i += 2) {
        const zx = data[i];
        const zy = data[i + 1];
        out[i] = ((zx >>> 1) ^ -(zx & 1)) + out[i - 2];
        out[i + 1] = ((zy >>> 1) ^ -(zy & 1)) + out[i - 1];
    }
    return out;
}
|
||||
/**
 * Decodes a zigzag-delta-encoded interleaved Vec2 stream, then scales each
 * component by `scale`, rounds, and clamps to [min, max]. The running
 * (unscaled) cursor keeps full precision; only the outputs are scaled.
 * Expects an even-length input.
 */
export function decodeComponentwiseDeltaVec2Scaled(data, scale, min, max) {
    if (data.length < 2)
        return new Int32Array(data);
    const out = new Int32Array(data.length);
    let x = (data[0] >>> 1) ^ -(data[0] & 1);
    let y = (data[1] >>> 1) ^ -(data[1] & 1);
    out[0] = Math.min(max, Math.max(min, Math.round(x * scale)));
    out[1] = Math.min(max, Math.max(min, Math.round(y * scale)));
    for (let i = 2; i !== data.length; i += 2) {
        x += (data[i] >>> 1) ^ -(data[i] & 1);
        y += (data[i + 1] >>> 1) ^ -(data[i + 1] & 1);
        out[i] = Math.min(max, Math.max(min, Math.round(x * scale)));
        out[i + 1] = Math.min(max, Math.max(min, Math.round(y * scale)));
    }
    return out;
}
|
||||
/** Clamps `n` into [min, max]. */
function clamp(n, min, max) {
    const lowerBounded = Math.max(min, n);
    return Math.min(max, lowerBounded);
}
|
||||
/* Transform data to allow util access ------------------------------------------------------------------------ */
|
||||
/**
 * Decodes zigzag delta-of-delta data into an offset buffer with a leading
 * zero sentinel: out[0] = 0, out[i] = out[i-1] + cumulativeDelta(i).
 */
export function decodeZigZagDeltaOfDeltaInt32(data) {
    const out = new Int32Array(data.length + 1);
    let deltaSum = 0;
    for (let i = 1; i < out.length; i++) {
        const v = data[i - 1];
        deltaSum += (v >>> 1) ^ -(v & 1);
        out[i] = out[i - 1] + deltaSum;
    }
    // Reinterpret the signed offsets as unsigned 32-bit values.
    return new Uint32Array(out);
}
|
||||
/**
 * Decodes zigzag RLE-encoded deltas into an offset buffer with a leading
 * zero sentinel; each run repeats one delta which is accumulated per element.
 */
export function decodeZigZagRleDeltaInt32(data, numRuns, numTotalValues) {
    const out = new Int32Array(numTotalValues + 1);
    let previous = 0;
    let write = 1;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const z = data[run + numRuns];
        const delta = (z >>> 1) ^ -(z & 1);
        for (let j = 0; j < runLength; j++) {
            out[write] = previous + delta;
            previous = out[write];
            write++;
        }
    }
    return out;
}
|
||||
/**
 * Decodes RLE-encoded (unsigned) deltas into an offset buffer with a leading
 * zero sentinel; each run repeats one delta which is accumulated per element.
 */
export function decodeRleDeltaInt32(data, numRuns, numTotalValues) {
    const out = new Uint32Array(numTotalValues + 1);
    let previous = 0;
    let write = 1;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const delta = data[run + numRuns];
        for (let j = 0; j < runLength; j++) {
            out[write] = previous + delta;
            previous = out[write];
            write++;
        }
    }
    return out;
}
|
||||
/**
|
||||
* Decode Delta-RLE with multiple runs by fully reconstructing values.
|
||||
*
|
||||
* @param data RLE encoded data: [run1, run2, ..., value1, value2, ...]
|
||||
* @param numRuns Number of runs in the RLE encoding
|
||||
* @param numValues Total number of values to reconstruct
|
||||
* @returns Reconstructed values with deltas applied
|
||||
*/
|
||||
/**
 * Decodes Delta-RLE: data is [run1..runN, zigzagDelta1..zigzagDeltaN];
 * each run's delta is applied repeatedly to a running accumulator.
 *
 * @param data RLE encoded data.
 * @param numRuns Number of runs.
 * @param numValues Total number of values to reconstruct.
 * @returns Reconstructed absolute values.
 */
export function decodeDeltaRleInt32(data, numRuns, numValues) {
    const out = new Int32Array(numValues);
    let acc = 0;
    let write = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const z = data[run + numRuns];
        const delta = (z >>> 1) ^ -(z & 1);
        for (let j = 0; j < runLength; j++) {
            acc += delta;
            out[write++] = acc;
        }
    }
    return out;
}
|
||||
/**
|
||||
* Decode Delta-RLE with multiple runs for 64-bit integers.
|
||||
*/
|
||||
/**
 * Decodes Delta-RLE for 64-bit integers: data is
 * [run1..runN, zigzagDelta1..zigzagDeltaN]; each run's delta is applied
 * repeatedly to a running BigInt accumulator.
 */
export function decodeDeltaRleInt64(data, numRuns, numValues) {
    const out = new BigInt64Array(numValues);
    let acc = 0n;
    let write = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = Number(data[run]);
        const z = data[run + numRuns];
        const delta = (z >> 1n) ^ -(z & 1n);
        for (let j = 0; j < runLength; j++) {
            acc += delta;
            out[write++] = acc;
        }
    }
    return out;
}
|
||||
/**
 * Decodes zigzag deltas into absolute values reinterpreted as unsigned
 * 32-bit integers (each partial sum is wrapped with >>> 0).
 */
export function decodeUnsignedZigZagDeltaInt32(data) {
    const out = new Uint32Array(data.length);
    let previous = 0;
    for (let i = 0; i < data.length; i++) {
        const v = data[i];
        out[i] = (previous + ((v >>> 1) ^ -(v & 1))) >>> 0;
        previous = out[i];
    }
    return out;
}
|
||||
/**
 * Decodes 64-bit zigzag deltas into absolute values reinterpreted as
 * unsigned 64-bit integers (each partial sum is wrapped via BigInt.asUintN).
 */
export function decodeUnsignedZigZagDeltaInt64(data) {
    const out = new BigUint64Array(data.length);
    let previous = 0n;
    for (let i = 0; i < data.length; i++) {
        const v = data[i];
        out[i] = BigInt.asUintN(64, previous + ((v >> 1n) ^ -(v & 1n)));
        previous = out[i];
    }
    return out;
}
|
||||
/**
 * Decodes a zigzag-delta interleaved Vec2 stream into unsigned 32-bit
 * values; each component's partial sum is wrapped with >>> 0.
 */
export function decodeUnsignedComponentwiseDeltaVec2(data) {
    if (data.length < 2) {
        return new Uint32Array(data);
    }
    const out = new Uint32Array(data.length);
    out[0] = ((data[0] >>> 1) ^ -(data[0] & 1)) >>> 0;
    out[1] = ((data[1] >>> 1) ^ -(data[1] & 1)) >>> 0;
    for (let i = 2; i < data.length; i += 2) {
        const zx = data[i];
        const zy = data[i + 1];
        out[i] = (out[i - 2] + ((zx >>> 1) ^ -(zx & 1))) >>> 0;
        out[i + 1] = (out[i - 1] + ((zy >>> 1) ^ -(zy & 1))) >>> 0;
    }
    return out;
}
|
||||
/**
 * Scaled Vec2 decoding whose signed result is reinterpreted as
 * unsigned 32-bit values.
 */
export function decodeUnsignedComponentwiseDeltaVec2Scaled(data, scale, min, max) {
    return new Uint32Array(decodeComponentwiseDeltaVec2Scaled(data, scale, min, max));
}
|
||||
/** Constant RLE layout is [runLength, value]; returns the constant value. */
export function decodeUnsignedConstRleInt32(data) {
    const [, value] = data;
    return value;
}
|
||||
/** Constant RLE layout is [runLength, zigzagValue]; returns the decoded value. */
export function decodeZigZagConstRleInt32(data) {
    const z = data[1];
    return (z >>> 1) ^ -(z & 1);
}
|
||||
/**
 * Decodes an arithmetic sequence encoded as zigzag RLE.
 * One run (length-2 data): base and delta share a single encoded value.
 * Two runs (length-4 data): layout is [run1, run2, base, delta].
 */
export function decodeZigZagSequenceRleInt32(data) {
    if (data.length === 2) {
        const z = data[1];
        const value = (z >>> 1) ^ -(z & 1);
        return [value, value];
    }
    const zb = data[2];
    const zd = data[3];
    return [(zb >>> 1) ^ -(zb & 1), (zd >>> 1) ^ -(zd & 1)];
}
|
||||
/** Constant RLE layout is [runLength, value]; returns the constant value. */
export function decodeUnsignedConstRleInt64(data) {
    const [, value] = data;
    return value;
}
|
||||
/** Constant RLE layout is [runLength, zigzagValue]; returns the decoded value. */
export function decodeZigZagConstRleInt64(data) {
    const z = data[1];
    return (z >> 1n) ^ -(z & 1n);
}
|
||||
/**
 * Decodes a 64-bit arithmetic sequence encoded as zigzag RLE.
 * One run (length-2 data): base and delta share a single encoded value.
 * Two runs (length-4 data): layout is [run1, run2, base, delta].
 */
export function decodeZigZagSequenceRleInt64(data) {
    if (data.length === 2) {
        const z = data[1];
        const value = (z >> 1n) ^ -(z & 1n);
        return [value, value];
    }
    const zb = data[2];
    const zd = data[3];
    return [(zb >> 1n) ^ -(zb & 1n), (zd >> 1n) ^ -(zd & 1n)];
}
|
||||
//# sourceMappingURL=integerDecodingUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/integerDecodingUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/integerDecodingUtils.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
19
node_modules/@maplibre/mlt/dist/decoding/integerStreamDecoder.d.ts
generated
vendored
Normal file
19
node_modules/@maplibre/mlt/dist/decoding/integerStreamDecoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
import IntWrapper from "./intWrapper";
import type { StreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import BitVector from "../vector/flat/bitVector";
import { VectorType } from "../vector/vectorType";
import type GeometryScaling from "./geometryScaling";
/* Stream-level decoders: each reads one encoded stream at `offset`
 * (advanced in place) according to `streamMetadata`. */
export declare function decodeSignedInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, scalingData?: GeometryScaling, nullabilityBuffer?: BitVector): Int32Array;
export declare function decodeUnsignedInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, scalingData?: GeometryScaling, nullabilityBuffer?: BitVector): Uint32Array;
export declare function decodeLengthStreamToOffsetBuffer(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): Uint32Array;
/* Constant and arithmetic-sequence stream decoders. */
export declare function decodeSignedConstInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): number;
export declare function decodeUnsignedConstInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): number;
export declare function decodeSequenceInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): [baseValue: number, delta: number];
export declare function decodeSequenceInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): [baseValue: bigint, delta: bigint];
/* 64-bit stream decoders; the Float64 variants avoid BigInt for speed. */
export declare function decodeSignedInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, nullabilityBuffer?: BitVector): BigInt64Array;
export declare function decodeUnsignedInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, nullabilityBuffer?: BitVector): BigUint64Array;
export declare function decodeSignedInt64AsFloat64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): Float64Array;
export declare function decodeUnsignedInt64AsFloat64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): Float64Array;
export declare function decodeSignedConstInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): bigint;
export declare function decodeUnsignedConstInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): bigint;
export declare function getVectorType(streamMetadata: StreamMetadata, sizeOrNullabilityBuffer: number | BitVector, data: Uint8Array, offset: IntWrapper, varintWidth?: "int32" | "int64"): VectorType;
|
||||
339
node_modules/@maplibre/mlt/dist/decoding/integerStreamDecoder.js
generated
vendored
Normal file
339
node_modules/@maplibre/mlt/dist/decoding/integerStreamDecoder.js
generated
vendored
Normal file
@@ -0,0 +1,339 @@
|
||||
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
|
||||
import IntWrapper from "./intWrapper";
|
||||
import { decodeComponentwiseDeltaVec2, decodeComponentwiseDeltaVec2Scaled, decodeDeltaRleInt32, decodeDeltaRleInt64, decodeFastPfor, decodeUnsignedComponentwiseDeltaVec2, decodeUnsignedComponentwiseDeltaVec2Scaled, decodeUnsignedConstRleInt32, decodeUnsignedConstRleInt64, decodeUnsignedRleInt32, decodeUnsignedRleInt64, decodeUnsignedRleFloat64, decodeUnsignedZigZagDeltaInt32, decodeUnsignedZigZagDeltaInt64, decodeVarintInt32, decodeVarintInt64, decodeVarintFloat64, decodeZigZagInt32, decodeZigZagInt64, decodeZigZagFloat64, decodeZigZagConstRleInt32, decodeZigZagConstRleInt64, decodeZigZagDeltaInt32, decodeZigZagDeltaInt64, decodeZigZagDeltaFloat64, decodeZigZagSequenceRleInt32, decodeZigZagSequenceRleInt64, decodeZigZagInt32Value, decodeZigZagInt64Value, fastInverseDelta, inverseDelta, decodeRleDeltaInt32, decodeZigZagDeltaOfDeltaInt32, decodeZigZagRleDeltaInt32, decodeZigZagRleInt32, decodeZigZagRleInt64, decodeZigZagRleFloat64, } from "./integerDecodingUtils";
|
||||
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
|
||||
import BitVector from "../vector/flat/bitVector";
|
||||
import { VectorType } from "../vector/vectorType";
|
||||
import { unpackNullable } from "./unpackNullableUtils";
|
||||
/**
 * Decodes a signed int32 stream: first undoes the physical byte-level
 * encoding (FastPfor / varint / none), then the logical encoding
 * (delta, RLE, ...). Advances `offset` past the stream.
 */
export function decodeSignedInt32Stream(data, offset, streamMetadata, scalingData, nullabilityBuffer) {
    return decodeSignedInt32(decodePhysicalLevelTechnique(data, offset, streamMetadata), streamMetadata, scalingData, nullabilityBuffer);
}
|
||||
/**
 * Decodes an unsigned int32 stream: physical decoding followed by logical
 * decoding. Advances `offset` past the stream.
 */
export function decodeUnsignedInt32Stream(data, offset, streamMetadata, scalingData, nullabilityBuffer) {
    return decodeUnsignedInt32(decodePhysicalLevelTechnique(data, offset, streamMetadata), streamMetadata, scalingData, nullabilityBuffer);
}
|
||||
/**
 * Decodes a length stream and converts the lengths into an offset
 * (prefix-sum) buffer. Advances `offset` past the stream.
 */
export function decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata) {
    return decodeLengthToOffsetBuffer(decodePhysicalLevelTechnique(data, offset, streamMetadata), streamMetadata);
}
|
||||
/**
 * Undoes the physical (byte-level) encoding of an integer stream and returns
 * the still-logically-encoded words as a Uint32Array.
 *
 * Consumes `streamMetadata.byteLength` bytes (or the varint/FastPfor
 * equivalent) from `data` and advances `offset` past the stream.
 *
 * @throws Error for physical-level techniques other than FAST_PFOR, VARINT
 *         and NONE.
 */
function decodePhysicalLevelTechnique(data, offset, streamMetadata) {
    const physicalLevelTechnique = streamMetadata.physicalLevelTechnique;
    switch (physicalLevelTechnique) {
        case PhysicalLevelTechnique.FAST_PFOR:
            return decodeFastPfor(data, streamMetadata.numValues, streamMetadata.byteLength, offset);
        case PhysicalLevelTechnique.VARINT:
            return decodeVarintInt32(data, offset, streamMetadata.numValues);
        case PhysicalLevelTechnique.NONE: {
            const dataOffset = offset.get();
            const byteLength = streamMetadata.byteLength;
            offset.add(byteLength);
            const slice = data.subarray(dataOffset, offset.get());
            // NOTE(review): Uint32Array(<Uint8Array>) copies element-wise, so
            // every *byte* becomes one uint32 element (byteLength elements
            // total) — confirm NONE-encoded streams really store one value
            // per byte rather than packed 4-byte words.
            return new Uint32Array(slice);
        }
        default:
            throw new Error(`Specified physicalLevelTechnique ${physicalLevelTechnique} is not supported (yet).`);
    }
}
|
||||
/**
 * Decodes a constant signed int32 stream. A single physical word is a plain
 * zigzag value; otherwise the stream is const-RLE encoded.
 */
export function decodeSignedConstInt32Stream(data, offset, streamMetadata) {
    const words = decodePhysicalLevelTechnique(data, offset, streamMetadata);
    return words.length === 1
        ? decodeZigZagInt32Value(words[0])
        : decodeZigZagConstRleInt32(words);
}
|
||||
/**
 * Decodes a constant unsigned int32 stream. A single physical word is the
 * value itself; otherwise the stream is const-RLE encoded.
 */
export function decodeUnsignedConstInt32Stream(data, offset, streamMetadata) {
    const words = decodePhysicalLevelTechnique(data, offset, streamMetadata);
    return words.length === 1 ? words[0] : decodeUnsignedConstRleInt32(words);
}
|
||||
/**
 * Decodes a sequence int32 stream into its [base, delta] pair.
 */
export function decodeSequenceInt32Stream(data, offset, streamMetadata) {
    return decodeZigZagSequenceRleInt32(decodePhysicalLevelTechnique(data, offset, streamMetadata));
}
|
||||
/**
 * Decodes a sequence int64 stream into its [base, delta] bigint pair.
 * Int64 streams are always varint encoded at the physical level.
 */
export function decodeSequenceInt64Stream(data, offset, streamMetadata) {
    return decodeZigZagSequenceRleInt64(decodeVarintInt64(data, offset, streamMetadata.numValues));
}
|
||||
/**
 * Decodes a signed int64 stream: varint physical decoding followed by
 * logical decoding.
 */
export function decodeSignedInt64Stream(data, offset, streamMetadata, nullabilityBuffer) {
    return decodeSignedInt64(decodeVarintInt64(data, offset, streamMetadata.numValues), streamMetadata, nullabilityBuffer);
}
|
||||
/**
 * Decodes an unsigned int64 stream: varint physical decoding followed by
 * logical decoding.
 */
export function decodeUnsignedInt64Stream(data, offset, streamMetadata, nullabilityBuffer) {
    return decodeUnsignedInt64(decodeVarintInt64(data, offset, streamMetadata.numValues), streamMetadata, nullabilityBuffer);
}
|
||||
/**
 * Decodes a signed int64 stream directly into Float64 values
 * (avoids BigInt overhead; lossy above 2^53).
 */
export function decodeSignedInt64AsFloat64Stream(data, offset, streamMetadata) {
    return decodeFloat64Values(decodeVarintFloat64(data, offset, streamMetadata.numValues), streamMetadata, true);
}
|
||||
/**
 * Decodes an unsigned int64 stream directly into Float64 values
 * (avoids BigInt overhead; lossy above 2^53).
 */
export function decodeUnsignedInt64AsFloat64Stream(data, offset, streamMetadata) {
    return decodeFloat64Values(decodeVarintFloat64(data, offset, streamMetadata.numValues), streamMetadata, false);
}
|
||||
/**
 * Decodes a constant signed int64 stream. A single varint word is a plain
 * zigzag value; otherwise the stream is const-RLE encoded.
 */
export function decodeSignedConstInt64Stream(data, offset, streamMetadata) {
    const words = decodeVarintInt64(data, offset, streamMetadata.numValues);
    return words.length === 1
        ? decodeZigZagInt64Value(words[0])
        : decodeZigZagConstRleInt64(words);
}
|
||||
/**
 * Decodes a constant unsigned int64 stream. A single varint word is the
 * value itself; otherwise the stream is const-RLE encoded.
 */
export function decodeUnsignedConstInt64Stream(data, offset, streamMetadata) {
    const words = decodeVarintInt64(data, offset, streamMetadata.numValues);
    return words.length === 1 ? words[0] : decodeUnsignedConstRleInt64(words);
}
|
||||
/**
 * This method decodes integer streams.
 * Currently the encoder uses only fixed combinations of encodings.
 * For performance reasons it also uses a fixed combination of the encodings on the decoding side.
 * The following encodings and combinations are used:
 * - Morton Delta -> always sorted so no ZigZag encoding needed
 * - Delta -> currently always in combination with ZigZag encoding
 * - Rle -> in combination with ZigZag encoding if data type is signed
 * - Delta Rle
 * - Componentwise Delta -> always ZigZag encoding is used
 *
 * If a `nullabilityBuffer` is supplied, the decoded values are expanded so
 * that null positions hold 0.
 */
function decodeSignedInt32(values, streamMetadata, scalingData, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                if (!nullabilityBuffer) {
                    // Fast path: fused delta+RLE decode when no null
                    // expansion is needed afterwards.
                    return decodeDeltaRleInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
                }
                values = decodeUnsignedRleInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeZigZagDeltaInt32(values);
            }
            else {
                decodedValues = decodeZigZagDeltaInt32(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeZigZagRleInt32(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.MORTON:
            // Morton codes are delta encoded without zigzag (always sorted);
            // fastInverseDelta mutates `values` in place.
            fastInverseDelta(values);
            decodedValues = new Int32Array(values);
            break;
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            if (scalingData && !nullabilityBuffer) {
                // Fused decode + scaling fast path for vertex buffers.
                return decodeComponentwiseDeltaVec2Scaled(values, scalingData.scale, scalingData.min, scalingData.max);
            }
            decodedValues = decodeComponentwiseDeltaVec2(values);
            break;
        case LogicalLevelTechnique.NONE:
            decodedValues = decodeZigZagInt32(values);
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        // Expand to full length, filling null slots with 0.
        return unpackNullable(decodedValues, nullabilityBuffer, 0);
    }
    return decodedValues;
}
|
||||
/**
 * Logical-level decoding for unsigned int32 streams (no zigzag step except
 * within the combined delta decoders). See decodeSignedInt32 for the list
 * of supported encoding combinations.
 *
 * If a `nullabilityBuffer` is supplied, the decoded values are expanded so
 * that null positions hold 0.
 */
function decodeUnsignedInt32(values, streamMetadata, scalingData, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                const deltaValues = decodeUnsignedRleInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeUnsignedZigZagDeltaInt32(deltaValues);
            }
            else {
                decodedValues = decodeUnsignedZigZagDeltaInt32(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeUnsignedRleInt32(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.MORTON:
            // fastInverseDelta mutates `values` in place; no copy needed for
            // the unsigned variant.
            fastInverseDelta(values);
            decodedValues = values;
            break;
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            if (scalingData && !nullabilityBuffer) {
                decodedValues = decodeUnsignedComponentwiseDeltaVec2Scaled(values, scalingData.scale, scalingData.min, scalingData.max);
            }
            else {
                decodedValues = decodeUnsignedComponentwiseDeltaVec2(values);
            }
            break;
        case LogicalLevelTechnique.NONE:
            // Physical decoding already produced the final values.
            decodedValues = values;
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        // Expand to full length, filling null slots with 0.
        return unpackNullable(decodedValues, nullabilityBuffer, 0);
    }
    return decodedValues;
}
|
||||
/**
 * Logical-level decoding for signed int64 (BigInt) streams. Mirrors
 * decodeSignedInt32 but without the Morton and componentwise-delta cases.
 *
 * If a `nullabilityBuffer` is supplied, the decoded values are expanded so
 * that null positions hold 0n.
 */
function decodeSignedInt64(values, streamMetadata, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                if (!nullabilityBuffer) {
                    // Fast path: fused delta+RLE decode when no null
                    // expansion is needed afterwards.
                    return decodeDeltaRleInt64(values, rleMetadata.runs, rleMetadata.numRleValues);
                }
                values = decodeUnsignedRleInt64(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeZigZagDeltaInt64(values);
            }
            else {
                decodedValues = decodeZigZagDeltaInt64(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeZigZagRleInt64(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.NONE:
            decodedValues = decodeZigZagInt64(values);
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        // Expand to full length, filling null slots with 0n.
        return unpackNullable(decodedValues, nullabilityBuffer, 0n);
    }
    return decodedValues;
}
|
||||
/**
 * Logical-level decoding for unsigned int64 (BigInt) streams. Mirrors
 * decodeUnsignedInt32 but without the Morton and componentwise-delta cases.
 *
 * If a `nullabilityBuffer` is supplied, the decoded values are expanded so
 * that null positions hold 0n.
 */
function decodeUnsignedInt64(values, streamMetadata, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                const deltaValues = decodeUnsignedRleInt64(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeUnsignedZigZagDeltaInt64(deltaValues);
            }
            else {
                decodedValues = decodeUnsignedZigZagDeltaInt64(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeUnsignedRleInt64(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.NONE:
            // Physical decoding already produced the final values.
            decodedValues = values;
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        // Expand to full length, filling null slots with 0n.
        return unpackNullable(decodedValues, nullabilityBuffer, 0n);
    }
    return decodedValues;
}
|
||||
/**
 * Logical-level decoding for int64 streams that were materialized as
 * Float64 at the physical level (see decode*Int64AsFloat64Stream).
 *
 * Note the mixed mutation style: decodeZigZagDeltaFloat64 and
 * decodeZigZagFloat64 decode `values` in place, while the RLE decoders
 * return a new array.
 */
function decodeFloat64Values(values, streamMetadata, isSigned) {
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                values = decodeUnsignedRleFloat64(values, rleMetadata.runs, rleMetadata.numRleValues);
            }
            // In-place zigzag-delta decode of (possibly RLE-expanded) values.
            decodeZigZagDeltaFloat64(values);
            return values;
        case LogicalLevelTechnique.RLE:
            return decodeRleFloat64(values, streamMetadata, isSigned);
        case LogicalLevelTechnique.NONE:
            if (isSigned) {
                // In-place zigzag decode; unsigned values need no transform.
                decodeZigZagFloat64(values);
            }
            return values;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
}
|
||||
/**
 * Converts a decoded length stream into an offset (prefix-sum) buffer with
 * a leading 0, handling the fixed set of encoding combinations the encoder
 * emits for length streams.
 *
 * @throws Error for any other logical-technique combination.
 */
function decodeLengthToOffsetBuffer(values, streamMetadata) {
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.DELTA &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.NONE) {
        // Delta-encoded lengths: delta-of-delta decode yields offsets directly.
        return decodeZigZagDeltaOfDeltaInt32(values);
    }
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.RLE &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.NONE) {
        const rleMetadata = streamMetadata;
        return decodeRleDeltaInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
    }
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.NONE &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.NONE) {
        //TODO: use fastInverseDelta again and check what are the performance problems in zoom 14
        //fastInverseDelta(values);
        // Plain lengths: prefix-sum in place, then prepend a 0 offset.
        inverseDelta(values);
        const offsets = new Uint32Array(streamMetadata.numValues + 1);
        offsets[0] = 0;
        offsets.set(values, 1);
        return offsets;
    }
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.DELTA &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
        const rleMetadata = streamMetadata;
        const decodedValues = decodeZigZagRleDeltaInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
        fastInverseDelta(decodedValues);
        return new Uint32Array(decodedValues);
    }
    throw new Error("Only delta encoding is supported for transforming length to offset streams yet.");
}
|
||||
/**
 * Classifies a stream as CONST, SEQUENCE or FLAT so the caller can pick a
 * specialized vector representation, by inspecting the stream metadata and
 * — for the delta+RLE two-run case — peeking at the first four encoded
 * words without consuming the stream (`offset` is left unchanged).
 */
export function getVectorType(streamMetadata, sizeOrNullabilityBuffer, data, offset, varintWidth = "int32") {
    const logicalLevelTechnique1 = streamMetadata.logicalLevelTechnique1;
    if (logicalLevelTechnique1 === LogicalLevelTechnique.RLE) {
        // A single RLE run means every value is identical.
        return streamMetadata.runs === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    if (logicalLevelTechnique1 !== LogicalLevelTechnique.DELTA ||
        streamMetadata.logicalLevelTechnique2 !== LogicalLevelTechnique.RLE) {
        return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    const numFeatures = sizeOrNullabilityBuffer instanceof BitVector ? sizeOrNullabilityBuffer.size() : sizeOrNullabilityBuffer;
    const rleMetadata = streamMetadata;
    if (rleMetadata.numRleValues !== numFeatures) {
        // Stream does not cover every feature -> cannot be a sequence.
        return VectorType.FLAT;
    }
    // Single run is always a sequence
    if (rleMetadata.runs === 1) {
        return VectorType.SEQUENCE;
    }
    if (rleMetadata.runs !== 2) {
        return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    // Two runs can be a sequence if both deltas are equal to 1
    const savedOffset = offset.get();
    if (streamMetadata.physicalLevelTechnique === PhysicalLevelTechnique.VARINT) {
        if (isDeltaRleSequenceVarintWidth(data, offset, varintWidth)) {
            return VectorType.SEQUENCE;
        }
        return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    const byteOffset = offset.get();
    // NOTE(review): this view requires data.byteOffset + byteOffset to be
    // 4-byte aligned and reads in platform (little-endian) order — TODO
    // confirm non-varint streams are always word-aligned here.
    const values = new Int32Array(data.buffer, data.byteOffset + byteOffset, 4);
    offset.set(savedOffset);
    // Check if both deltas are encoded 1
    const zigZagOne = 2;
    if (values[2] === zigZagOne && values[3] === zigZagOne) {
        return VectorType.SEQUENCE;
    }
    return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
}
|
||||
/**
 * Peeks at the first four varint words of a delta+RLE stream and reports
 * whether both delta values are the zigzag encoding of 1 (i.e. the stream
 * is an ascending sequence). The caller's offset is never advanced.
 */
function isDeltaRleSequenceVarintWidth(data, offset, varintWidth) {
    // Decode through a private copy of the offset so peeking has no side effect.
    const peekOffset = new IntWrapper(offset.get());
    if (varintWidth === "int64") {
        const [, , deltaA, deltaB] = decodeVarintInt64(data, peekOffset, 4);
        return deltaA === 2n && deltaB === 2n;
    }
    const [, , deltaA, deltaB] = decodeVarintInt32(data, peekOffset, 4);
    return deltaA === 2 && deltaB === 2;
}
|
||||
/**
 * RLE-expands a Float64 stream, applying the zigzag transform first when
 * the underlying integer type is signed.
 */
function decodeRleFloat64(data, streamMetadata, isSigned) {
    if (isSigned) {
        return decodeZigZagRleFloat64(data, streamMetadata.runs, streamMetadata.numRleValues);
    }
    return decodeUnsignedRleFloat64(data, streamMetadata.runs, streamMetadata.numRleValues);
}
|
||||
//# sourceMappingURL=integerStreamDecoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/integerStreamDecoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/integerStreamDecoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
4
node_modules/@maplibre/mlt/dist/decoding/propertyDecoder.d.ts
generated
vendored
Normal file
4
node_modules/@maplibre/mlt/dist/decoding/propertyDecoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
import type IntWrapper from "./intWrapper";
import { type Column } from "../metadata/tileset/tilesetMetadata";
import type Vector from "../vector/vector";
/**
 * Decodes one property column from the tile buffer, advancing `offset`
 * past its streams.
 *
 * NOTE(review): the implementation also returns null for skipped or empty
 * columns; the declared return type omits null — confirm against callers.
 */
export declare function decodePropertyColumn(data: Uint8Array, offset: IntWrapper, columnMetadata: Column, numStreams: number, numFeatures: number, propertyColumnNames?: Set<string>): Vector | Vector[];
|
||||
132
node_modules/@maplibre/mlt/dist/decoding/propertyDecoder.js
generated
vendored
Normal file
132
node_modules/@maplibre/mlt/dist/decoding/propertyDecoder.js
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
import { ScalarType } from "../metadata/tileset/tilesetMetadata";
|
||||
import BitVector from "../vector/flat/bitVector";
|
||||
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
|
||||
import { VectorType } from "../vector/vectorType";
|
||||
import { BooleanFlatVector } from "../vector/flat/booleanFlatVector";
|
||||
import { DoubleFlatVector } from "../vector/flat/doubleFlatVector";
|
||||
import { FloatFlatVector } from "../vector/flat/floatFlatVector";
|
||||
import { Int64ConstVector } from "../vector/constant/int64ConstVector";
|
||||
import { Int64FlatVector } from "../vector/flat/int64FlatVector";
|
||||
import { Int32FlatVector } from "../vector/flat/int32FlatVector";
|
||||
import { Int32ConstVector } from "../vector/constant/int32ConstVector";
|
||||
import { decodeBooleanRle, decodeDoublesLE, decodeFloatsLE, skipColumn } from "./decodingUtils";
|
||||
import { decodeSignedConstInt32Stream, decodeSignedConstInt64Stream, decodeSignedInt32Stream, decodeSignedInt64Stream, decodeUnsignedInt32Stream, decodeUnsignedConstInt32Stream, decodeUnsignedConstInt64Stream, decodeUnsignedInt64Stream, decodeSequenceInt32Stream, decodeSequenceInt64Stream, getVectorType, } from "./integerStreamDecoder";
|
||||
import { Int32SequenceVector } from "../vector/sequence/int32SequenceVector";
|
||||
import { Int64SequenceVector } from "../vector/sequence/int64SequenceVector";
|
||||
import { decodeSharedDictionary, decodeString } from "./stringDecoder";
|
||||
/**
 * Decodes one property column, advancing `offset` past its streams.
 *
 * Scalar columns not listed in `propertyColumnNames` are skipped (returns
 * null). Non-scalar columns are decoded via the shared dictionary path;
 * a column with zero streams yields null.
 */
export function decodePropertyColumn(data, offset, columnMetadata, numStreams, numFeatures, propertyColumnNames) {
    if (columnMetadata.type !== "scalarType") {
        // Complex (shared-dictionary) column.
        if (numStreams === 0) {
            return null;
        }
        return decodeSharedDictionary(data, offset, columnMetadata, numFeatures, propertyColumnNames);
    }
    if (propertyColumnNames && !propertyColumnNames.has(columnMetadata.name)) {
        // Column was not requested: skip its streams to keep the cursor aligned.
        skipColumn(numStreams, data, offset);
        return null;
    }
    return decodeScalarPropertyColumn(numStreams, data, offset, numFeatures, columnMetadata.scalarType, columnMetadata);
}
|
||||
/**
 * Decodes a scalar property column: optionally a present (nullability)
 * stream first, then the typed data stream(s). Stream order in the buffer
 * is fixed, so the reads below must stay in this order.
 *
 * Returns null when the column has no streams.
 */
function decodeScalarPropertyColumn(numStreams, data, offset, numFeatures, column, columnMetadata) {
    let nullabilityBuffer = null;
    if (numStreams === 0) {
        return null;
    }
    if (columnMetadata.nullable) {
        // Nullable columns carry a boolean-RLE "present" stream first.
        const presentStreamMetadata = decodeStreamMetadata(data, offset);
        const numValues = presentStreamMetadata.numValues;
        const streamDataStart = offset.get();
        const presentVector = decodeBooleanRle(data, numValues, presentStreamMetadata.byteLength, offset);
        // Reposition explicitly to the end of the stream regardless of how
        // many bytes the RLE decoder consumed.
        offset.set(streamDataStart + presentStreamMetadata.byteLength);
        nullabilityBuffer = new BitVector(presentVector, presentStreamMetadata.numValues);
    }
    const sizeOrNullabilityBuffer = nullabilityBuffer ?? numFeatures;
    const scalarType = column.physicalType;
    switch (scalarType) {
        case ScalarType.UINT_32:
        case ScalarType.INT_32:
            return decodeInt32Column(data, offset, columnMetadata, column, sizeOrNullabilityBuffer);
        case ScalarType.STRING: {
            // In embedded format: numStreams includes nullability stream if column is nullable
            const stringDataStreams = columnMetadata.nullable ? numStreams - 1 : numStreams;
            return decodeString(columnMetadata.name, data, offset, stringDataStreams, nullabilityBuffer);
        }
        case ScalarType.BOOLEAN:
            return decodeBooleanColumn(data, offset, columnMetadata, numFeatures, sizeOrNullabilityBuffer);
        case ScalarType.UINT_64:
        case ScalarType.INT_64:
            return decodeInt64Column(data, offset, columnMetadata, sizeOrNullabilityBuffer, column);
        case ScalarType.FLOAT:
            return decodeFloatColumn(data, offset, columnMetadata, sizeOrNullabilityBuffer);
        case ScalarType.DOUBLE:
            return decodeDoubleColumn(data, offset, columnMetadata, sizeOrNullabilityBuffer);
        default:
            throw new Error(`The specified data type for the field is currently not supported: ${column}`);
    }
}
|
||||
/**
 * Decodes a boolean column stored as boolean-RLE into a BooleanFlatVector,
 * advancing `offset` to the end of the data stream.
 */
function decodeBooleanColumn(data, offset, column, _numFeatures, sizeOrNullabilityBuffer) {
    const metadata = decodeStreamMetadata(data, offset);
    const streamStart = offset.get();
    const nulls = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
    const bits = decodeBooleanRle(data, metadata.numValues, metadata.byteLength, offset, nulls);
    // Reposition explicitly past the stream regardless of how many bytes the
    // RLE decoder consumed.
    offset.set(streamStart + metadata.byteLength);
    const dataVector = new BitVector(bits, metadata.numValues);
    return new BooleanFlatVector(column.name, dataVector, sizeOrNullabilityBuffer);
}
|
||||
/**
 * Decodes a float32 column (little-endian plain values) into a
 * FloatFlatVector, advancing `offset` past the stream.
 */
function decodeFloatColumn(data, offset, column, sizeOrNullabilityBuffer) {
    const metadata = decodeStreamMetadata(data, offset);
    const nulls = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
    return new FloatFlatVector(column.name, decodeFloatsLE(data, offset, metadata.numValues, nulls), sizeOrNullabilityBuffer);
}
|
||||
/**
 * Decodes a float64 column (little-endian plain values) into a
 * DoubleFlatVector, advancing `offset` past the stream.
 */
function decodeDoubleColumn(data, offset, column, sizeOrNullabilityBuffer) {
    const metadata = decodeStreamMetadata(data, offset);
    const nulls = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
    return new DoubleFlatVector(column.name, decodeDoublesLE(data, offset, metadata.numValues, nulls), sizeOrNullabilityBuffer);
}
|
||||
/**
 * Decodes an int64/uint64 column into the vector representation matched by
 * getVectorType: flat, sequence (base + delta), or constant.
 */
function decodeInt64Column(data, offset, column, sizeOrNullabilityBuffer, scalarColumn) {
    const metadata = decodeStreamMetadata(data, offset);
    const vectorType = getVectorType(metadata, sizeOrNullabilityBuffer, data, offset, "int64");
    const signed = scalarColumn.physicalType === ScalarType.INT_64;
    switch (vectorType) {
        case VectorType.FLAT: {
            const nulls = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
            const stream = signed
                ? decodeSignedInt64Stream(data, offset, metadata, nulls)
                : decodeUnsignedInt64Stream(data, offset, metadata, nulls);
            return new Int64FlatVector(column.name, stream, sizeOrNullabilityBuffer);
        }
        case VectorType.SEQUENCE: {
            const [base, delta] = decodeSequenceInt64Stream(data, offset, metadata);
            return new Int64SequenceVector(column.name, base, delta, metadata.numRleValues);
        }
        default: {
            // Remaining case is CONST.
            const constValue = signed
                ? decodeSignedConstInt64Stream(data, offset, metadata)
                : decodeUnsignedConstInt64Stream(data, offset, metadata);
            return new Int64ConstVector(column.name, constValue, sizeOrNullabilityBuffer, signed);
        }
    }
}
|
||||
/**
 * Decodes an int32/uint32 column into the vector representation matched by
 * getVectorType: flat, sequence (base + delta), or constant.
 */
function decodeInt32Column(data, offset, column, scalarColumn, sizeOrNullabilityBuffer) {
    const metadata = decodeStreamMetadata(data, offset);
    const vectorType = getVectorType(metadata, sizeOrNullabilityBuffer, data, offset);
    const signed = scalarColumn.physicalType === ScalarType.INT_32;
    switch (vectorType) {
        case VectorType.FLAT: {
            const nulls = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
            const stream = signed
                ? decodeSignedInt32Stream(data, offset, metadata, undefined, nulls)
                : decodeUnsignedInt32Stream(data, offset, metadata, undefined, nulls);
            return new Int32FlatVector(column.name, stream, sizeOrNullabilityBuffer);
        }
        case VectorType.SEQUENCE: {
            const [base, delta] = decodeSequenceInt32Stream(data, offset, metadata);
            return new Int32SequenceVector(column.name, base, delta, metadata.numRleValues);
        }
        default: {
            // Remaining case is CONST.
            const constValue = signed
                ? decodeSignedConstInt32Stream(data, offset, metadata)
                : decodeUnsignedConstInt32Stream(data, offset, metadata);
            return new Int32ConstVector(column.name, constValue, sizeOrNullabilityBuffer, signed);
        }
    }
}
|
||||
/**
 * Type guard: true when the size-or-nullability argument is a BitVector
 * (i.e. the column is nullable) rather than a plain feature count.
 */
function isNullabilityBuffer(sizeOrNullabilityBuffer) {
    return sizeOrNullabilityBuffer instanceof BitVector;
}
|
||||
//# sourceMappingURL=propertyDecoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/propertyDecoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/propertyDecoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
6
node_modules/@maplibre/mlt/dist/decoding/stringDecoder.d.ts
generated
vendored
Normal file
6
node_modules/@maplibre/mlt/dist/decoding/stringDecoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
import type IntWrapper from "./intWrapper";
import BitVector from "../vector/flat/bitVector";
import type Vector from "../vector/vector";
import { type Column } from "../metadata/tileset/tilesetMetadata";
/**
 * Decodes a string column from its streams (plain, dictionary or FSST
 * dictionary encoded), advancing `offset` past them.
 */
export declare function decodeString(name: string, data: Uint8Array, offset: IntWrapper, numStreams: number, bitVector?: BitVector): Vector;
/**
 * Decodes string columns that share one dictionary, producing one vector
 * per requested property column.
 */
export declare function decodeSharedDictionary(data: Uint8Array, offset: IntWrapper, column: Column, numFeatures: number, propertyColumnNames?: Set<string>): Vector[];
|
||||
174
node_modules/@maplibre/mlt/dist/decoding/stringDecoder.js
generated
vendored
Normal file
174
node_modules/@maplibre/mlt/dist/decoding/stringDecoder.js
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
|
||||
import { StringFlatVector } from "../vector/flat/stringFlatVector";
|
||||
import { StringDictionaryVector } from "../vector/dictionary/stringDictionaryVector";
|
||||
import BitVector from "../vector/flat/bitVector";
|
||||
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
|
||||
import { DictionaryType } from "../metadata/tile/dictionaryType";
|
||||
import { LengthType } from "../metadata/tile/lengthType";
|
||||
import { decodeUnsignedInt32Stream, decodeLengthStreamToOffsetBuffer } from "./integerStreamDecoder";
|
||||
import { ScalarType } from "../metadata/tileset/tilesetMetadata";
|
||||
import { decodeVarintInt32 } from "./integerDecodingUtils";
|
||||
import { decodeBooleanRle, skipColumn } from "./decodingUtils";
|
||||
import { StringFsstDictionaryVector } from "../vector/fsst-dictionary/stringFsstDictionaryVector";
|
||||
/**
 * Decodes one string column by iterating its physical streams and
 * collecting them by role (present / offset / length / data), then building
 * the most specific vector the collected streams allow:
 * FSST dictionary > plain dictionary > plain strings.
 *
 * Advances `offset` past all `numStreams` streams.
 */
export function decodeString(name, data, offset, numStreams, bitVector) {
    let dictionaryLengthStream = null;
    let offsetStream = null;
    let dictionaryStream = null;
    let symbolLengthStream = null;
    let symbolTableStream = null;
    // An externally supplied nullability buffer (shared dictionary case)
    // takes precedence over a PRESENT stream found below.
    let nullabilityBuffer = bitVector ?? null;
    let plainLengthStream = null;
    let plainDataStream = null;
    for (let i = 0; i < numStreams; i++) {
        const streamMetadata = decodeStreamMetadata(data, offset);
        switch (streamMetadata.physicalStreamType) {
            case PhysicalStreamType.PRESENT: {
                const presentData = decodeBooleanRle(data, streamMetadata.numValues, streamMetadata.byteLength, offset);
                const presentStream = new BitVector(presentData, streamMetadata.numValues);
                nullabilityBuffer = bitVector ?? presentStream;
                break;
            }
            case PhysicalStreamType.OFFSET: {
                offsetStream = decodeUnsignedInt32Stream(data, offset, streamMetadata, undefined, nullabilityBuffer);
                break;
            }
            case PhysicalStreamType.LENGTH: {
                const lengthStream = decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata);
                if (LengthType.DICTIONARY === streamMetadata.logicalStreamType.lengthType) {
                    dictionaryLengthStream = lengthStream;
                }
                else if (LengthType.SYMBOL === streamMetadata.logicalStreamType.lengthType) {
                    symbolLengthStream = lengthStream;
                }
                else {
                    // Plain string encoding uses VAR_BINARY length type
                    plainLengthStream = lengthStream;
                }
                break;
            }
            case PhysicalStreamType.DATA: {
                // Raw bytes: view without copying, then skip over the stream.
                const dataStream = data.subarray(offset.get(), offset.get() + streamMetadata.byteLength);
                offset.add(streamMetadata.byteLength);
                const dictType = streamMetadata.logicalStreamType.dictionaryType;
                if (DictionaryType.FSST === dictType) {
                    symbolTableStream = dataStream;
                }
                else if (DictionaryType.SINGLE === dictType || DictionaryType.SHARED === dictType) {
                    dictionaryStream = dataStream;
                }
                else if (DictionaryType.NONE === dictType) {
                    plainDataStream = dataStream;
                }
                break;
            }
        }
    }
    // First non-null decoder wins: FSST dictionary, plain dictionary, then
    // plain string encoding.
    return (decodeFsstDictionaryVector(name, symbolTableStream, offsetStream, dictionaryLengthStream, dictionaryStream, symbolLengthStream, nullabilityBuffer) ??
        decodeDictionaryVector(name, dictionaryStream, offsetStream, dictionaryLengthStream, nullabilityBuffer) ??
        decodePlainStringVector(name, plainLengthStream, plainDataStream, offsetStream, nullabilityBuffer));
}
|
||||
/**
 * Builds an FSST-compressed string dictionary vector when a symbol table
 * stream was found in the column; returns null otherwise so the caller can
 * fall through to the next encoding.
 */
function decodeFsstDictionaryVector(name, symbolTableStream, offsetStream, dictionaryLengthStream, dictionaryStream, symbolLengthStream, nullabilityBuffer) {
    return symbolTableStream
        ? new StringFsstDictionaryVector(name, offsetStream, dictionaryLengthStream, dictionaryStream, symbolLengthStream, symbolTableStream, nullabilityBuffer)
        : null;
}
|
||||
/**
 * Builds a plain string dictionary vector when a dictionary data stream was
 * found in the column; returns null otherwise so the caller can fall through
 * to the plain-string encoding.
 */
function decodeDictionaryVector(name, dictionaryStream, offsetStream, dictionaryLengthStream, nullabilityBuffer) {
    if (!dictionaryStream) {
        return null;
    }
    if (nullabilityBuffer) {
        return new StringDictionaryVector(name, offsetStream, dictionaryLengthStream, dictionaryStream, nullabilityBuffer);
    }
    return new StringDictionaryVector(name, offsetStream, dictionaryLengthStream, dictionaryStream);
}
|
||||
/**
 * Builds a vector for plain (non-dictionary) string streams, or returns null
 * when the required length/data streams are missing.
 *
 * Three layouts are handled:
 * - an explicit offset stream: the plain streams are treated as a dictionary;
 * - a nullability buffer whose size disagrees with the number of length
 *   entries: a synthetic offset stream is built that maps each present slot
 *   to the next value index (null slots point at offset 0);
 * - otherwise: a flat string vector over the raw streams.
 */
function decodePlainStringVector(name, plainLengthStream, plainDataStream, offsetStream, nullabilityBuffer) {
    if (!plainLengthStream || !plainDataStream) {
        return null;
    }
    if (offsetStream) {
        return nullabilityBuffer
            ? new StringDictionaryVector(name, offsetStream, plainLengthStream, plainDataStream, nullabilityBuffer)
            : new StringDictionaryVector(name, offsetStream, plainLengthStream, plainDataStream);
    }
    if (nullabilityBuffer && nullabilityBuffer.size() !== plainLengthStream.length - 1) {
        const slotCount = nullabilityBuffer.size();
        // Uint32Array is zero-initialized, so null slots already hold offset 0.
        const sparseOffsets = new Uint32Array(slotCount);
        let nextValue = 0;
        for (let slot = 0; slot < slotCount; slot++) {
            if (nullabilityBuffer.get(slot)) {
                sparseOffsets[slot] = nextValue;
                nextValue += 1;
            }
        }
        return new StringDictionaryVector(name, sparseOffsets, plainLengthStream, plainDataStream, nullabilityBuffer);
    }
    return nullabilityBuffer
        ? new StringFlatVector(name, plainLengthStream, plainDataStream, nullabilityBuffer)
        : new StringFlatVector(name, plainLengthStream, plainDataStream);
}
|
||||
/**
 * Decodes a struct column whose string child fields share one dictionary.
 *
 * Phase 1 consumes the shared streams — length streams (dictionary/symbol)
 * and either an FSST symbol table or the dictionary data itself — until the
 * SINGLE/SHARED DATA stream terminates the loop. Phase 2 then decodes one
 * present + offset stream pair per child field and builds a vector per child
 * that references the shared buffers.
 *
 * @param name (via `column.name`) Prefix of the child column names.
 * @param data Tile byte buffer.
 * @param offset Mutable cursor into `data`; advanced past every decoded stream.
 * @param column Struct column metadata; its `complexType.children` drive phase 2.
 * @param numFeatures Feature count used to detect nullable offset streams.
 * @param propertyColumnNames Optional filter set; child columns not in the set
 *        are skipped via `skipColumn`.
 * @returns Array of string dictionary vectors, one per decoded child field.
 * @throws Error for any child that is not a 2-stream scalar STRING field.
 */
export function decodeSharedDictionary(data, offset, column, numFeatures, propertyColumnNames) {
    let dictionaryOffsetBuffer = null;
    let dictionaryBuffer = null;
    let symbolOffsetBuffer = null;
    let symbolTableBuffer = null;
    let dictionaryStreamDecoded = false;
    // Phase 1: shared streams end with the SINGLE/SHARED dictionary DATA stream.
    while (!dictionaryStreamDecoded) {
        const streamMetadata = decodeStreamMetadata(data, offset);
        switch (streamMetadata.physicalStreamType) {
            case PhysicalStreamType.LENGTH:
                if (LengthType.DICTIONARY === streamMetadata.logicalStreamType.lengthType) {
                    dictionaryOffsetBuffer = decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata);
                }
                else {
                    symbolOffsetBuffer = decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata);
                }
                break;
            case PhysicalStreamType.DATA:
                if (DictionaryType.SINGLE === streamMetadata.logicalStreamType.dictionaryType ||
                    DictionaryType.SHARED === streamMetadata.logicalStreamType.dictionaryType) {
                    dictionaryBuffer = data.subarray(offset.get(), offset.get() + streamMetadata.byteLength);
                    dictionaryStreamDecoded = true;
                }
                else {
                    // Any other DATA stream here is the FSST symbol table.
                    symbolTableBuffer = data.subarray(offset.get(), offset.get() + streamMetadata.byteLength);
                }
                offset.add(streamMetadata.byteLength);
                break;
        }
    }
    // Phase 2: decode the per-child present/offset streams.
    const childFields = column.complexType.children;
    const stringDictionaryVectors = [];
    let i = 0;
    for (const childField of childFields) {
        const numStreams = decodeVarintInt32(data, offset, 1)[0];
        if (numStreams === 0) {
            /* Column is not present in the tile */
            continue;
        }
        const columnName = childField.name ? `${column.name}${childField.name}` : column.name;
        if (propertyColumnNames) {
            if (!propertyColumnNames.has(columnName)) {
                //TODO: add size of sub column to Mlt for faster skipping
                skipColumn(numStreams, data, offset);
                continue;
            }
        }
        if (numStreams !== 2 ||
            childField.type !== "scalarField" ||
            childField.scalarField.physicalType !== ScalarType.STRING) {
            throw new Error("Currently only optional string fields are implemented for a struct.");
        }
        const presentStreamMetadata = decodeStreamMetadata(data, offset);
        const presentStream = decodeBooleanRle(data, presentStreamMetadata.numValues, presentStreamMetadata.byteLength, offset);
        const offsetStreamMetadata = decodeStreamMetadata(data, offset);
        const offsetCount = offsetStreamMetadata.decompressedCount;
        // Fewer decompressed offsets than features means nulls were stripped
        // from the offset stream, so the present stream is needed to re-expand it.
        const isNullable = offsetCount !== numFeatures;
        const offsetStream = decodeUnsignedInt32Stream(data, offset, offsetStreamMetadata, undefined, isNullable ? new BitVector(presentStream, presentStreamMetadata.numValues) : undefined);
        // An FSST symbol table was decoded in phase 1 => FSST dictionary vector,
        // otherwise a plain dictionary vector over the shared buffers.
        stringDictionaryVectors[i++] = symbolTableBuffer
            ? new StringFsstDictionaryVector(columnName, offsetStream, dictionaryOffsetBuffer, dictionaryBuffer, symbolOffsetBuffer, symbolTableBuffer, new BitVector(presentStream, presentStreamMetadata.numValues))
            : new StringDictionaryVector(columnName, offsetStream, dictionaryOffsetBuffer, dictionaryBuffer, new BitVector(presentStream, presentStreamMetadata.numValues));
    }
    return stringDictionaryVectors;
}
|
||||
//# sourceMappingURL=stringDecoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/stringDecoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/stringDecoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
25
node_modules/@maplibre/mlt/dist/decoding/unpackNullableUtils.d.ts
generated
vendored
Normal file
25
node_modules/@maplibre/mlt/dist/decoding/unpackNullableUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
import BitVector from "../vector/flat/bitVector.js";
|
||||
/**
|
||||
* Type constraint for TypedArray types that can be unpacked
|
||||
*/
|
||||
export type TypedArrayConstructor = Int32ArrayConstructor | Uint32ArrayConstructor | BigInt64ArrayConstructor | BigUint64ArrayConstructor | Float32ArrayConstructor | Float64ArrayConstructor;
|
||||
export type TypedArrayInstance = Int32Array | Uint32Array | BigInt64Array | BigUint64Array | Float32Array | Float64Array;
|
||||
/**
|
||||
* Generic unpacking function.
|
||||
* Reconstructs the full array by inserting default values at null positions.
|
||||
*
|
||||
* @param dataStream The compact data stream containing only non-null values
|
||||
* @param presentBits BitVector indicating which positions have values (null if non-nullable)
|
||||
* @param defaultValue The default value to insert at null positions (0, 0n, etc.)
|
||||
* @returns Full array with default values at null positions
|
||||
*/
|
||||
export declare function unpackNullable<T extends TypedArrayInstance>(dataStream: T, presentBits: BitVector | null, defaultValue: number | bigint): T;
|
||||
/**
|
||||
* Special case for boolean columns because BitVector is not directly compatible with TypedArray.
|
||||
*
|
||||
* @param dataStream The compact BitVector data containing only non-null boolean values
|
||||
* @param dataStreamSize The number of actual values in dataStream
|
||||
* @param presentBits BitVector indicating which positions have values (null if non-nullable)
|
||||
* @returns Uint8Array buffer for BitVector with false at null positions
|
||||
*/
|
||||
export declare function unpackNullableBoolean(dataStream: Uint8Array, dataStreamSize: number, presentBits: BitVector | null): Uint8Array;
|
||||
51
node_modules/@maplibre/mlt/dist/decoding/unpackNullableUtils.js
generated
vendored
Normal file
51
node_modules/@maplibre/mlt/dist/decoding/unpackNullableUtils.js
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
import BitVector from "../vector/flat/bitVector.js";
|
||||
/**
|
||||
* Generic unpacking function.
|
||||
* Reconstructs the full array by inserting default values at null positions.
|
||||
*
|
||||
* @param dataStream The compact data stream containing only non-null values
|
||||
* @param presentBits BitVector indicating which positions have values (null if non-nullable)
|
||||
* @param defaultValue The default value to insert at null positions (0, 0n, etc.)
|
||||
* @returns Full array with default values at null positions
|
||||
*/
|
||||
export function unpackNullable(dataStream, presentBits, defaultValue) {
|
||||
// Non-nullable case: return data stream as-is
|
||||
if (!presentBits) {
|
||||
return dataStream;
|
||||
}
|
||||
const size = presentBits.size();
|
||||
// Create new array of same type with full size
|
||||
const constructor = dataStream.constructor;
|
||||
const result = new constructor(size);
|
||||
let counter = 0;
|
||||
for (let i = 0; i < size; i++) {
|
||||
// If position has a value, take from data stream; otherwise use default
|
||||
result[i] = presentBits.get(i) ? dataStream[counter++] : defaultValue;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Special case for boolean columns because BitVector is not directly compatible with TypedArray.
|
||||
*
|
||||
* @param dataStream The compact BitVector data containing only non-null boolean values
|
||||
* @param dataStreamSize The number of actual values in dataStream
|
||||
* @param presentBits BitVector indicating which positions have values (null if non-nullable)
|
||||
* @returns Uint8Array buffer for BitVector with false at null positions
|
||||
*/
|
||||
/**
 * Re-expands a compact boolean (bit-packed) stream to its full length.
 * Boolean columns need this special case because BitVector is not a
 * TypedArray and cannot go through `unpackNullable`.
 *
 * @param dataStream Bit-packed bytes containing only the non-null booleans.
 * @param dataStreamSize Number of valid bits in `dataStream`.
 * @param presentBits Bit vector marking which slots hold a value; when null
 *        the column is non-nullable and the stream is returned unchanged.
 * @returns Uint8Array backing buffer for a BitVector with false in null slots.
 */
export function unpackNullableBoolean(dataStream, dataStreamSize, presentBits) {
    // Non-nullable column: nothing to re-expand.
    if (!presentBits) {
        return dataStream;
    }
    const total = presentBits.size();
    const packedValues = new BitVector(dataStream, dataStreamSize);
    const expanded = new BitVector(new Uint8Array(Math.ceil(total / 8)), total);
    let readPos = 0;
    for (let slot = 0; slot < total; slot++) {
        if (presentBits.get(slot)) {
            expanded.set(slot, packedValues.get(readPos));
            readPos += 1;
        }
        else {
            expanded.set(slot, false);
        }
    }
    return expanded.getBuffer();
}
|
||||
//# sourceMappingURL=unpackNullableUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/decoding/unpackNullableUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/decoding/unpackNullableUtils.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"unpackNullableUtils.js","sourceRoot":"","sources":["../../src/decoding/unpackNullableUtils.ts"],"names":[],"mappings":"AAAA,OAAO,SAAS,MAAM,6BAA6B,CAAC;AAqBpD;;;;;;;;GAQG;AACH,MAAM,UAAU,cAAc,CAC1B,UAAa,EACb,WAA6B,EAC7B,YAA6B;IAE7B,8CAA8C;IAC9C,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,UAAU,CAAC;IACtB,CAAC;IAED,MAAM,IAAI,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC;IAChC,+CAA+C;IAC/C,MAAM,WAAW,GAAG,UAAU,CAAC,WAAoC,CAAC;IACpE,MAAM,MAAM,GAAG,IAAI,WAAW,CAAC,IAAI,CAAM,CAAC;IAE1C,IAAI,OAAO,GAAG,CAAC,CAAC;IAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5B,wEAAwE;QACxE,MAAM,CAAC,CAAC,CAAC,GAAG,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,CAAE,YAAoB,CAAC;IACnF,CAAC;IAED,OAAO,MAAM,CAAC;AAClB,CAAC;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,qBAAqB,CACjC,UAAsB,EACtB,cAAsB,EACtB,WAA6B;IAE7B,oBAAoB;IACpB,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,UAAU,CAAC;IACtB,CAAC;IAED,MAAM,WAAW,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC;IACvC,MAAM,SAAS,GAAG,IAAI,SAAS,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;IAC5D,MAAM,MAAM,GAAG,IAAI,SAAS,CAAC,IAAI,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,GAAG,CAAC,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC;IAEtF,IAAI,OAAO,GAAG,CAAC,CAAC;IAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,EAAE,CAAC,EAAE,EAAE,CAAC;QACnC,sEAAsE;QACtE,MAAM,KAAK,GAAG,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,GAAG,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;QACpE,MAAM,CAAC,GAAG,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC;IACzB,CAAC;IAED,OAAO,MAAM,CAAC,SAAS,EAAE,CAAC;AAC9B,CAAC","sourcesContent":["import BitVector from \"../vector/flat/bitVector.js\";\n\n/**\n * Type constraint for TypedArray types that can be unpacked\n */\nexport type TypedArrayConstructor =\n | Int32ArrayConstructor\n | Uint32ArrayConstructor\n | BigInt64ArrayConstructor\n | BigUint64ArrayConstructor\n | Float32ArrayConstructor\n | Float64ArrayConstructor;\n\nexport type TypedArrayInstance =\n | Int32Array\n | Uint32Array\n | BigInt64Array\n | BigUint64Array\n | Float32Array\n | Float64Array;\n\n/**\n * Generic 
unpacking function.\n * Reconstructs the full array by inserting default values at null positions.\n *\n * @param dataStream The compact data stream containing only non-null values\n * @param presentBits BitVector indicating which positions have values (null if non-nullable)\n * @param defaultValue The default value to insert at null positions (0, 0n, etc.)\n * @returns Full array with default values at null positions\n */\nexport function unpackNullable<T extends TypedArrayInstance>(\n dataStream: T,\n presentBits: BitVector | null,\n defaultValue: number | bigint,\n): T {\n // Non-nullable case: return data stream as-is\n if (!presentBits) {\n return dataStream;\n }\n\n const size = presentBits.size();\n // Create new array of same type with full size\n const constructor = dataStream.constructor as TypedArrayConstructor;\n const result = new constructor(size) as T;\n\n let counter = 0;\n for (let i = 0; i < size; i++) {\n // If position has a value, take from data stream; otherwise use default\n result[i] = presentBits.get(i) ? 
dataStream[counter++] : (defaultValue as any);\n }\n\n return result;\n}\n\n/**\n * Special case for boolean columns because BitVector is not directly compatible with TypedArray.\n *\n * @param dataStream The compact BitVector data containing only non-null boolean values\n * @param dataStreamSize The number of actual values in dataStream\n * @param presentBits BitVector indicating which positions have values (null if non-nullable)\n * @returns Uint8Array buffer for BitVector with false at null positions\n */\nexport function unpackNullableBoolean(\n dataStream: Uint8Array,\n dataStreamSize: number,\n presentBits: BitVector | null,\n): Uint8Array {\n // Non-nullable case\n if (!presentBits) {\n return dataStream;\n }\n\n const numFeatures = presentBits.size();\n const bitVector = new BitVector(dataStream, dataStreamSize);\n const result = new BitVector(new Uint8Array(Math.ceil(numFeatures / 8)), numFeatures);\n\n let counter = 0;\n for (let i = 0; i < numFeatures; i++) {\n // If position has a value, take from data stream; otherwise use false\n const value = presentBits.get(i) ? bitVector.get(counter++) : false;\n result.set(i, value);\n }\n\n return result.getBuffer();\n}\n"]}
|
||||
Reference in New Issue
Block a user