Initial commit

This commit is contained in:
2026-04-15 17:08:39 +02:00
parent ae164c47a8
commit 47fd1c2b7a
1819 changed files with 685388 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
/**
 * Decodes big-endian bytes into `out` without allocating the output buffer.
 *
 * This function does not copy `bytes`; it writes decoded words into the provided `out` array.
 * For aligned inputs it may create a temporary typed-array view (`Uint32Array`) over `bytes.buffer`
 * to speed up decoding.
 *
 * If `byteLength` is not a multiple of 4, the final word is padded with zeros.
 *
 * @param bytes Source buffer containing big-endian 32-bit words.
 * @param offset Start position within `bytes` (relative to the view, not the underlying buffer).
 * @param byteLength Number of bytes to decode starting at `offset`.
 * @param out Destination array; must hold at least `ceil(byteLength / 4)` words.
 * @returns Number of int32 words written.
 * @throws RangeError If `(offset, byteLength)` is out of bounds, or if `out` is too small.
 */
export declare function decodeBigEndianInt32sInto(bytes: Uint8Array, offset: number, byteLength: number, out: Uint32Array): number;

View File

@@ -0,0 +1,50 @@
import { bswap32 } from "./fastPforShared";
/**
 * Decodes big-endian bytes into `out` without allocating the output buffer.
 *
 * `bytes` is never copied; decoded words are written straight into the caller's
 * `out` array. When the input happens to be 4-byte aligned, whole words are read
 * through a `Uint32Array` view over `bytes.buffer` and byte-swapped, which is
 * faster than assembling each word from individual bytes.
 *
 * A trailing partial word (when `byteLength % 4 !== 0`) is zero-padded on the right.
 *
 * @returns Number of int32 words written.
 * @throws RangeError If `(offset, byteLength)` is out of bounds, or if `out` is too small.
 */
export function decodeBigEndianInt32sInto(bytes, offset, byteLength, out) {
    if (offset < 0 || byteLength < 0 || offset + byteLength > bytes.length) {
        throw new RangeError(`decodeBigEndianInt32sInto: out of bounds (offset=${offset}, byteLength=${byteLength}, bytes.length=${bytes.length})`);
    }
    const fullWords = Math.trunc(byteLength / 4);
    const hasTail = (byteLength & 3) !== 0;
    const totalWords = fullWords + (hasTail ? 1 : 0);
    if (out.length < totalWords) {
        throw new RangeError(`decodeBigEndianInt32sInto: out.length=${out.length} < ${totalWords}`);
    }
    if (fullWords > 0) {
        const absoluteOffset = bytes.byteOffset + offset;
        if ((absoluteOffset & 3) !== 0) {
            // Unaligned: assemble each big-endian word from its four bytes.
            for (let i = 0; i < fullWords; i++) {
                const p = offset + i * 4;
                out[i] = ((bytes[p] << 24) | (bytes[p + 1] << 16) | (bytes[p + 2] << 8) | bytes[p + 3]) | 0;
            }
        }
        else {
            // Aligned: bulk-read native-endian words, then byte-swap each one.
            const words = new Uint32Array(bytes.buffer, absoluteOffset, fullWords);
            for (let i = 0; i < fullWords; i++) {
                const w = words[i];
                out[i] = ((w >>> 24) | ((w >>> 8) & 0xff00) | ((w & 0xff00) << 8) | (w << 24)) | 0;
            }
        }
    }
    if (hasTail) {
        // Final partial word: remaining bytes fill from the high end, low bytes stay zero.
        const tailStart = offset + fullWords * 4;
        const tailEnd = offset + byteLength;
        let word = 0;
        let shift = 24;
        for (let p = tailStart; p < tailEnd; p++) {
            word |= bytes[p] << shift;
            shift -= 8;
        }
        out[fullWords] = word | 0;
    }
    return totalWords;
}
//# sourceMappingURL=bigEndianDecode.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"bigEndianDecode.js","sourceRoot":"","sources":["../../src/decoding/bigEndianDecode.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAE3C;;;;;;;;;;;GAWG;AACH,MAAM,UAAU,yBAAyB,CACrC,KAAiB,EACjB,MAAc,EACd,UAAkB,EAClB,GAAgB;IAEhB,IAAI,MAAM,GAAG,CAAC,IAAI,UAAU,GAAG,CAAC,IAAI,MAAM,GAAG,UAAU,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC;QACrE,MAAM,IAAI,UAAU,CAChB,oDAAoD,MAAM,gBAAgB,UAAU,kBAAkB,KAAK,CAAC,MAAM,GAAG,CACxH,CAAC;IACN,CAAC;IAED,MAAM,eAAe,GAAG,IAAI,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;IACnD,MAAM,gBAAgB,GAAG,UAAU,GAAG,CAAC,KAAK,CAAC,CAAC;IAC9C,MAAM,OAAO,GAAG,gBAAgB,CAAC,CAAC,CAAC,eAAe,GAAG,CAAC,CAAC,CAAC,CAAC,eAAe,CAAC;IAEzE,IAAI,GAAG,CAAC,MAAM,GAAG,OAAO,EAAE,CAAC;QACvB,MAAM,IAAI,UAAU,CAAC,yCAAyC,GAAG,CAAC,MAAM,MAAM,OAAO,EAAE,CAAC,CAAC;IAC7F,CAAC;IAED,IAAI,eAAe,GAAG,CAAC,EAAE,CAAC;QACtB,MAAM,cAAc,GAAG,KAAK,CAAC,UAAU,GAAG,MAAM,CAAC;QACjD,IAAI,CAAC,cAAc,GAAG,CAAC,CAAC,KAAK,CAAC,EAAE,CAAC;YAC7B,MAAM,GAAG,GAAG,IAAI,WAAW,CAAC,KAAK,CAAC,MAAM,EAAE,cAAc,EAAE,eAAe,CAAC,CAAC;YAC3E,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,eAAe,EAAE,CAAC,EAAE,EAAE,CAAC;gBACvC,GAAG,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;YACjC,CAAC;QACL,CAAC;aAAM,CAAC;YACJ,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,eAAe,EAAE,CAAC,EAAE,EAAE,CAAC;gBACvC,MAAM,IAAI,GAAG,MAAM,GAAG,CAAC,GAAG,CAAC,CAAC;gBAC5B,GAAG,CAAC,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC;YAC1G,CAAC;QACL,CAAC;IACL,CAAC;IAED,IAAI,gBAAgB,EAAE,CAAC;QACnB,MAAM,IAAI,GAAG,MAAM,GAAG,eAAe,GAAG,CAAC,CAAC;QAC1C,MAAM,SAAS,GAAG,UAAU,GAAG,eAAe,GAAG,CAAC,CAAC;QACnD,IAAI,CAAC,GAAG,CAAC,CAAC;QACV,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,SAAS,EAAE,CAAC,EAAE,EAAE,CAAC;YACjC,CAAC,IAAI,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC,EAAE,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;QACzC,CAAC;QACD,GAAG,CAAC,eAAe,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;IACjC,CAAC;IAED,OAAO,O
AAO,CAAC;AACnB,CAAC","sourcesContent":["import { bswap32 } from \"./fastPforShared\";\n\n/**\n * Decodes big-endian bytes into `out` without allocating the output buffer.\n *\n * This function does not copy `bytes`; it writes decoded words into the provided `out` array.\n * For aligned inputs it may create a temporary typed-array view (`Uint32Array`) over `bytes.buffer`\n * to speed up decoding.\n *\n * If `byteLength` is not a multiple of 4, the final word is padded with zeros.\n *\n * @returns Number of int32 words written.\n * @throws RangeError If `(offset, byteLength)` is out of bounds, or if `out` is too small.\n */\nexport function decodeBigEndianInt32sInto(\n bytes: Uint8Array,\n offset: number,\n byteLength: number,\n out: Uint32Array,\n): number {\n if (offset < 0 || byteLength < 0 || offset + byteLength > bytes.length) {\n throw new RangeError(\n `decodeBigEndianInt32sInto: out of bounds (offset=${offset}, byteLength=${byteLength}, bytes.length=${bytes.length})`,\n );\n }\n\n const numCompleteInts = Math.floor(byteLength / 4);\n const hasTrailingBytes = byteLength % 4 !== 0;\n const numInts = hasTrailingBytes ? 
numCompleteInts + 1 : numCompleteInts;\n\n if (out.length < numInts) {\n throw new RangeError(`decodeBigEndianInt32sInto: out.length=${out.length} < ${numInts}`);\n }\n\n if (numCompleteInts > 0) {\n const absoluteOffset = bytes.byteOffset + offset;\n if ((absoluteOffset & 3) === 0) {\n const u32 = new Uint32Array(bytes.buffer, absoluteOffset, numCompleteInts);\n for (let i = 0; i < numCompleteInts; i++) {\n out[i] = bswap32(u32[i]) | 0;\n }\n } else {\n for (let i = 0; i < numCompleteInts; i++) {\n const base = offset + i * 4;\n out[i] = (bytes[base] << 24) | (bytes[base + 1] << 16) | (bytes[base + 2] << 8) | bytes[base + 3] | 0;\n }\n }\n }\n\n if (hasTrailingBytes) {\n const base = offset + numCompleteInts * 4;\n const remaining = byteLength - numCompleteInts * 4;\n let v = 0;\n for (let i = 0; i < remaining; i++) {\n v |= bytes[base + i] << (24 - i * 8);\n }\n out[numCompleteInts] = v | 0;\n }\n\n return numInts;\n}\n"]}

View File

@@ -0,0 +1,75 @@
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
import { DictionaryType } from "../metadata/tile/dictionaryType";
import { type Column } from "../metadata/tileset/tilesetMetadata";
import type { RleEncodedStreamMetadata, StreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import type { LogicalStreamType } from "../metadata/tile/logicalStreamType";
/**
 * Creates basic stream metadata with logical techniques.
 * @param logicalTechnique1 First logical-level technique.
 * @param logicalTechnique2 Second logical-level technique (implementation defaults to NONE).
 * @param numValues Declared value count (implementation defaults to 3).
 */
export declare function createStreamMetadata(logicalTechnique1: LogicalLevelTechnique, logicalTechnique2?: LogicalLevelTechnique, numValues?: number): StreamMetadata;
/**
 * Creates RLE-encoded stream metadata.
 * The implementation sets `numValues` to `runs * 2` and `decompressedCount` to `numRleValues`.
 * @param runs Number of RLE runs.
 * @param numRleValues Value count after RLE expansion.
 */
export declare function createRleMetadata(logicalTechnique1: LogicalLevelTechnique, logicalTechnique2: LogicalLevelTechnique, runs: number, numRleValues: number): RleEncodedStreamMetadata;
/**
 * Creates column metadata for STRUCT type columns.
 * @param columnName Name of the struct column.
 * @param childFields Child field descriptors; `type` defaults to a STRING scalar when omitted.
 */
export declare function createColumnMetadataForStruct(columnName: string, childFields: Array<{
    name: string;
    type?: number;
}>): Column;
/**
 * Creates a single stream with metadata and data.
 * @param options Optional logical stream type, physical technique (defaults to NONE) and value count (defaults to 0).
 */
export declare function createStream(physicalType: PhysicalStreamType, data: Uint8Array, options?: {
    logical?: LogicalStreamType;
    technique?: PhysicalLevelTechnique;
    count?: number;
}): Uint8Array;
/**
 * Encodes FSST-compressed strings into a complete stream.
 * This uses hardcoded test data: ["cat", "dog", "cat"]
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export declare function encodeFsstStrings(): Uint8Array;
/**
 * Encodes a shared dictionary for struct fields.
 * @param dictionaryStrings - Array of unique strings in the dictionary
 * @param options - Encoding options; when `useFsst` is set, symbol streams are also returned
 * @returns Object containing length and data streams
 */
export declare function encodeSharedDictionary(dictionaryStrings: string[], options?: {
    useFsst?: boolean;
    dictionaryType?: DictionaryType;
}): {
    lengthStream: Uint8Array;
    dataStream: Uint8Array;
    symbolLengthStream?: Uint8Array;
    symbolDataStream?: Uint8Array;
};
/**
 * Encodes streams for a struct field.
 * @param offsetIndices - Indices into the shared dictionary
 * @param presentValues - Boolean array indicating which values are present
 * @param isPresent - Whether the field itself is present; when false only a zero stream count is emitted
 * @returns Encoded streams for the field
 */
export declare function encodeStructField(offsetIndices: number[], presentValues: boolean[], isPresent?: boolean): Uint8Array;
/**
 * Builds a complete encoded stream by combining metadata and data.
 * The metadata's `byteLength` is overwritten with `encodedData.length` before encoding.
 */
export declare function buildEncodedStream(streamMetadata: StreamMetadata | RleEncodedStreamMetadata, encodedData: Uint8Array): Uint8Array;
/**
 * Encodes stream metadata into binary format.
 * - Byte 1: Stream type (physical type in upper 4 bits, logical subtype in lower 4 bits)
 * - Byte 2: Encodings (llt1[5-7], llt2[2-4], plt[0-1])
 * - Varints: numValues, byteLength
 * - If RLE: Varints: runs, numRleValues
 */
export declare function encodeStreamMetadata(metadata: StreamMetadata | RleEncodedStreamMetadata): Uint8Array;
/**
 * Concatenates multiple Uint8Array buffers into a single buffer.
 */
export declare function concatenateBuffers(...buffers: Uint8Array[]): Uint8Array;

View File

@@ -0,0 +1,284 @@
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
import { DictionaryType } from "../metadata/tile/dictionaryType";
import { LengthType } from "../metadata/tile/lengthType";
import { OffsetType } from "../metadata/tile/offsetType";
import IntWrapper from "./intWrapper";
import { ComplexType, ScalarType } from "../metadata/tileset/tilesetMetadata";
import { encodeBooleanRle, encodeStrings, createStringLengths } from "../encoding/encodingUtils";
import { encodeVarintInt32Value, encodeVarintInt32 } from "../encoding/integerEncodingUtils";
/**
 * Creates basic stream metadata with logical techniques.
 *
 * Produces a DATA stream description with VARINT physical encoding and a
 * placeholder byteLength of 10.
 */
export function createStreamMetadata(logicalTechnique1, logicalTechnique2 = LogicalLevelTechnique.NONE, numValues = 3) {
    const metadata = {};
    metadata.physicalStreamType = PhysicalStreamType.DATA;
    metadata.logicalStreamType = { dictionaryType: DictionaryType.NONE };
    metadata.logicalLevelTechnique1 = logicalTechnique1;
    metadata.logicalLevelTechnique2 = logicalTechnique2;
    metadata.physicalLevelTechnique = PhysicalLevelTechnique.VARINT;
    metadata.numValues = numValues;
    metadata.byteLength = 10;
    metadata.decompressedCount = numValues;
    return metadata;
}
/**
 * Creates RLE-encoded stream metadata.
 *
 * `numValues` is derived as `runs * 2`, while `decompressedCount` reflects the
 * expanded value count `numRleValues`.
 */
export function createRleMetadata(logicalTechnique1, logicalTechnique2, runs, numRleValues) {
    const metadata = {};
    metadata.physicalStreamType = PhysicalStreamType.DATA;
    metadata.logicalStreamType = { dictionaryType: DictionaryType.NONE };
    metadata.logicalLevelTechnique1 = logicalTechnique1;
    metadata.logicalLevelTechnique2 = logicalTechnique2;
    metadata.physicalLevelTechnique = PhysicalLevelTechnique.VARINT;
    metadata.numValues = runs * 2;
    metadata.byteLength = 10;
    metadata.decompressedCount = numRleValues;
    metadata.runs = runs;
    metadata.numRleValues = numRleValues;
    return metadata;
}
/**
 * Creates column metadata for STRUCT type columns.
 *
 * Each child becomes a nullable scalar field; when no `type` is given the child
 * defaults to a STRING scalar.
 */
export function createColumnMetadataForStruct(columnName, childFields) {
    const children = [];
    for (const fieldConfig of childFields) {
        children.push({
            name: fieldConfig.name,
            nullable: true,
            scalarField: {
                physicalType: fieldConfig.type ?? ScalarType.STRING,
                type: "physicalType",
            },
            type: "scalarField",
        });
    }
    return {
        name: columnName,
        nullable: false,
        complexType: {
            physicalType: ComplexType.STRUCT,
            children,
            type: "physicalType",
        },
        type: "complexType",
    };
}
/**
 * Creates a single stream with metadata and data.
 *
 * Missing options fall back to an empty logical type, a NONE physical technique
 * and a value count of 0.
 */
export function createStream(physicalType, data, options = {}) {
    const valueCount = options.count ?? 0;
    const metadata = {
        physicalStreamType: physicalType,
        logicalStreamType: options.logical ?? {},
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: options.technique ?? PhysicalLevelTechnique.NONE,
        numValues: valueCount,
        byteLength: data.length,
        decompressedCount: valueCount,
    };
    return buildEncodedStream(metadata, data);
}
/**
 * Encodes FSST-compressed strings into a complete stream.
 * This uses hardcoded test data: ["cat", "dog", "cat"]
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export function encodeFsstStrings() {
    const symbolTable = new Uint8Array([99, 97, 116, 100, 111, 103]); // "catdog"
    const symbolLengths = new Uint32Array([3, 3]);
    const compressedDictionary = new Uint8Array([0, 1]);
    const dictionaryLengths = new Uint32Array([3, 3]);
    const offsets = new Uint32Array([0, 1, 0]); // "cat", "dog", "cat"
    const numValues = 3;
    // Build each stream separately, then stitch them together in wire order.
    const presentStream = createStream(PhysicalStreamType.PRESENT, encodeBooleanRle(new Array(numValues).fill(true)), {
        technique: PhysicalLevelTechnique.VARINT,
        count: numValues,
    });
    const symbolTableStream = createStream(PhysicalStreamType.DATA, symbolTable, {
        logical: { dictionaryType: DictionaryType.FSST },
    });
    const symbolLengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(symbolLengths), {
        logical: { lengthType: LengthType.SYMBOL },
        technique: PhysicalLevelTechnique.VARINT,
        count: symbolLengths.length,
    });
    const offsetStream = createStream(PhysicalStreamType.OFFSET, encodeVarintInt32(offsets), {
        logical: { offsetType: OffsetType.STRING },
        technique: PhysicalLevelTechnique.VARINT,
        count: offsets.length,
    });
    const dictionaryLengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(dictionaryLengths), {
        logical: { lengthType: LengthType.DICTIONARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: dictionaryLengths.length,
    });
    const dictionaryStream = createStream(PhysicalStreamType.DATA, compressedDictionary, {
        logical: { dictionaryType: DictionaryType.SINGLE },
    });
    return concatenateBuffers(presentStream, symbolTableStream, symbolLengthStream, offsetStream, dictionaryLengthStream, dictionaryStream);
}
/**
 * Encodes a shared dictionary for struct fields.
 * @param dictionaryStrings - Array of unique strings in the dictionary
 * @param options - Encoding options
 * @returns Object containing length and data streams; when `useFsst` is set,
 *          hardcoded FSST symbol streams ("catdog") are included as well
 */
export function encodeSharedDictionary(dictionaryStrings, options = {}) {
    const { useFsst = false, dictionaryType = DictionaryType.SHARED } = options;
    const encodedDictionary = encodeStrings(dictionaryStrings);
    const dictionaryLengths = createStringLengths(dictionaryStrings);
    const result = {
        lengthStream: createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(new Uint32Array(dictionaryLengths)), {
            logical: { lengthType: LengthType.DICTIONARY },
            technique: PhysicalLevelTechnique.VARINT,
            count: dictionaryLengths.length,
        }),
        dataStream: createStream(PhysicalStreamType.DATA, encodedDictionary, {
            logical: { dictionaryType: dictionaryType },
            count: encodedDictionary.length,
        }),
    };
    if (!useFsst) {
        return result;
    }
    const symbolTable = new Uint8Array([99, 97, 116, 100, 111, 103]); // "catdog"
    const symbolLengths = new Uint32Array([3, 3]);
    result.symbolLengthStream = createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(symbolLengths), {
        logical: { lengthType: LengthType.SYMBOL },
        technique: PhysicalLevelTechnique.VARINT,
        count: symbolLengths.length,
    });
    result.symbolDataStream = createStream(PhysicalStreamType.DATA, symbolTable, {
        logical: { dictionaryType: DictionaryType.FSST },
        count: symbolTable.length,
    });
    return result;
}
/**
 * Encodes streams for a struct field.
 * @param offsetIndices - Indices into the shared dictionary
 * @param presentValues - Boolean array indicating which values are present
 * @param isPresent - Whether the field itself is present; absent fields emit only a zero stream count
 * @returns Encoded streams for the field
 */
export function encodeStructField(offsetIndices, presentValues, isPresent = true) {
    if (!isPresent) {
        return encodeNumStreams(0);
    }
    return concatenateBuffers(encodeNumStreams(2), createPresentStream(presentValues), createOffsetStream(offsetIndices));
}
// Writes the stream count as a single varint (at most 5 bytes) and trims the scratch buffer.
function encodeNumStreams(numStreams) {
    const scratch = new Uint8Array(5);
    const cursor = new IntWrapper(0);
    encodeVarintInt32Value(numStreams, scratch, cursor);
    return scratch.slice(0, cursor.get());
}
// Encodes a PRESENT stream: boolean-RLE packed bits preceded by VARINT metadata.
function createPresentStream(presentValues) {
    const valueCount = presentValues.length;
    return buildEncodedStream({
        physicalStreamType: PhysicalStreamType.PRESENT,
        logicalStreamType: { dictionaryType: DictionaryType.NONE },
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
        numValues: valueCount,
        byteLength: 0,
        decompressedCount: valueCount,
    }, encodeBooleanRle(presentValues));
}
// Encodes an OFFSET stream of string dictionary indices as varints.
function createOffsetStream(offsetIndices) {
    const valueCount = offsetIndices.length;
    return buildEncodedStream({
        physicalStreamType: PhysicalStreamType.OFFSET,
        logicalStreamType: { offsetType: OffsetType.STRING },
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
        numValues: valueCount,
        byteLength: 0,
        decompressedCount: valueCount,
    }, encodeVarintInt32(new Uint32Array(offsetIndices)));
}
/**
 * Builds a complete encoded stream by combining metadata and data.
 *
 * The metadata's `byteLength` is replaced with the actual payload length before
 * it is serialized ahead of the data bytes.
 */
export function buildEncodedStream(streamMetadata, encodedData) {
    const metadataBuffer = encodeStreamMetadata({
        ...streamMetadata,
        byteLength: encodedData.length,
    });
    const combined = new Uint8Array(metadataBuffer.length + encodedData.length);
    combined.set(metadataBuffer, 0);
    combined.set(encodedData, metadataBuffer.length);
    return combined;
}
/**
 * Encodes stream metadata into binary format.
 * - Byte 1: Stream type (physical type in upper 4 bits, logical subtype in lower 4 bits)
 * - Byte 2: Encodings (llt1[5-7], llt2[2-4], plt[0-1])
 * - Varints: numValues, byteLength
 * - If RLE: Varints: runs, numRleValues
 */
export function encodeStreamMetadata(metadata) {
    const buffer = new Uint8Array(100); // generous scratch; trimmed before returning
    buffer[0] = encodeStreamTypeByte(metadata);
    buffer[1] = encodeEncodingsByte(metadata);
    const cursor = new IntWrapper(2);
    encodeVarintInt32Value(metadata.numValues, buffer, cursor);
    encodeVarintInt32Value(metadata.byteLength, buffer, cursor);
    if (isRleMetadata(metadata)) {
        encodeVarintInt32Value(metadata.runs, buffer, cursor);
        encodeVarintInt32Value(metadata.numRleValues, buffer, cursor);
    }
    return buffer.slice(0, cursor.get());
}
// Packs the physical stream type (enum position) into the high nibble and the
// logical subtype into the low nibble.
function encodeStreamTypeByte(metadata) {
    const upperNibble = Object.values(PhysicalStreamType).indexOf(metadata.physicalStreamType) << 4;
    return upperNibble | getLogicalSubtypeValue(metadata);
}
// Maps the logical stream type to its enum position for the physical type it
// belongs to; falls back to 0 when no matching logical type is set.
function getLogicalSubtypeValue(metadata) {
    const { physicalStreamType, logicalStreamType } = metadata;
    if (physicalStreamType === PhysicalStreamType.DATA && logicalStreamType.dictionaryType !== undefined) {
        return Object.values(DictionaryType).indexOf(logicalStreamType.dictionaryType);
    }
    if (physicalStreamType === PhysicalStreamType.OFFSET && logicalStreamType.offsetType !== undefined) {
        return Object.values(OffsetType).indexOf(logicalStreamType.offsetType);
    }
    if (physicalStreamType === PhysicalStreamType.LENGTH && logicalStreamType.lengthType !== undefined) {
        return Object.values(LengthType).indexOf(logicalStreamType.lengthType);
    }
    return 0;
}
// Packs the two logical-level techniques (bits 5-7 and 2-4) and the physical
// technique (bits 0-1) into one byte, using each value's enum position.
function encodeEncodingsByte(metadata) {
    const logicalTechniques = Object.values(LogicalLevelTechnique);
    const llt1 = logicalTechniques.indexOf(metadata.logicalLevelTechnique1);
    const llt2 = logicalTechniques.indexOf(metadata.logicalLevelTechnique2);
    const plt = Object.values(PhysicalLevelTechnique).indexOf(metadata.physicalLevelTechnique);
    return (llt1 << 5) | (llt2 << 2) | plt;
}
// A metadata object is treated as RLE-encoded when it carries both RLE fields.
function isRleMetadata(metadata) {
    if (!("runs" in metadata)) {
        return false;
    }
    return "numRleValues" in metadata;
}
/**
 * Concatenates multiple Uint8Array buffers into a single buffer.
 */
export function concatenateBuffers(...buffers) {
    let totalLength = 0;
    for (const buffer of buffers) {
        totalLength += buffer.length;
    }
    const merged = new Uint8Array(totalLength);
    let writePosition = 0;
    for (const buffer of buffers) {
        merged.set(buffer, writePosition);
        writePosition += buffer.length;
    }
    return merged;
}
//# sourceMappingURL=decodingTestUtils.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,10 @@
import type IntWrapper from "./intWrapper";
import { VectorType } from "../vector/vectorType";
import type BitVector from "../vector/flat/bitVector";
/** Advances `offset` past `numStreams` encoded streams in `tile` by reading each stream's metadata and skipping its payload. */
export declare function skipColumn(numStreams: number, tile: Uint8Array, offset: IntWrapper): void;
/**
 * Decodes an RLE-encoded boolean stream into a packed-bit Uint8Array of `ceil(numBooleans / 8)` bytes.
 * When `nullabilityBuffer` is given, the result is routed through `unpackNullableBoolean`.
 */
export declare function decodeBooleanRle(buffer: Uint8Array, numBooleans: number, byteLength: number, pos: IntWrapper, nullabilityBuffer?: BitVector): Uint8Array;
/** Decodes a byte-RLE stream (run and literal groups) into `numBytes` bytes; `pos` is left at the end of the stream. */
export declare function decodeByteRle(buffer: Uint8Array, numBytes: number, byteLength: number, pos: IntWrapper): Uint8Array;
/** Reads `numValues` little-endian float32 values starting at `pos`, copying the bytes into a new Float32Array. */
export declare function decodeFloatsLE(encodedValues: Uint8Array, pos: IntWrapper, numValues: number, nullabilityBuffer?: BitVector): Float32Array;
/** Reads `numValues` little-endian float64 values starting at `pos`, copying the bytes into a new Float64Array. */
export declare function decodeDoublesLE(encodedValues: Uint8Array, pos: IntWrapper, numValues: number, nullabilityBuffer?: BitVector): Float64Array;
/** Decodes UTF-8 bytes in `buf[pos..end)` to a string (TextDecoder for long inputs, a manual loop for short ones). */
export declare function decodeString(buf: Uint8Array, pos: number, end: number): string;
/** Heuristically classifies a boolean stream as CONST or FLAT from its RLE byte length and first value byte. */
export declare function getVectorTypeBooleanStream(numFeatures: number, byteLength: number, data: Uint8Array, offset: IntWrapper): VectorType;

View File

@@ -0,0 +1,154 @@
import { VectorType } from "../vector/vectorType";
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import { unpackNullableBoolean, unpackNullable } from "./unpackNullableUtils";
// Skips a column by walking each of its streams: read the stream metadata,
// then jump the cursor over the stream's payload bytes.
export function skipColumn(numStreams, tile, offset) {
    //TODO: add size of column in Mlt for fast skipping
    let remaining = numStreams;
    while (remaining > 0) {
        const streamMetadata = decodeStreamMetadata(tile, offset);
        offset.add(streamMetadata.byteLength);
        remaining--;
    }
}
// Decodes numBooleans packed booleans (ceil(numBooleans / 8) bytes) via byte-RLE,
// optionally expanding the result over a nullability mask.
export function decodeBooleanRle(buffer, numBooleans, byteLength, pos, nullabilityBuffer) {
    const packedByteCount = Math.ceil(numBooleans / 8.0);
    const packedBits = decodeByteRle(buffer, packedByteCount, byteLength, pos);
    return nullabilityBuffer
        ? unpackNullableBoolean(packedBits, numBooleans, nullabilityBuffer)
        : packedBits;
}
// Decodes a byte-RLE stream: a header byte <= 0x7f starts a run of (header + 3)
// copies of the following byte; a header > 0x7f starts (256 - header) literal
// bytes. Decoding stops after numBytes outputs or at the end of the stream, and
// the cursor is always left at the stream end.
export function decodeByteRle(buffer, numBytes, byteLength, pos) {
    const decoded = new Uint8Array(numBytes);
    const streamEndPos = pos.get() + byteLength;
    let written = 0;
    while (written < numBytes && pos.get() < streamEndPos) {
        const header = buffer[pos.increment()];
        if (header > 0x7f) {
            /* Literals */
            let literalsLeft = 256 - header;
            while (literalsLeft > 0 && written < numBytes) {
                decoded[written++] = buffer[pos.increment()];
                literalsLeft--;
            }
        }
        else {
            /* Runs */
            const runLength = header + 3;
            const runValue = buffer[pos.increment()];
            const runEnd = Math.min(written + runLength, numBytes);
            decoded.fill(runValue, written, runEnd);
            written = runEnd;
        }
    }
    pos.set(streamEndPos);
    return decoded;
}
// Reads numValues little-endian float32 values at the cursor. The bytes are
// copied (slice) so the Float32Array view is always properly aligned.
export function decodeFloatsLE(encodedValues, pos, numValues, nullabilityBuffer) {
    const start = pos.get();
    const end = start + numValues * Float32Array.BYTES_PER_ELEMENT;
    const floats = new Float32Array(encodedValues.slice(start, end).buffer);
    pos.set(end);
    return nullabilityBuffer ? unpackNullable(floats, nullabilityBuffer, 0) : floats;
}
// Reads numValues little-endian float64 values at the cursor. The bytes are
// copied (slice) so the Float64Array view is always properly aligned.
export function decodeDoublesLE(encodedValues, pos, numValues, nullabilityBuffer) {
    const start = pos.get();
    const end = start + numValues * Float64Array.BYTES_PER_ELEMENT;
    const doubles = new Float64Array(encodedValues.slice(start, end).buffer);
    pos.set(end);
    return nullabilityBuffer ? unpackNullable(doubles, nullabilityBuffer, 0) : doubles;
}
const TEXT_DECODER_MIN_LENGTH = 12;
const utf8TextDecoder = new TextDecoder();
// Source: https://github.com/mapbox/pbf/issues/106
export function decodeString(buf, pos, end) {
    const byteCount = end - pos;
    if (byteCount < TEXT_DECODER_MIN_LENGTH) {
        // short strings are fast with custom implementation
        return readUtf8(buf, pos, end);
    }
    // longer strings are fast with the built-in browser TextDecoder API
    return utf8TextDecoder.decode(buf.subarray(pos, end));
}
// Hand-rolled UTF-8 decoder for short slices buf[pos..end). Invalid or overlong
// sequences produce U+FFFD and resynchronize one byte later.
function readUtf8(buf, pos, end) {
    let str = "";
    let i = pos;
    while (i < end) {
        const b0 = buf[i];
        let c = null; // codepoint
        // Sequence length from the lead byte's range: >=0xf0 -> 4, >=0xe0 -> 3, >=0xc0 -> 2, else 1.
        let bytesPerSequence = b0 > 0xef ? 4 : b0 > 0xdf ? 3 : b0 > 0xbf ? 2 : 1;
        if (i + bytesPerSequence > end)
            break; // truncated sequence at the end of the slice
        let b1;
        let b2;
        let b3;
        if (bytesPerSequence === 1) {
            // ASCII byte; bytes >= 0x80 that reach here are invalid continuations (c stays null).
            if (b0 < 0x80) {
                c = b0;
            }
        }
        else if (bytesPerSequence === 2) {
            b1 = buf[i + 1];
            if ((b1 & 0xc0) === 0x80) {
                c = ((b0 & 0x1f) << 0x6) | (b1 & 0x3f);
                if (c <= 0x7f) {
                    c = null; // overlong encoding
                }
            }
        }
        else if (bytesPerSequence === 3) {
            b1 = buf[i + 1];
            b2 = buf[i + 2];
            if ((b1 & 0xc0) === 0x80 && (b2 & 0xc0) === 0x80) {
                c = ((b0 & 0xf) << 0xc) | ((b1 & 0x3f) << 0x6) | (b2 & 0x3f);
                if (c <= 0x7ff || (c >= 0xd800 && c <= 0xdfff)) {
                    c = null; // overlong encoding or surrogate half
                }
            }
        }
        else if (bytesPerSequence === 4) {
            b1 = buf[i + 1];
            b2 = buf[i + 2];
            b3 = buf[i + 3];
            if ((b1 & 0xc0) === 0x80 && (b2 & 0xc0) === 0x80 && (b3 & 0xc0) === 0x80) {
                c = ((b0 & 0xf) << 0x12) | ((b1 & 0x3f) << 0xc) | ((b2 & 0x3f) << 0x6) | (b3 & 0x3f);
                if (c <= 0xffff || c >= 0x110000) {
                    c = null; // overlong encoding or outside the Unicode range
                }
            }
        }
        if (c === null) {
            // Invalid sequence: emit the replacement character, consume one byte.
            c = 0xfffd;
            bytesPerSequence = 1;
        }
        else if (c > 0xffff) {
            // Astral codepoint: emit as a UTF-16 surrogate pair.
            c -= 0x10000;
            str += String.fromCharCode(((c >>> 10) & 0x3ff) | 0xd800);
            c = 0xdc00 | (c & 0x3ff);
        }
        str += String.fromCharCode(c);
        i += bytesPerSequence;
    }
    return str;
}
// Heuristically classifies a boolean stream: CONST when the stream's byte length
// matches an all-runs encoding (2 bytes per run) AND the first value byte matches
// an expected all-true bit pattern; FLAT otherwise.
// NOTE(review): valuesPerRun is 0x83 (131), but decodeByteRle's maximum run is
// 0x7f + 3 = 130 values — confirm the off-by-one is intentional on the encoder side.
// NOTE(review): the `(bitCount(numFeatures) << 2) - 1` comparison presumably
// reconstructs the expected "all bits set" value byte — verify against the encoder.
export function getVectorTypeBooleanStream(numFeatures, byteLength, data, offset) {
    const valuesPerRun = 0x83;
    // TODO: use VectorType metadata field for to test which VectorType is used
    return Math.ceil(numFeatures / valuesPerRun) * 2 === byteLength &&
        /* Test the first value byte if all bits are set to true */
        (data[offset.get() + 1] & 0xff) === (bitCount(numFeatures) << 2) - 1
        ? VectorType.CONST
        : VectorType.FLAT;
}
// Number of significant bits in `number` (0 is special-cased to 1).
function bitCount(number) {
    //TODO: refactor to get rid of special case handling
    if (number === 0) {
        return 1;
    }
    return Math.floor(Math.log2(number)) + 1;
}
//# sourceMappingURL=decodingUtils.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,47 @@
/**
* FastPFOR decoding implementation.
*
* @remarks
* Terminology note: "exceptions" in FastPFOR refer to **outlier values** within a block that do not fit in the
* chosen base bit-width for that block. These are stored in separate "exception streams" and later applied back
* to the unpacked base values. This is unrelated to JavaScript/TypeScript runtime exceptions.
*/
/**
* Workspace for the FastPFOR decoder.
*/
export type FastPforDecoderWorkspace = {
dataToBePacked: Array<Uint32Array>;
dataPointers: Int32Array;
byteContainer: Uint8Array;
byteContainerI32?: Int32Array;
exceptionSizes: Int32Array;
};
/**
* Workspace for decoding the FastPFOR *wire format* (big-endian int32 words).
*
* @remarks
* This workspace owns:
* - a scratch `encodedWords` buffer to materialize big-endian words
* - the reusable `FastPforDecoderWorkspace` used by `decodeFastPforInt32`
*
* The caller is responsible for creating and reusing this object.
*/
export type FastPforWireDecodeWorkspace = {
encodedWords: Uint32Array;
decoderWorkspace: FastPforDecoderWorkspace;
};
/**
* Creates an isolated workspace for decoding.
* Reusing a workspace across calls avoids repeated allocations.
*/
export declare function createDecoderWorkspace(): FastPforDecoderWorkspace;
export declare function createFastPforWireDecodeWorkspace(initialEncodedWordCapacity?: number): FastPforWireDecodeWorkspace;
export declare function ensureFastPforWireEncodedWordsCapacity(workspace: FastPforWireDecodeWorkspace, requiredWordCount: number): Uint32Array;
/**
* Decodes a sequence of FastPFOR-encoded integers.
*
* @param encoded The input buffer containing FastPFOR encoded data.
* @param numValues The number of integers expected to be decoded.
* @param workspace Optional workspace for reuse across calls. If omitted, a new workspace is created per call.
*/
export declare function decodeFastPforInt32(encoded: Uint32Array, numValues: number, workspace?: FastPforDecoderWorkspace): Uint32Array;

View File

@@ -0,0 +1,482 @@
import { MASKS, DEFAULT_PAGE_SIZE, BLOCK_SIZE, greatestMultiple, roundUpToMultipleOf32, normalizePageSize, } from "./fastPforShared";
import { fastUnpack32_2, fastUnpack32_3, fastUnpack32_4, fastUnpack32_5, fastUnpack32_6, fastUnpack32_7, fastUnpack32_8, fastUnpack32_9, fastUnpack32_10, fastUnpack32_11, fastUnpack32_12, fastUnpack32_16, fastUnpack256_1, fastUnpack256_2, fastUnpack256_3, fastUnpack256_4, fastUnpack256_5, fastUnpack256_6, fastUnpack256_7, fastUnpack256_8, fastUnpack256_16, fastUnpack256_Generic, } from "./fastPforUnpack";
const MAX_BIT_WIDTH = 32;
// One slot per possible bit width (0..32 inclusive).
const BIT_WIDTH_SLOTS = MAX_BIT_WIDTH + 1;
const PAGE_SIZE = normalizePageSize(DEFAULT_PAGE_SIZE);
// Scratch capacity per page: 3 * PAGE_SIZE / BLOCK_SIZE + PAGE_SIZE bytes —
// presumably a few metadata bytes per block plus up to one exception byte per
// value; TODO confirm against the encoder's layout.
const BYTE_CONTAINER_SIZE = ((3 * PAGE_SIZE) / BLOCK_SIZE + PAGE_SIZE) | 0;
/**
 * Creates an isolated workspace for decoding.
 * Reusing a workspace across calls avoids repeated allocations.
 */
export function createDecoderWorkspace() {
    const byteContainer = new Uint8Array(BYTE_CONTAINER_SIZE);
    // Pre-build the aligned int view over the container so the hot path can reuse it.
    const byteContainerI32 = new Int32Array(byteContainer.buffer, byteContainer.byteOffset, byteContainer.byteLength >>> 2);
    return {
        dataToBePacked: new Array(BIT_WIDTH_SLOTS),
        dataPointers: new Int32Array(BIT_WIDTH_SLOTS),
        byteContainer,
        byteContainerI32,
        exceptionSizes: new Int32Array(BIT_WIDTH_SLOTS),
    };
}
// Creates a wire-decode workspace with at least 16 words of scratch capacity.
export function createFastPforWireDecodeWorkspace(initialEncodedWordCapacity = 16) {
    if (initialEncodedWordCapacity < 0) {
        throw new RangeError(`initialEncodedWordCapacity must be >= 0, got ${initialEncodedWordCapacity}`);
    }
    const requested = initialEncodedWordCapacity | 0;
    const capacity = requested > 16 ? requested : 16;
    return {
        encodedWords: new Uint32Array(capacity),
        decoderWorkspace: createDecoderWorkspace(),
    };
}
// Ensures the scratch word buffer can hold `requiredWordCount` words. On growth
// the buffer is replaced (doubled, minimum 16 words) and old contents are NOT
// copied over — it is a scratch buffer.
export function ensureFastPforWireEncodedWordsCapacity(workspace, requiredWordCount) {
    const current = workspace.encodedWords;
    if (requiredWordCount <= current.length) {
        return current;
    }
    const grown = new Uint32Array(Math.max(16, requiredWordCount * 2));
    workspace.encodedWords = grown;
    return grown;
}
/**
 * Copies `byteSize` bytes, stored as packed 32-bit words starting at
 * `inValues[byteContainerStart]`, into `workspace.byteContainer`.
 *
 * Bytes within each word are emitted low-byte-first in the unaligned path; the
 * aligned fast path bulk-copies words through an Int32Array view, which yields
 * the same layout on little-endian platforms (assumed here — TODO confirm).
 * Grows the container (without preserving its contents) when too small.
 *
 * @returns The workspace's byte container with the payload at [0, byteSize).
 */
function materializeByteContainer(inValues, byteContainerStart, byteSize, workspace) {
    if (workspace.byteContainer.length < byteSize) {
        // Grow with headroom; the cached int view is invalidated with the old buffer.
        workspace.byteContainer = new Uint8Array(byteSize * 2);
        workspace.byteContainerI32 = undefined;
    }
    const byteContainer = workspace.byteContainer;
    const numFullInts = byteSize >>> 2;
    if ((byteContainer.byteOffset & 3) === 0) {
        // Aligned: copy whole words at once through a cached Int32Array view,
        // rebuilding the view if it is missing or no longer matches the buffer.
        let intView = workspace.byteContainerI32;
        if (!intView ||
            intView.buffer !== byteContainer.buffer ||
            intView.byteOffset !== byteContainer.byteOffset ||
            intView.length < numFullInts) {
            intView = workspace.byteContainerI32 = new Int32Array(byteContainer.buffer, byteContainer.byteOffset, byteContainer.byteLength >>> 2);
        }
        intView.set(inValues.subarray(byteContainerStart, byteContainerStart + numFullInts));
    }
    else {
        // Unaligned: write each word byte-by-byte, low byte first.
        for (let i = 0; i < numFullInts; i = (i + 1) | 0) {
            const val = inValues[(byteContainerStart + i) | 0] | 0;
            const base = i << 2;
            byteContainer[base] = val & 0xff;
            byteContainer[(base + 1) | 0] = (val >>> 8) & 0xff;
            byteContainer[(base + 2) | 0] = (val >>> 16) & 0xff;
            byteContainer[(base + 3) | 0] = (val >>> 24) & 0xff;
        }
    }
    const remainder = byteSize & 3;
    if (remainder > 0) {
        // Trailing word: only its low `remainder` bytes belong to the payload.
        const lastIntIdx = (byteContainerStart + numFullInts) | 0;
        const lastVal = inValues[lastIntIdx] | 0;
        const base = numFullInts << 2;
        for (let r = 0; r < remainder; r = (r + 1) | 0) {
            byteContainer[(base + r) | 0] = (lastVal >>> (r << 3)) & 0xff;
        }
    }
    return byteContainer;
}
/**
 * Unpacks the per-bitWidth "exception streams" described by the page's bitmap.
 *
 * @remarks
 * For each bit-width present in the bitmap, a stream header gives the count of outlier values for that
 * bit-width, followed by packed bits representing those values.
 *
 * @param inValues - Packed input (32-bit words).
 * @param inExcept - Offset (32-bit word index) where the exception bitmap starts.
 * @param workspace - Decoder workspace used to store the unpacked exception streams.
 * @returns The new input offset (32-bit word index) after consuming all exception streams.
 * @throws Error If a stream header or stream body extends past the input.
 */
function unpackExceptionStreams(inValues, inExcept, workspace) {
    // Bit (w - 1) of the bitmap signals that a stream for bit-width w follows.
    const bitmap = inValues[inExcept++] | 0;
    const dataToBePacked = workspace.dataToBePacked;
    for (let bitWidth = 2; bitWidth <= MAX_BIT_WIDTH; bitWidth = (bitWidth + 1) | 0) {
        if (((bitmap >>> (bitWidth - 1)) & 1) === 0)
            continue;
        if (inExcept >= inValues.length) {
            throw new Error(`FastPFOR decode: truncated exception stream header (bitWidth=${bitWidth}, streamWordIndex=${inExcept}, needWords=1, availableWords=${inValues.length - inExcept}, encodedWords=${inValues.length})`);
        }
        // Stream header: number of outlier values packed at this bit-width.
        const size = inValues[inExcept++] >>> 0;
        const roundedUp = roundUpToMultipleOf32(size);
        const wordsNeeded = (size * bitWidth + 31) >>> 5;
        if (inExcept + wordsNeeded > inValues.length) {
            throw new Error(`FastPFOR decode: truncated exception stream (bitWidth=${bitWidth}, size=${size}, streamWordIndex=${inExcept}, needWords=${wordsNeeded}, availableWords=${inValues.length - inExcept}, encodedWords=${inValues.length})`);
        }
        // Values are unpacked in groups of 32, so the reusable stream buffer is
        // sized up to the next multiple of 32 to give the last group room.
        let exceptionStream = dataToBePacked[bitWidth];
        if (!exceptionStream || exceptionStream.length < roundedUp) {
            exceptionStream = dataToBePacked[bitWidth] = new Uint32Array(roundedUp);
        }
        let j = 0;
        for (; j < size; j = (j + 32) | 0) {
            fastUnpack32(inValues, inExcept, exceptionStream, j, bitWidth);
            inExcept = (inExcept + bitWidth) | 0;
        }
        // The final 32-value group may overshoot `size`; rewind the input
        // pointer by the whole words that belonged only to that overshoot, so
        // the next stream starts at the correct word.
        const overflow = (j - size) | 0;
        inExcept = (inExcept - ((overflow * bitWidth) >>> 5)) | 0;
        workspace.exceptionSizes[bitWidth] = size;
    }
    return inExcept;
}
/**
 * Unpacks one 256-value block from the packed bitstream, dispatching to an
 * unrolled kernel for the common bit widths and to the generic routine
 * otherwise.
 *
 * @param inValues - Packed input (32-bit words).
 * @param inPos - Word index where the packed block starts.
 * @param out - Output buffer.
 * @param outPos - Output offset receiving the 256 values.
 * @param bitWidth - Base bit-width used for this block.
 * @returns The word index just past the packed block (`inPos + bitWidth * 8`).
 */
function unpackBlock256(inValues, inPos, out, outPos, bitWidth) {
    if (bitWidth === 1)
        fastUnpack256_1(inValues, inPos, out, outPos);
    else if (bitWidth === 2)
        fastUnpack256_2(inValues, inPos, out, outPos);
    else if (bitWidth === 3)
        fastUnpack256_3(inValues, inPos, out, outPos);
    else if (bitWidth === 4)
        fastUnpack256_4(inValues, inPos, out, outPos);
    else if (bitWidth === 5)
        fastUnpack256_5(inValues, inPos, out, outPos);
    else if (bitWidth === 6)
        fastUnpack256_6(inValues, inPos, out, outPos);
    else if (bitWidth === 7)
        fastUnpack256_7(inValues, inPos, out, outPos);
    else if (bitWidth === 8)
        fastUnpack256_8(inValues, inPos, out, outPos);
    else if (bitWidth === 16)
        fastUnpack256_16(inValues, inPos, out, outPos);
    else
        fastUnpack256_Generic(inValues, inPos, out, outPos, bitWidth);
    // 256 values * bitWidth bits == bitWidth * 8 words consumed.
    return (inPos + (bitWidth << 3)) | 0;
}
/**
 * Reads and validates the 2-byte block header `[bitWidth, exceptionCount]`
 * from the byteContainer.
 *
 * @param byteContainer - Byte metadata buffer for the page.
 * @param byteContainerLen - The valid byte length in `byteContainer` for this page.
 * @param bytePosIn - Current offset in `byteContainer`.
 * @param block - Block index within the page (for error messages).
 * @returns The parsed header fields and the advanced `bytePosIn`.
 * @throws Error On buffer underflow or an out-of-range bitWidth.
 */
function readBlockHeader(byteContainer, byteContainerLen, bytePosIn, block) {
    const afterHeader = bytePosIn + 2;
    if (afterHeader > byteContainerLen) {
        throw new Error(`FastPFOR decode: byteContainer underflow at block=${block} (need 2 bytes for [bitWidth, exceptionCount], bytePos=${bytePosIn}, byteSize=${byteContainerLen})`);
    }
    const bitWidth = byteContainer[bytePosIn];
    const exceptionCount = byteContainer[bytePosIn + 1];
    if (bitWidth > MAX_BIT_WIDTH) {
        throw new Error(`FastPFOR decode: invalid bitWidth=${bitWidth} at block=${block} (expected 0..${MAX_BIT_WIDTH}). This likely indicates corrupted or truncated input.`);
    }
    return { bitWidth, exceptionCount, bytePosIn: afterHeader };
}
/**
 * Reads and validates a block's exception header.
 *
 * The header is a single `maxBits` byte; the width of the outlier values is
 * derived as `exceptionBitWidth = maxBits - bitWidth`. The `exceptionCount`
 * position bytes that follow are bounds-checked here as well.
 *
 * @param byteContainer - Byte metadata buffer for the page.
 * @param byteContainerLen - The valid byte length in `byteContainer` for this page.
 * @param bytePosIn - Current offset in `byteContainer`.
 * @param bitWidth - Base bit-width for the block.
 * @param exceptionCount - Number of exceptions/outliers in this block.
 * @param block - Block index within the page (for error messages).
 * @returns Parsed `maxBits`, `exceptionBitWidth`, and the advanced `bytePosIn`.
 * @throws Error On underflow or inconsistent width metadata.
 */
function readBlockExceptionHeader(byteContainer, byteContainerLen, bytePosIn, bitWidth, exceptionCount, block) {
    if (bytePosIn >= byteContainerLen) {
        throw new Error(`FastPFOR decode: exception header underflow at block=${block} (need 1 byte for maxBits, bytePos=${bytePosIn}, byteSize=${byteContainerLen})`);
    }
    const maxBits = byteContainer[bytePosIn];
    const cursor = bytePosIn + 1;
    if (maxBits < bitWidth || maxBits > MAX_BIT_WIDTH) {
        throw new Error(`FastPFOR decode: invalid maxBits=${maxBits} at block=${block} (bitWidth=${bitWidth}, expected ${bitWidth}..${MAX_BIT_WIDTH})`);
    }
    const exceptionBitWidth = (maxBits - bitWidth) | 0;
    if (exceptionBitWidth < 1 || exceptionBitWidth > MAX_BIT_WIDTH) {
        throw new Error(`FastPFOR decode: invalid exceptionBitWidth=${exceptionBitWidth} at block=${block} (bitWidth=${bitWidth}, maxBits=${maxBits})`);
    }
    if (cursor + exceptionCount > byteContainerLen) {
        throw new Error(`FastPFOR decode: exception positions underflow at block=${block} (need=${exceptionCount}, have=${byteContainerLen - cursor})`);
    }
    return { maxBits, exceptionBitWidth, bytePosIn: cursor };
}
/**
 * Applies a block's FastPFOR exceptions (outliers) on top of the already
 * unpacked base values.
 *
 * Exception metadata lives in `byteContainer`: one `maxBits` byte followed by
 * `exceptionCount` position bytes (0..255, block-local). The exception values
 * themselves come from the pre-unpacked streams in `workspace` and are OR-ed
 * into the output above the base `bitWidth` bits. As a special case, a
 * 1-bit exception width carries no stream payload — each position simply sets
 * bit `bitWidth`.
 *
 * @param out - Output buffer containing the base unpacked values for the block.
 * @param blockOutPos - Offset in `out` where the 256-value block starts.
 * @param bitWidth - Base bit-width for the block.
 * @param exceptionCount - Number of exceptions/outliers in this block.
 * @param byteContainer - Byte metadata buffer for the page.
 * @param byteContainerLen - The valid byte length in `byteContainer` for this page.
 * @param bytePosIn - Current offset in `byteContainer` (right after `[bitWidth, exceptionCount]`).
 * @param workspace - Decoder workspace holding the unpacked exception streams.
 * @param block - Block index within the page (for error messages).
 * @returns The updated byteContainer offset after the exception metadata.
 */
function applyBlockExceptions(out, blockOutPos, bitWidth, exceptionCount, byteContainer, byteContainerLen, bytePosIn, workspace, block) {
    const header = readBlockExceptionHeader(byteContainer, byteContainerLen, bytePosIn, bitWidth, exceptionCount, block);
    const excWidth = header.exceptionBitWidth;
    let cursor = header.bytePosIn;
    if (excWidth === 1) {
        // 1-bit exceptions: no stream payload, just set bit `bitWidth`.
        const highBit = 1 << bitWidth;
        for (let k = 0; k < exceptionCount; k = (k + 1) | 0) {
            const pos = byteContainer[cursor++];
            out[(pos + blockOutPos) | 0] |= highBit;
        }
        return cursor;
    }
    const stream = workspace.dataToBePacked[excWidth];
    if (!stream) {
        throw new Error(`FastPFOR decode: missing exception stream for exceptionBitWidth=${excWidth} (bitWidth=${bitWidth}, maxBits=${header.maxBits}) at block ${block}`);
    }
    const pointers = workspace.dataPointers;
    let streamPos = pointers[excWidth] | 0;
    const streamSize = workspace.exceptionSizes[excWidth] | 0;
    if (streamPos + exceptionCount > streamSize) {
        throw new Error(`FastPFOR decode: exception stream overflow for exceptionBitWidth=${excWidth} (ptr=${streamPos}, need ${exceptionCount}, size=${streamSize}) at block ${block}`);
    }
    for (let k = 0; k < exceptionCount; k = (k + 1) | 0) {
        const pos = byteContainer[cursor++];
        // The exception holds the value's high bits; OR them above the base bits.
        out[(pos + blockOutPos) | 0] |= (stream[streamPos++] | 0) << bitWidth;
    }
    pointers[excWidth] = streamPos;
    return cursor;
}
/**
 * Decodes all 256-value blocks of one page: base values from the packed
 * region, then per-block exceptions from the byteContainer metadata.
 *
 * @throws Error If the packed region was not consumed exactly.
 */
function decodePageBlocks(inValues, pageStart, inPos, packedEnd, out, outPos, blocks, byteContainer, byteContainerLen, workspace) {
    let readPos = inPos | 0;
    let metaPos = 0;
    for (let block = 0; block < blocks; block = (block + 1) | 0) {
        const header = readBlockHeader(byteContainer, byteContainerLen, metaPos, block);
        metaPos = header.bytePosIn;
        const bitWidth = header.bitWidth;
        const exceptionCount = header.exceptionCount;
        const dst = (outPos + block * BLOCK_SIZE) | 0;
        if (bitWidth === 0) {
            // All base values are zero; nothing is read from the packed region.
            out.fill(0, dst, dst + BLOCK_SIZE);
        }
        else if (bitWidth === 32) {
            // Full-width values are stored verbatim, one word each.
            for (let i = 0; i < BLOCK_SIZE; i = (i + 1) | 0) {
                out[(dst + i) | 0] = inValues[(readPos + i) | 0] | 0;
            }
            readPos = (readPos + BLOCK_SIZE) | 0;
        }
        else {
            readPos = unpackBlock256(inValues, readPos, out, dst, bitWidth);
        }
        if (exceptionCount > 0) {
            metaPos = applyBlockExceptions(out, dst, bitWidth, exceptionCount, byteContainer, byteContainerLen, metaPos, workspace, block);
        }
    }
    if (readPos !== packedEnd) {
        throw new Error(`FastPFOR decode: packed region mismatch (pageStart=${pageStart}, packedStart=${inPos}, consumedPackedEnd=${readPos}, expectedPackedEnd=${packedEnd}, packedWords=${packedEnd - inPos}, encoded.length=${inValues.length})`);
    }
}
/**
 * Decodes one FastPFOR page (aligned to 256-value blocks).
 *
 * Page layout in 32-bit words, relative to `pageStart`:
 * - word 0: `whereMeta` — offset from `pageStart` to the metadata section
 * - words 1 .. whereMeta-1: packed per-block base bits
 * - word whereMeta: `byteSize` — length (in bytes) of the block metadata
 * - next ceil(byteSize/4) words: block metadata bytes (byteContainer)
 * - then: exception bitmap followed by the per-bit-width exception streams
 *
 * @returns The input word index just past this page's data.
 * @throws Error If the page header or sizes are inconsistent with the input length.
 */
function decodePage(inValues, out, inPos, outPos, thisSize, workspace) {
    const pageStart = inPos | 0;
    const whereMeta = inValues[pageStart] | 0;
    // Require the metadata section (and the byteSize word at packedEnd) to be in bounds.
    if (whereMeta <= 0 || pageStart + whereMeta > inValues.length - 1) {
        throw new Error(`FastPFOR decode: invalid whereMeta=${whereMeta} at pageStart=${pageStart} (expected > 0 and pageStart+whereMeta < encoded.length=${inValues.length})`);
    }
    const packedStart = (pageStart + 1) | 0;
    const packedEnd = (pageStart + whereMeta) | 0;
    const byteSize = inValues[packedEnd] >>> 0;
    // Metadata bytes occupy ceil(byteSize / 4) whole words.
    const metaInts = (byteSize + 3) >>> 2;
    const byteContainerStart = packedEnd + 1;
    const bitmapPos = byteContainerStart + metaInts;
    if (bitmapPos >= inValues.length) {
        throw new Error(`FastPFOR decode: invalid byteSize=${byteSize} (metaInts=${metaInts}, pageStart=${pageStart}, packedEnd=${packedEnd}, byteContainerStart=${byteContainerStart}) causes bitmapPos=${bitmapPos} out of bounds (encoded.length=${inValues.length})`);
    }
    const byteContainer = materializeByteContainer(inValues, byteContainerStart, byteSize, workspace);
    const byteContainerLen = byteSize;
    // Unpack all exception streams first; blocks then consume them in order.
    const inExcept = unpackExceptionStreams(inValues, bitmapPos, workspace);
    // Reset the per-bit-width read cursors into the exception streams.
    const exceptionPointers = workspace.dataPointers;
    exceptionPointers.fill(0);
    const startOutPos = outPos | 0;
    const blocks = (thisSize / BLOCK_SIZE) | 0;
    decodePageBlocks(inValues, pageStart, packedStart, packedEnd, out, startOutPos, blocks, byteContainer, byteContainerLen, workspace);
    return inExcept;
}
/**
 * Decodes the block-aligned prefix of the output, one page at a time.
 * Only whole 256-value blocks are page-encoded; any remainder is handled by
 * the VByte tail in the caller.
 *
 * @returns The input word index just past the last decoded page.
 */
function decodeAlignedPages(inValues, out, inPos, outPos, outLength, workspace) {
    const alignedCount = greatestMultiple(outLength, BLOCK_SIZE);
    const endOut = outPos + alignedCount;
    let curOut = outPos;
    let curIn = inPos;
    while (curOut !== endOut) {
        const pageValues = Math.min(PAGE_SIZE, endOut - curOut);
        curIn = decodePage(inValues, out, curIn, curOut, pageValues, workspace);
        curOut = (curOut + pageValues) | 0;
    }
    return curIn;
}
/**
 * Decodes the VariableByte tail. Bytes are packed little-endian into 32-bit
 * words; within a value, 7-bit groups are accumulated low-to-high and a set
 * MSB marks the FINAL byte of a value (the opposite of Protobuf varints).
 *
 * @returns The word index of the next unread input word.
 * @throws Error On an unterminated value or a stream that ends early.
 */
function decodeVByte(inValues, inPos, inLength, out, outPos, expectedCount) {
    if (expectedCount === 0)
        return inPos;
    const startOut = outPos;
    const endOut = outPos + expectedCount;
    const endWord = inPos + inLength;
    let word = inPos;   // index of the 32-bit word being read
    let bit = 0;        // bit offset of the next byte within that word (0/8/16/24)
    let cursor = outPos;
    let value = 0;
    let shift = 0;
    while (word < endWord && cursor < endOut) {
        const b = (inValues[word] >>> bit) & 0xff;
        bit += 8;
        word += bit >>> 5;
        bit &= 31;
        value |= (b & 0x7f) << shift;
        if ((b & 0x80) !== 0) {
            // MSB set: this byte terminates the current value.
            out[cursor++] = value | 0;
            value = 0;
            shift = 0;
        }
        else {
            shift += 7;
            if (shift > 28) {
                throw new Error(`FastPFOR VByte: unterminated value (expected MSB=1 terminator within 5 bytes; shift=${shift}, partial=${value}, decoded=${cursor - startOut}/${expectedCount}, inPos=${word}, inEnd=${endWord})`);
            }
        }
    }
    if (cursor !== endOut) {
        throw new Error(`FastPFOR VByte: truncated stream (decoded=${cursor - startOut}, expected=${expectedCount}, consumedWords=${word - inPos}/${inLength}, vbyteStart=${inPos}, vbyteEnd=${endWord})`);
    }
    return word;
}
/**
 * Decodes a sequence of FastPFOR-encoded integers.
 *
 * Stream layout: an `alignedLength` word (a multiple of BLOCK_SIZE), the
 * page-encoded aligned prefix, then a VByte tail for the remaining values.
 *
 * @param encoded The input buffer containing FastPFOR encoded data.
 * @param numValues The number of integers expected to be decoded.
 * @param workspace Optional workspace for reuse across calls. If omitted, a new workspace is created per call.
 * @returns A newly allocated Uint32Array of length `numValues`.
 * @throws Error If the header is malformed or the stream is truncated.
 */
export function decodeFastPforInt32(encoded, numValues, workspace) {
    const out = new Uint32Array(numValues);
    const ws = workspace ?? createDecoderWorkspace();
    let readPos = 0;
    let writePos = 0;
    if (encoded.length > 0) {
        const alignedLength = encoded[readPos] | 0;
        readPos = (readPos + 1) | 0;
        if ((alignedLength & (BLOCK_SIZE - 1)) !== 0) {
            throw new Error(`FastPFOR decode: invalid alignedLength=${alignedLength} (expected multiple of ${BLOCK_SIZE})`);
        }
        if (writePos + alignedLength > out.length) {
            throw new Error(`FastPFOR decode: output buffer too small (outPos=${writePos}, alignedLength=${alignedLength}, out.length=${out.length})`);
        }
        readPos = decodeAlignedPages(encoded, out, readPos, writePos, alignedLength, ws);
        writePos = (writePos + alignedLength) | 0;
    }
    // Everything past the aligned pages is the VByte-encoded remainder.
    const tailWords = (encoded.length - readPos) | 0;
    const tailValues = (numValues - writePos) | 0;
    decodeVByte(encoded, readPos, tailWords, out, writePos, tailValues);
    return out;
}
/**
 * Unpacks 32 values of `bitWidth` bits each from `inValues` (starting at word
 * `inPos`) into `out[outPos .. outPos + 31]`.
 *
 * Common widths dispatch to unrolled kernels; other widths fall back to a
 * generic bit-cursor loop below the switch.
 */
function fastUnpack32(inValues, inPos, out, outPos, bitWidth) {
    switch (bitWidth) {
        case 2:
            fastUnpack32_2(inValues, inPos, out, outPos);
            return;
        case 3:
            fastUnpack32_3(inValues, inPos, out, outPos);
            return;
        case 4:
            fastUnpack32_4(inValues, inPos, out, outPos);
            return;
        case 5:
            fastUnpack32_5(inValues, inPos, out, outPos);
            return;
        case 6:
            fastUnpack32_6(inValues, inPos, out, outPos);
            return;
        case 7:
            fastUnpack32_7(inValues, inPos, out, outPos);
            return;
        case 8:
            fastUnpack32_8(inValues, inPos, out, outPos);
            return;
        case 9:
            fastUnpack32_9(inValues, inPos, out, outPos);
            return;
        case 10:
            fastUnpack32_10(inValues, inPos, out, outPos);
            return;
        case 11:
            fastUnpack32_11(inValues, inPos, out, outPos);
            return;
        case 12:
            fastUnpack32_12(inValues, inPos, out, outPos);
            return;
        case 16:
            fastUnpack32_16(inValues, inPos, out, outPos);
            return;
        case 32:
            // Full-width values are stored verbatim, one word each.
            for (let i = 0; i < 32; i = (i + 1) | 0) {
                out[(outPos + i) | 0] = inValues[(inPos + i) | 0] | 0;
            }
            return;
        default:
            break;
    }
    // Generic path: walk a bit cursor across the input words.
    const valueMask = MASKS[bitWidth] >>> 0;
    let inputWordIndex = inPos;
    let bitOffset = 0;
    let currentWord = inValues[inputWordIndex] >>> 0;
    for (let i = 0; i < 32; i++) {
        if (bitOffset + bitWidth <= 32) {
            // Value lies entirely within the current word.
            const value = (currentWord >>> bitOffset) & valueMask;
            out[outPos + i] = value | 0;
            bitOffset += bitWidth;
            if (bitOffset === 32) {
                bitOffset = 0;
                inputWordIndex++;
                // Do not read past the packed region after the final value.
                if (i !== 31)
                    currentWord = inValues[inputWordIndex] >>> 0;
            }
        }
        else {
            // Value straddles a word boundary: combine the top of the current
            // word with the bottom of the next.
            const lowBits = 32 - bitOffset;
            const low = currentWord >>> bitOffset;
            inputWordIndex++;
            currentWord = inValues[inputWordIndex] >>> 0;
            const highMask = MASKS[bitWidth - lowBits] >>> 0;
            const high = currentWord & highMask;
            const value = (low | (high << lowBits)) & valueMask;
            out[outPos + i] = value | 0;
            bitOffset = bitWidth - lowBits;
        }
    }
}
//# sourceMappingURL=fastPforDecoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,7 @@
/** Bit masks indexed by bit width 0-32; `MASKS[w]` has the low `w` bits set. DO NOT MUTATE. */
export declare const MASKS: Readonly<Uint32Array>;
/** Default FastPFOR page size, in values. */
export declare const DEFAULT_PAGE_SIZE = 65536;
/** Number of values per FastPFOR block. */
export declare const BLOCK_SIZE = 256;
/** Largest multiple of `factor` that is `<= value` (for non-negative inputs). */
export declare function greatestMultiple(value: number, factor: number): number;
/** Rounds `value` up to the next multiple of 32. */
export declare function roundUpToMultipleOf32(value: number): number;
/** Aligns a requested page size down to a positive multiple of BLOCK_SIZE; invalid requests fall back to DEFAULT_PAGE_SIZE. */
export declare function normalizePageSize(pageSize: number): number;
/** Reverses the byte order of a 32-bit unsigned integer. */
export declare function bswap32(value: number): number;

View File

@@ -0,0 +1,29 @@
/**
 * Bit masks indexed by bit width 0-32: MASKS[w] has the low `w` bits set.
 * DO NOT MUTATE - this is a shared constant.
 */
const maskTable = new Uint32Array(33);
// maskTable[0] stays 0 (typed arrays are zero-initialized).
for (let width = 1; width <= 32; width++) {
    // For width 32 the shift amount is 0, leaving all 32 bits set.
    maskTable[width] = 0xffffffff >>> (32 - width);
}
export const MASKS = maskTable;
export const DEFAULT_PAGE_SIZE = 65536;
export const BLOCK_SIZE = 256;
/** Largest multiple of `factor` not exceeding `value` (for non-negative inputs). */
export function greatestMultiple(value, factor) {
    const remainder = value % factor;
    return value - remainder;
}
/** Rounds `value` up to the next multiple of 32 (identity for exact multiples). */
export function roundUpToMultipleOf32(value) {
    const bumped = value + 31;
    return bumped - (bumped % 32);
}
/**
 * Aligns a requested page size down to a multiple of BLOCK_SIZE, never below
 * one block; non-finite or non-positive requests fall back to the default.
 */
export function normalizePageSize(pageSize) {
    if (!Number.isFinite(pageSize) || pageSize <= 0) {
        return DEFAULT_PAGE_SIZE;
    }
    const aligned = greatestMultiple(Math.floor(pageSize), BLOCK_SIZE);
    return aligned > 0 ? aligned : BLOCK_SIZE;
}
/** Reverses the byte order of a 32-bit unsigned integer. */
export function bswap32(value) {
    const x = value >>> 0;
    const b0 = (x >>> 24) & 0xff;       // old byte 3 -> byte 0
    const b1 = (x >>> 8) & 0xff00;      // old byte 2 -> byte 1
    const b2 = (x << 8) & 0xff0000;     // old byte 1 -> byte 2
    const b3 = (x << 24) >>> 0;         // old byte 0 -> byte 3
    return (b0 | b1 | b2 | b3) >>> 0;
}
//# sourceMappingURL=fastPforShared.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"fastPforShared.js","sourceRoot":"","sources":["../../src/decoding/fastPforShared.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,MAAM,KAAK,GAAG,IAAI,WAAW,CAAC,EAAE,CAAC,CAAC;AAClC,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC;AACb,KAAK,IAAI,QAAQ,GAAG,CAAC,EAAE,QAAQ,IAAI,EAAE,EAAE,QAAQ,EAAE,EAAE,CAAC;IAChD,KAAK,CAAC,QAAQ,CAAC,GAAG,QAAQ,KAAK,EAAE,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,UAAU,KAAK,CAAC,EAAE,GAAG,QAAQ,CAAC,CAAC;AACpF,CAAC;AACD,MAAM,CAAC,MAAM,KAAK,GAA0B,KAAK,CAAC;AAElD,MAAM,CAAC,MAAM,iBAAiB,GAAG,KAAK,CAAC;AACvC,MAAM,CAAC,MAAM,UAAU,GAAG,GAAG,CAAC;AAE9B,MAAM,UAAU,gBAAgB,CAAC,KAAa,EAAE,MAAc;IAC1D,OAAO,KAAK,GAAG,CAAC,KAAK,GAAG,MAAM,CAAC,CAAC;AACpC,CAAC;AAED,MAAM,UAAU,qBAAqB,CAAC,KAAa;IAC/C,OAAO,gBAAgB,CAAC,KAAK,GAAG,EAAE,EAAE,EAAE,CAAC,CAAC;AAC5C,CAAC;AAED,MAAM,UAAU,iBAAiB,CAAC,QAAgB;IAC9C,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,QAAQ,CAAC,IAAI,QAAQ,IAAI,CAAC;QAAE,OAAO,iBAAiB,CAAC;IAE1E,MAAM,OAAO,GAAG,gBAAgB,CAAC,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,UAAU,CAAC,CAAC;IACnE,OAAO,OAAO,KAAK,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,OAAO,CAAC;AAChD,CAAC;AAED,MAAM,UAAU,OAAO,CAAC,KAAa;IACjC,MAAM,CAAC,GAAG,KAAK,KAAK,CAAC,CAAC;IACtB,OAAO,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,IAAI,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,MAAM,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,GAAG,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC,GAAG,IAAI,CAAC,CAAC,KAAK,CAAC,CAAC;AACzG,CAAC","sourcesContent":["/**\n * Bit masks for each bitwidth 0-32.\n * DO NOT MUTATE - this is a shared constant.\n */\nconst masks = new Uint32Array(33);\nmasks[0] = 0;\nfor (let bitWidth = 1; bitWidth <= 32; bitWidth++) {\n masks[bitWidth] = bitWidth === 32 ? 
0xffffffff : 0xffffffff >>> (32 - bitWidth);\n}\nexport const MASKS: Readonly<Uint32Array> = masks;\n\nexport const DEFAULT_PAGE_SIZE = 65536;\nexport const BLOCK_SIZE = 256;\n\nexport function greatestMultiple(value: number, factor: number): number {\n return value - (value % factor);\n}\n\nexport function roundUpToMultipleOf32(value: number): number {\n return greatestMultiple(value + 31, 32);\n}\n\nexport function normalizePageSize(pageSize: number): number {\n if (!Number.isFinite(pageSize) || pageSize <= 0) return DEFAULT_PAGE_SIZE;\n\n const aligned = greatestMultiple(Math.floor(pageSize), BLOCK_SIZE);\n return aligned === 0 ? BLOCK_SIZE : aligned;\n}\n\nexport function bswap32(value: number): number {\n const x = value >>> 0;\n return (((x & 0xff) << 24) | ((x & 0xff00) << 8) | ((x >>> 8) & 0xff00) | ((x >>> 24) & 0xff)) >>> 0;\n}\n"]}

View File

@@ -0,0 +1,23 @@
// fastUnpack32_N: decodes 32 values of N bits each from `inValues` starting at
// word `inPos` into out[outPos .. outPos + 31].
export declare function fastUnpack32_1(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_2(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_3(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_4(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_5(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_6(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_7(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_8(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_9(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_10(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_11(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_12(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack32_16(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
// fastUnpack256_N: decodes one 256-value block of N-bit values (consuming
// N * 8 input words); fastUnpack256_Generic handles any other bit width.
export declare function fastUnpack256_1(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_2(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_3(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_4(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_5(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_6(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_7(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_8(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_16(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number): void;
export declare function fastUnpack256_Generic(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number, bitWidth: number): void;

View File

@@ -0,0 +1,910 @@
import { MASKS } from "./fastPforShared";
/** Unpacks 32 one-bit values from the single word at `inPos`. */
export function fastUnpack32_1(inValues, inPos, out, outPos) {
    let word = inValues[inPos] >>> 0;
    for (let i = 0; i < 32; i++) {
        out[outPos + i] = word & 1;
        word >>>= 1;
    }
}
/** Unpacks 32 two-bit values (16 per word) from the 2 words at `inPos`. */
export function fastUnpack32_2(inValues, inPos, out, outPos) {
    // Snapshot both source words up front (matches the unrolled original's
    // read-before-write order).
    const words = [inValues[inPos] >>> 0, inValues[inPos + 1] >>> 0];
    for (let i = 0; i < 32; i++) {
        out[outPos + i] = (words[i >>> 4] >>> ((i & 15) << 1)) & 0x3;
    }
}
/** Unpacks 32 three-bit values from the 3 words at `inPos`. */
export function fastUnpack32_3(inValues, inPos, out, outPos) {
    // Snapshot the source words up front (matches the unrolled original).
    const words = new Array(3);
    for (let w = 0; w < 3; w++) {
        words[w] = inValues[inPos + w] >>> 0;
    }
    for (let i = 0; i < 32; i++) {
        const bit = i * 3;
        const shift = bit & 31;
        let v = words[bit >>> 5] >>> shift;
        // A value crosses a word boundary when fewer than 3 bits remain.
        if (shift > 29) {
            v |= words[(bit >>> 5) + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x7;
    }
}
/** Unpacks 32 four-bit values (8 per word) from the 4 words at `inPos`. */
export function fastUnpack32_4(inValues, inPos, out, outPos) {
    // Snapshot the source words up front (matches the unrolled original).
    const words = new Array(4);
    for (let w = 0; w < 4; w++) {
        words[w] = inValues[inPos + w] >>> 0;
    }
    for (let i = 0; i < 32; i++) {
        out[outPos + i] = (words[i >>> 3] >>> ((i & 7) << 2)) & 0xf;
    }
}
/** Unpacks 32 five-bit values from the 5 words at `inPos`. */
export function fastUnpack32_5(inValues, inPos, out, outPos) {
    // Snapshot the source words up front (matches the unrolled original).
    const words = new Array(5);
    for (let w = 0; w < 5; w++) {
        words[w] = inValues[inPos + w] >>> 0;
    }
    for (let i = 0; i < 32; i++) {
        const bit = i * 5;
        const shift = bit & 31;
        let v = words[bit >>> 5] >>> shift;
        // A value crosses a word boundary when fewer than 5 bits remain.
        if (shift > 27) {
            v |= words[(bit >>> 5) + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x1f;
    }
}
/** Unpacks 32 six-bit values from the 6 words at `inPos`. */
export function fastUnpack32_6(inValues, inPos, out, outPos) {
    // Snapshot the source words up front (matches the unrolled original).
    const words = new Array(6);
    for (let w = 0; w < 6; w++) {
        words[w] = inValues[inPos + w] >>> 0;
    }
    for (let i = 0; i < 32; i++) {
        const bit = i * 6;
        const shift = bit & 31;
        let v = words[bit >>> 5] >>> shift;
        // A value crosses a word boundary when fewer than 6 bits remain.
        if (shift > 26) {
            v |= words[(bit >>> 5) + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x3f;
    }
}
/** Unpacks 32 seven-bit values from the 7 words at `inPos`. */
export function fastUnpack32_7(inValues, inPos, out, outPos) {
    // Snapshot the source words up front (matches the unrolled original).
    const words = new Array(7);
    for (let w = 0; w < 7; w++) {
        words[w] = inValues[inPos + w] >>> 0;
    }
    for (let i = 0; i < 32; i++) {
        const bit = i * 7;
        const shift = bit & 31;
        let v = words[bit >>> 5] >>> shift;
        // A value crosses a word boundary when fewer than 7 bits remain.
        if (shift > 25) {
            v |= words[(bit >>> 5) + 1] << (32 - shift);
        }
        out[outPos + i] = v & 0x7f;
    }
}
export function fastUnpack32_8(inValues, inPos, out, outPos) {
    /* Unpacks 32 byte-sized values: each of the 8 source words contributes
     * its four bytes in little-endian order. */
    let op = outPos;
    for (let w = 0; w < 8; w++) {
        const word = inValues[inPos + w] >>> 0;
        out[op++] = word & 0xff;
        out[op++] = (word >>> 8) & 0xff;
        out[op++] = (word >>> 16) & 0xff;
        out[op++] = word >>> 24;
    }
}
export function fastUnpack32_9(inValues, inPos, out, outPos) {
    /* Unpacks 32 values of 9 bits each from 9 consecutive 32-bit words
     * using a running bit cursor rather than unrolled shift expressions. */
    const width = 9;
    const mask = 0x1ff;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 32; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Combine with the low bits of the next word when straddling.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack32_10(inValues, inPos, out, outPos) {
    /* Unpacks 32 values of 10 bits each from 10 consecutive 32-bit words
     * via a running bit cursor. */
    const width = 10;
    const mask = 0x3ff;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 32; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack32_11(inValues, inPos, out, outPos) {
    /* Unpacks 32 values of 11 bits each from 11 consecutive 32-bit words
     * via a running bit cursor. */
    const width = 11;
    const mask = 0x7ff;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 32; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack32_12(inValues, inPos, out, outPos) {
    /* Unpacks 32 values of 12 bits each from 12 consecutive 32-bit words
     * via a running bit cursor. */
    const width = 12;
    const mask = 0xfff;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 32; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack32_16(inValues, inPos, out, outPos) {
    /* Unpacks 32 halfword values: each of the 16 source words yields its
     * low and then its high 16 bits. */
    let op = outPos;
    for (let w = 0; w < 16; w++) {
        const word = inValues[inPos + w] >>> 0;
        out[op++] = word & 0xffff;
        out[op++] = word >>> 16;
    }
}
export function fastUnpack256_1(inValues, inPos, out, outPos) {
    /* Unpacks 256 single-bit values: each of the 8 source words contributes
     * its 32 bits, least significant first. */
    let op = outPos;
    for (let w = 0; w < 8; w++) {
        const word = inValues[inPos + w] >>> 0;
        for (let bit = 0; bit < 32; bit++) {
            out[op++] = (word >>> bit) & 0x1;
        }
    }
}
export function fastUnpack256_2(inValues, inPos, out, outPos) {
    /* Unpacks 256 values of 2 bits each: sixteen values per source word,
     * sixteen source words in total. */
    let op = outPos;
    for (let w = 0; w < 16; w++) {
        const word = inValues[inPos + w] >>> 0;
        for (let shift = 0; shift < 32; shift += 2) {
            out[op++] = (word >>> shift) & 0x3;
        }
    }
}
export function fastUnpack256_3(inValues, inPos, out, outPos) {
    /* Unpacks 256 values of 3 bits each (24 source words) with a running bit
     * cursor; 32-value chunks end exactly on word boundaries, so a single
     * continuous scan matches the chunked layout. */
    const width = 3;
    const mask = 0x7;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 256; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack256_4(inValues, inPos, out, outPos) {
    /* Unpacks 256 nibble values: eight per source word, 32 source words. */
    let op = outPos;
    for (let w = 0; w < 32; w++) {
        const word = inValues[inPos + w] >>> 0;
        for (let shift = 0; shift < 32; shift += 4) {
            out[op++] = (word >>> shift) & 0xf;
        }
    }
}
export function fastUnpack256_5(inValues, inPos, out, outPos) {
    /* Unpacks 256 values of 5 bits each (40 source words) with a running bit
     * cursor; each 32-value chunk spans exactly 5 words, so the continuous
     * scan is equivalent to the chunked layout. */
    const width = 5;
    const mask = 0x1f;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 256; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack256_6(inValues, inPos, out, outPos) {
    /* Unpacks 256 values of 6 bits each (48 source words) with a running bit
     * cursor; each 32-value chunk spans exactly 6 words, so the continuous
     * scan is equivalent to the chunked layout. */
    const width = 6;
    const mask = 0x3f;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 256; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack256_7(inValues, inPos, out, outPos) {
    /* Unpacks 256 values of 7 bits each (56 source words) with a running bit
     * cursor; each 32-value chunk spans exactly 7 words, so the continuous
     * scan is equivalent to the chunked layout. */
    const width = 7;
    const mask = 0x7f;
    let word = inPos;
    let bit = 0;
    for (let i = 0; i < 256; i++) {
        let value = ((inValues[word] >>> 0) >>> bit) & mask;
        bit += width;
        if (bit >= 32) {
            bit -= 32;
            word++;
            if (bit > 0) {
                // Word-straddling value: merge in the next word's low bits.
                value |= (inValues[word] << (width - bit)) & mask;
            }
        }
        out[outPos + i] = value;
    }
}
export function fastUnpack256_8(inValues, inPos, out, outPos) {
    /* Unpacks 256 byte values: each of the 64 source words contributes its
     * four bytes in little-endian order. */
    let op = outPos;
    for (let w = 0; w < 64; w++) {
        const word = inValues[inPos + w] >>> 0;
        out[op++] = word & 0xff;
        out[op++] = (word >>> 8) & 0xff;
        out[op++] = (word >>> 16) & 0xff;
        out[op++] = word >>> 24;
    }
}
export function fastUnpack256_16(inValues, inPos, out, outPos) {
    /* Unpacks 256 halfword values: each of the 128 source words yields its
     * low and then its high 16 bits. */
    for (let i = 0; i < 128; i++) {
        const word = inValues[inPos + i] >>> 0;
        out[outPos + 2 * i] = word & 0xffff;
        out[outPos + 2 * i + 1] = word >>> 16;
    }
}
export function fastUnpack256_Generic(inValues, inPos, out, outPos, bitWidth) {
    // Fallback unpacker for 256 values of arbitrary `bitWidth`: decodes 8
    // chunks of 32 values, each chunk consuming exactly `bitWidth` words.
    // NOTE(review): relies on the module-level MASKS lookup table defined
    // elsewhere in this file; assumes MASKS[bitWidth] is the bitWidth-bit mask.
    const mask = MASKS[bitWidth] >>> 0;
    let inputWordIndex = inPos;
    let bitOffset = 0;
    let currentWord = inValues[inputWordIndex] >>> 0;
    let op = outPos;
    for (let c = 0; c < 8; c++) {
        for (let i = 0; i < 32; i++) {
            if (bitOffset + bitWidth <= 32) {
                // Fast path: the value lies entirely within the current word.
                const value = (currentWord >>> bitOffset) & mask;
                out[op + i] = value | 0;
                bitOffset += bitWidth;
                if (bitOffset === 32) {
                    bitOffset = 0;
                    inputWordIndex++;
                    if (i !== 31) {
                        // Skip the load on the chunk's last value so we never
                        // read one word past the input.
                        currentWord = inValues[inputWordIndex] >>> 0;
                    }
                }
            }
            else {
                // The value straddles a word boundary: combine the remaining
                // high bits of the current word with the low bits of the next.
                const lowBits = 32 - bitOffset;
                const low = currentWord >>> bitOffset;
                inputWordIndex++;
                currentWord = inValues[inputWordIndex] >>> 0;
                const highBits = bitWidth - lowBits;
                const highMask = (-1 >>> (32 - highBits)) >>> 0;
                const high = currentWord & highMask;
                const value = (low | (high << lowBits)) & mask;
                out[op + i] = value | 0;
                bitOffset = highBits;
            }
        }
        op += 32;
        // Each 32-value chunk is 32*bitWidth bits, i.e. whole words, so the
        // cursor always lands on a word boundary here.
        bitOffset = 0;
        if (c < 7) {
            currentWord = inValues[inputWordIndex] >>> 0;
        }
    }
}
//# sourceMappingURL=fastPforUnpack.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,9 @@
/**
* Decode FSST compressed data
*
* @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
* @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
* @param compressedData FSST Compressed data, where each entry is an index to the symbols array
* @returns Decoded data as Uint8Array
*/
export declare function decodeFsst(symbols: Uint8Array, symbolLengths: Uint32Array, compressedData: Uint8Array): Uint8Array;

View File

@@ -0,0 +1,31 @@
/**
 * Decode FSST compressed data
 *
 * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
 * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
 * @param compressedData FSST Compressed data, where each entry is an index to the symbols array
 * @returns Decoded data as Uint8Array
 */
export function decodeFsst(symbols, symbolLengths, compressedData) {
    // Prefix-sum of symbol lengths -> start offset of each symbol in `symbols`.
    const symbolOffsets = new Uint32Array(symbolLengths.length);
    for (let i = 1; i < symbolLengths.length; i++) {
        symbolOffsets[i] = symbolOffsets[i - 1] + symbolLengths[i - 1];
    }
    // First pass: compute the exact decoded size so the output can be written
    // into a single pre-allocated Uint8Array instead of growing a number[]
    // element-by-element and copying it at the end.
    let decodedLength = 0;
    for (let i = 0; i < compressedData.length; i++) {
        if (compressedData[i] === 255) {
            i++; // 255 is the escape marker: the next byte is a literal
            decodedLength++;
        }
        else {
            decodedLength += symbolLengths[compressedData[i]];
        }
    }
    // Second pass: expand symbols / escaped literals into the result buffer.
    const decodedData = new Uint8Array(decodedLength);
    let pos = 0;
    for (let i = 0; i < compressedData.length; i++) {
        const code = compressedData[i];
        if (code === 255) {
            decodedData[pos++] = compressedData[++i];
        }
        else {
            const symbolLength = symbolLengths[code];
            let symbolOffset = symbolOffsets[code];
            for (let j = 0; j < symbolLength; j++) {
                decodedData[pos++] = symbols[symbolOffset++];
            }
        }
    }
    return decodedData;
}
//# sourceMappingURL=fsstDecoder.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"fsstDecoder.js","sourceRoot":"","sources":["../../src/decoding/fsstDecoder.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AACH,iDAAiD;AACjD,MAAM,UAAU,UAAU,CAAC,OAAmB,EAAE,aAA0B,EAAE,cAA0B;IAClG,gCAAgC;IAChC,MAAM,WAAW,GAAa,EAAE,CAAC;IACjC,MAAM,aAAa,GAAa,IAAI,KAAK,CAAC,aAAa,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAExE,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,aAAa,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5C,aAAa,CAAC,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,aAAa,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;IACnE,CAAC;IAED,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,cAAc,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QAC7C,IAAI,cAAc,CAAC,CAAC,CAAC,KAAK,GAAG,EAAE,CAAC;YAC5B,WAAW,CAAC,IAAI,CAAC,cAAc,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QAC1C,CAAC;aAAM,CAAC;YACJ,MAAM,YAAY,GAAG,aAAa,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;YACtD,MAAM,YAAY,GAAG,aAAa,CAAC,cAAc,CAAC,CAAC,CAAC,CAAC,CAAC;YACtD,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,YAAY,EAAE,CAAC,EAAE,EAAE,CAAC;gBACpC,WAAW,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,GAAG,CAAC,CAAC,CAAC,CAAC;YAChD,CAAC;QACL,CAAC;IACL,CAAC;IACD,OAAO,IAAI,UAAU,CAAC,WAAW,CAAC,CAAC;AACvC,CAAC","sourcesContent":["/**\n * Decode FSST compressed data\n *\n * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes\n * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array\n * @param compressedData FSST Compressed data, where each entry is an index to the symbols array\n * @returns Decoded data as Uint8Array\n */\n//TODO: improve -> quick and dirty implementation\nexport function decodeFsst(symbols: Uint8Array, symbolLengths: Uint32Array, compressedData: Uint8Array): Uint8Array {\n //TODO: use typed array directly\n const decodedData: number[] = [];\n const symbolOffsets: number[] = new Array(symbolLengths.length).fill(0);\n\n for (let i = 1; i < symbolLengths.length; i++) {\n symbolOffsets[i] = symbolOffsets[i - 1] + symbolLengths[i - 1];\n }\n\n for (let i = 0; i < compressedData.length; i++) {\n if (compressedData[i] === 255) 
{\n decodedData.push(compressedData[++i]);\n } else {\n const symbolLength = symbolLengths[compressedData[i]];\n const symbolOffset = symbolOffsets[compressedData[i]];\n for (let j = 0; j < symbolLength; j++) {\n decodedData.push(symbols[symbolOffset + j]);\n }\n }\n }\n return new Uint8Array(decodedData);\n}\n"]}

View File

@@ -0,0 +1,5 @@
import type IntWrapper from "./intWrapper";
import type { GeometryVector } from "../vector/geometry/geometryVector";
import type { GpuVector } from "../vector/geometry/gpuVector";
import type GeometryScaling from "./geometryScaling";
export declare function decodeGeometryColumn(tile: Uint8Array, numStreams: number, offset: IntWrapper, numFeatures: number, scalingData?: GeometryScaling): GeometryVector | GpuVector;

View File

@@ -0,0 +1,289 @@
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import { decodeSignedInt32Stream, decodeLengthStreamToOffsetBuffer, decodeUnsignedConstInt32Stream, decodeUnsignedInt32Stream, getVectorType, } from "./integerStreamDecoder";
import { VectorType } from "../vector/vectorType";
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
import { LengthType } from "../metadata/tile/lengthType";
import { DictionaryType } from "../metadata/tile/dictionaryType";
import { createConstGeometryVector, createMortonEncodedConstGeometryVector, } from "../vector/geometry/constGeometryVector";
import { createFlatGeometryVector, createFlatGeometryVectorMortonEncoded } from "../vector/geometry/flatGeometryVector";
import { OffsetType } from "../metadata/tile/offsetType";
import { createConstGpuVector } from "../vector/geometry/constGpuVector";
import { createFlatGpuVector } from "../vector/geometry/flatGpuVector";
// TODO: get rid of numFeatures parameter
export function decodeGeometryColumn(tile, numStreams, offset, numFeatures, scalingData) {
const geometryTypeMetadata = decodeStreamMetadata(tile, offset);
const geometryTypesVectorType = getVectorType(geometryTypeMetadata, numFeatures, tile, offset);
let vertexOffsets;
let vertexBuffer;
let mortonSettings;
let indexBuffer;
if (geometryTypesVectorType === VectorType.CONST) {
/* All geometries in the column have the same geometry type */
const geometryType = decodeUnsignedConstInt32Stream(tile, offset, geometryTypeMetadata);
// Variables for const geometry path (directly decoded as offsets)
let geometryOffsets;
let partOffsets;
let ringOffsets;
//TODO: use geometryOffsets for that? -> but then tessellated polygons can't be used with normal polygons
// in one FeatureTable?
let triangleOffsets;
for (let i = 0; i < numStreams - 1; i++) {
const geometryStreamMetadata = decodeStreamMetadata(tile, offset);
switch (geometryStreamMetadata.physicalStreamType) {
case PhysicalStreamType.LENGTH:
switch (geometryStreamMetadata.logicalStreamType.lengthType) {
case LengthType.GEOMETRIES:
geometryOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
break;
case LengthType.PARTS:
partOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
break;
case LengthType.RINGS:
ringOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
break;
case LengthType.TRIANGLES:
triangleOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
}
break;
case PhysicalStreamType.OFFSET: {
switch (geometryStreamMetadata.logicalStreamType.offsetType) {
case OffsetType.VERTEX:
vertexOffsets = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
case OffsetType.INDEX:
indexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
}
break;
}
case PhysicalStreamType.DATA: {
if (DictionaryType.VERTEX === geometryStreamMetadata.logicalStreamType.dictionaryType) {
vertexBuffer = decodeSignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
}
else {
const mortonMetadata = geometryStreamMetadata;
mortonSettings = {
numBits: mortonMetadata.numBits,
coordinateShift: mortonMetadata.coordinateShift,
};
vertexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
}
break;
}
}
}
if (indexBuffer) {
if (geometryOffsets !== undefined || partOffsets !== undefined) {
/* Case when the indices of a Polygon outline are encoded in the tile */
const topologyVector = { geometryOffsets, partOffsets, ringOffsets };
return createConstGpuVector(numFeatures, geometryType, triangleOffsets, indexBuffer, vertexBuffer, topologyVector);
}
/* Case when the no Polygon outlines are encoded in the tile */
return createConstGpuVector(numFeatures, geometryType, triangleOffsets, indexBuffer, vertexBuffer);
}
return mortonSettings === undefined
? /* Currently only 2D coordinates (Vec2) are implemented in the encoder */
createConstGeometryVector(numFeatures, geometryType, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer)
: createMortonEncodedConstGeometryVector(numFeatures, geometryType, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer, mortonSettings);
}
/* Different geometry types are mixed in the geometry column */
const geometryTypeVector = decodeUnsignedInt32Stream(tile, offset, geometryTypeMetadata);
// Variables for flat geometry path (decoded as lengths, then converted to offsets)
let geometryLengths;
let partLengths;
let ringLengths;
//TODO: use geometryOffsets for that? -> but then tessellated polygons can't be used with normal polygons
// in one FeatureTable?
let triangleOffsets;
for (let i = 0; i < numStreams - 1; i++) {
const geometryStreamMetadata = decodeStreamMetadata(tile, offset);
switch (geometryStreamMetadata.physicalStreamType) {
case PhysicalStreamType.LENGTH:
switch (geometryStreamMetadata.logicalStreamType.lengthType) {
case LengthType.GEOMETRIES:
geometryLengths = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
case LengthType.PARTS:
partLengths = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
case LengthType.RINGS:
ringLengths = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
case LengthType.TRIANGLES:
triangleOffsets = decodeLengthStreamToOffsetBuffer(tile, offset, geometryStreamMetadata);
}
break;
case PhysicalStreamType.OFFSET:
switch (geometryStreamMetadata.logicalStreamType.offsetType) {
case OffsetType.VERTEX:
vertexOffsets = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
case OffsetType.INDEX:
indexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata);
break;
}
break;
case PhysicalStreamType.DATA:
if (DictionaryType.VERTEX === geometryStreamMetadata.logicalStreamType.dictionaryType) {
vertexBuffer = decodeSignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
}
else {
const mortonMetadata = geometryStreamMetadata;
mortonSettings = {
numBits: mortonMetadata.numBits,
coordinateShift: mortonMetadata.coordinateShift,
};
vertexBuffer = decodeUnsignedInt32Stream(tile, offset, geometryStreamMetadata, scalingData);
}
break;
}
}
// TODO: refactor the following instructions -> decode in one pass for performance reasons
/* Calculate the offsets from the length buffer for util access */
let geometryOffsets;
let partOffsets;
let ringOffsets;
if (geometryLengths) {
geometryOffsets = decodeRootLengthStream(geometryTypeVector, geometryLengths, 2);
if (partLengths && ringLengths) {
partOffsets = decodeLevel1LengthStream(geometryTypeVector, geometryOffsets, partLengths, false);
ringOffsets = decodeLevel2LengthStream(geometryTypeVector, geometryOffsets, partOffsets, ringLengths);
}
else if (partLengths) {
partOffsets = decodeLevel1WithoutRingBufferLengthStream(geometryTypeVector, geometryOffsets, partLengths);
}
}
else if (partLengths && ringLengths) {
partOffsets = decodeRootLengthStream(geometryTypeVector, partLengths, 1);
ringOffsets = decodeLevel1LengthStream(geometryTypeVector, partOffsets, ringLengths, true);
}
else if (partLengths) {
partOffsets = decodeRootLengthStream(geometryTypeVector, partLengths, 0);
}
if (indexBuffer && !partOffsets) {
/* Case when the indices of a Polygon outline are not encoded in the data so no
* topology data are present in the tile */
return createFlatGpuVector(geometryTypeVector, triangleOffsets, indexBuffer, vertexBuffer);
}
if (indexBuffer) {
/* Case when the indices of a Polygon outline are encoded in the tile */
return createFlatGpuVector(geometryTypeVector, triangleOffsets, indexBuffer, vertexBuffer, {
geometryOffsets,
partOffsets,
ringOffsets,
});
}
return mortonSettings === undefined /* Currently only 2D coordinates (Vec2) are implemented in the encoder */
? createFlatGeometryVector(geometryTypeVector, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer)
: createFlatGeometryVectorMortonEncoded(geometryTypeVector, { geometryOffsets, partOffsets, ringOffsets }, vertexOffsets, vertexBuffer, mortonSettings);
}
/*
 * The topology length buffers are decoded with specialized (non-generic) routines
 * to keep branching low in the hot path.
 */
function decodeRootLengthStream(geometryTypes, rootLengthStream, bufferId) {
    /* One offset per geometry plus the implicit leading zero. */
    const rootBufferOffsets = new Uint32Array(geometryTypes.length + 1);
    let lengthCursor = 0;
    let runningOffset = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        /* A geometry only consumes an entry from the root length stream when its
         * type code exceeds bufferId:
         *   bufferId 2 -> GeometryOffsets (MultiPolygon, MultiLineString, MultiPoint)
         *   bufferId 1 -> PartOffsets (Polygon)
         *   bufferId 0 -> PartOffsets, RingOffsets (LineString)
         * Otherwise an implicit length of 1 is used. */
        const hasRootEntry = geometryTypes[i] > bufferId;
        runningOffset += hasRootEntry ? rootLengthStream[lengthCursor++] : 1;
        rootBufferOffsets[i + 1] = runningOffset;
    }
    return rootBufferOffsets;
}
/*
 * Expands the level-1 (part) length stream into an offset buffer with a leading
 * zero entry. rootOffsetBuffer[i + 1] - rootOffsetBuffer[i] is the geometry count
 * of feature i; each geometry either consumes one entry from level1LengthBuffer
 * or gets an implicit length of 1, depending on its type.
 * Geometry type codes used below (inferred from the inline comments — TODO
 * confirm against the GeometryType enum): 1=LineString, 2=Polygon,
 * 4=MultiLineString, 5=MultiPolygon.
 */
function decodeLevel1LengthStream(geometryTypes, rootOffsetBuffer, level1LengthBuffer, isLineStringPresent) {
    /* Output size = total geometry count (last root offset) + 1 leading zero. */
    const level1BufferOffsets = new Uint32Array(rootOffsetBuffer[rootOffsetBuffer.length - 1] + 1);
    let previousOffset = 0;
    level1BufferOffsets[0] = previousOffset;
    let level1BufferCounter = 1;
    let level1LengthBufferCounter = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        const geometryType = geometryTypes[i];
        const numGeometries = rootOffsetBuffer[i + 1] - rootOffsetBuffer[i];
        if (geometryType === 5 ||
            geometryType === 2 ||
            (isLineStringPresent && (geometryType === 4 || geometryType === 1))) {
            /* For MultiPolygon, Polygon and in some cases for MultiLineString and LineString
             * a value in the level1LengthBuffer exists */
            for (let j = 0; j < numGeometries; j++) {
                previousOffset = level1BufferOffsets[level1BufferCounter++] =
                    previousOffset + level1LengthBuffer[level1LengthBufferCounter++];
            }
        }
        else {
            /* For MultiPoint and Point and in some cases for MultiLineString and LineString no value in the
             * level1LengthBuffer exists */
            for (let j = 0; j < numGeometries; j++) {
                level1BufferOffsets[level1BufferCounter++] = ++previousOffset;
            }
        }
    }
    return level1BufferOffsets;
}
/*
 * Variant of the level-1 decoding used when no ring buffer exists, i.e. the
 * buffer contains no MultiPolygon or Polygon geometry.
 */
function decodeLevel1WithoutRingBufferLengthStream(geometryTypes, rootOffsetBuffer, level1LengthBuffer) {
    /* Output size = total geometry count (last root offset) + 1 leading zero. */
    const level1BufferOffsets = new Uint32Array(rootOffsetBuffer[rootOffsetBuffer.length - 1] + 1);
    let writeCursor = 1;
    let lengthCursor = 0;
    let runningOffset = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        const geometryType = geometryTypes[i];
        const numGeometries = rootOffsetBuffer[i + 1] - rootOffsetBuffer[i];
        if (geometryType === 4 || geometryType === 1) {
            /* MultiLineString and LineString consume one entry of
             * level1LengthBuffer per geometry */
            for (let j = 0; j < numGeometries; j++) {
                runningOffset += level1LengthBuffer[lengthCursor++];
                level1BufferOffsets[writeCursor++] = runningOffset;
            }
        }
        else {
            /* MultiPoint and Point have an implicit length of 1 */
            for (let j = 0; j < numGeometries; j++) {
                level1BufferOffsets[writeCursor++] = ++runningOffset;
            }
        }
    }
    return level1BufferOffsets;
}
/*
 * Expands the level-2 (ring) length stream into an offset buffer with a leading
 * zero entry. For every geometry the number of parts is derived from the
 * level-1 offset buffer; each part then consumes one ring length. Type codes
 * 0 and 3 are treated as Point/MultiPoint (inferred from the inline comments —
 * TODO confirm against the GeometryType enum).
 */
function decodeLevel2LengthStream(geometryTypes, rootOffsetBuffer, level1OffsetBuffer, level2LengthBuffer) {
    /* Output size = total part count (last level-1 offset) + 1 leading zero. */
    const level2BufferOffsets = new Uint32Array(level1OffsetBuffer[level1OffsetBuffer.length - 1] + 1);
    let previousOffset = 0;
    level2BufferOffsets[0] = previousOffset;
    let level1OffsetBufferCounter = 1;
    let level2OffsetBufferCounter = 1;
    let level2LengthBufferCounter = 0;
    for (let i = 0; i < geometryTypes.length; i++) {
        const geometryType = geometryTypes[i];
        const numGeometries = rootOffsetBuffer[i + 1] - rootOffsetBuffer[i];
        if (geometryType !== 0 && geometryType !== 3) {
            /* For MultiPolygon, MultiLineString, Polygon and LineString a value in level2LengthBuffer
             * exists */
            for (let j = 0; j < numGeometries; j++) {
                const numParts = level1OffsetBuffer[level1OffsetBufferCounter] - level1OffsetBuffer[level1OffsetBufferCounter - 1];
                level1OffsetBufferCounter++;
                for (let k = 0; k < numParts; k++) {
                    previousOffset = level2BufferOffsets[level2OffsetBufferCounter++] =
                        previousOffset + level2LengthBuffer[level2LengthBufferCounter++];
                }
            }
        }
        else {
            /* For MultiPoint and Point no value in level2LengthBuffer exists */
            for (let j = 0; j < numGeometries; j++) {
                level2BufferOffsets[level2OffsetBufferCounter++] = ++previousOffset;
                level1OffsetBufferCounter++;
            }
        }
    }
    return level2BufferOffsets;
}
//# sourceMappingURL=geometryDecoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,6 @@
/**
 * Scaling parameters applied when decoding quantized geometry coordinates.
 * NOTE(review): semantics inferred from the scaled Vec2 decoders in
 * decodingUtils — `scale` is the multiplier and `min`/`max` the clamp bounds;
 * confirm the meaning of `extent` against the encoder.
 */
export default interface GeometryScaling {
    extent: number;
    min: number;
    max: number;
    scale?: number;
}

View File

@@ -0,0 +1,2 @@
export {};
//# sourceMappingURL=geometryScaling.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"geometryScaling.js","sourceRoot":"","sources":["../../src/decoding/geometryScaling.ts"],"names":[],"mappings":"","sourcesContent":["export default interface GeometryScaling {\n extent: number;\n min: number;\n max: number;\n scale?: number;\n}\n"]}

View File

@@ -0,0 +1,8 @@
/**
 * Mutable integer box; the decoders use it as a shared, advanceable read cursor.
 */
export default class IntWrapper {
    private value;
    constructor(value: number);
    /** Returns the current value. */
    get(): number;
    /** Replaces the current value. */
    set(v: number): void;
    /** Post-increment: returns the value before adding 1. */
    increment(): number;
    /** Adds `v` to the current value. */
    add(v: number): void;
}

19
node_modules/@maplibre/mlt/dist/decoding/intWrapper.js generated vendored Normal file
View File

@@ -0,0 +1,19 @@
// Ported from https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/IntWrapper.java
export default class IntWrapper {
constructor(value) {
this.value = value;
}
get() {
return this.value;
}
set(v) {
this.value = v;
}
increment() {
return this.value++;
}
add(v) {
this.value += v;
}
}
//# sourceMappingURL=intWrapper.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"intWrapper.js","sourceRoot":"","sources":["../../src/decoding/intWrapper.ts"],"names":[],"mappings":"AAAA,4HAA4H;AAE5H,MAAM,CAAC,OAAO,OAAO,UAAU;IAC3B,YAAoB,KAAa;QAAb,UAAK,GAAL,KAAK,CAAQ;IAAG,CAAC;IAE9B,GAAG;QACN,OAAO,IAAI,CAAC,KAAK,CAAC;IACtB,CAAC;IAEM,GAAG,CAAC,CAAS;QAChB,IAAI,CAAC,KAAK,GAAG,CAAC,CAAC;IACnB,CAAC;IAEM,SAAS;QACZ,OAAO,IAAI,CAAC,KAAK,EAAE,CAAC;IACxB,CAAC;IAEM,GAAG,CAAC,CAAS;QAChB,IAAI,CAAC,KAAK,IAAI,CAAC,CAAC;IACpB,CAAC;CACJ","sourcesContent":["// Ported from https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/IntWrapper.java\n\nexport default class IntWrapper {\n constructor(private value: number) {}\n\n public get(): number {\n return this.value;\n }\n\n public set(v: number): void {\n this.value = v;\n }\n\n public increment(): number {\n return this.value++;\n }\n\n public add(v: number): void {\n this.value += v;\n }\n}\n"]}

View File

@@ -0,0 +1,54 @@
import type IntWrapper from "./intWrapper";
import { type FastPforWireDecodeWorkspace } from "./fastPforDecoder";
export type { FastPforWireDecodeWorkspace } from "./fastPforDecoder";
export { createFastPforWireDecodeWorkspace } from "./fastPforDecoder";
/* Varint (LEB128) decoding — the IntWrapper cursor is advanced past the consumed bytes. */
export declare function decodeVarintInt32(buf: Uint8Array, bufferOffset: IntWrapper, numValues: number): Uint32Array;
export declare function decodeVarintInt64(src: Uint8Array, offset: IntWrapper, numValues: number): BigUint64Array;
/* Float64-backed 64-bit varints; values are limited to 53 bits of precision. */
export declare function decodeVarintFloat64(src: Uint8Array, offset: IntWrapper, numValues: number): Float64Array;
/* FastPFOR decoding of a big-endian int32 word stream. */
export declare function decodeFastPfor(encodedBytes: Uint8Array, expectedValueCount: number, encodedByteLength: number, offset: IntWrapper): Uint32Array;
export declare function decodeFastPforWithWorkspace(encodedBytes: Uint8Array, expectedValueCount: number, encodedByteLength: number, offset: IntWrapper, workspace: FastPforWireDecodeWorkspace): Uint32Array;
/* ZigZag decoding (single values and bulk). */
export declare function decodeZigZagInt32Value(encoded: number): number;
export declare function decodeZigZagInt64Value(encoded: bigint): bigint;
export declare function decodeZigZagFloat64Value(encoded: number): number;
export declare function decodeZigZagInt32(encodedData: Uint32Array): Int32Array;
export declare function decodeZigZagInt64(encodedData: BigUint64Array): BigInt64Array;
/* In-place variant. */
export declare function decodeZigZagFloat64(encodedData: Float64Array): void;
/* Run-length decoding; layout is [len1..lenN, value1..valueN]. When
 * numTotalValues is omitted it is derived from the run lengths. */
export declare function decodeUnsignedRleInt32(encodedData: Uint32Array, numRuns: number, numTotalValues?: number): Uint32Array;
export declare function decodeUnsignedRleInt64(encodedData: BigUint64Array, numRuns: number, numTotalValues?: number): BigUint64Array;
export declare function decodeUnsignedRleFloat64(encodedData: Float64Array, numRuns: number, numTotalValues: number): Float64Array;
/* Delta / combined decodings. */
export declare function decodeZigZagDeltaInt32(data: Uint32Array): Int32Array;
export declare function decodeZigZagDeltaInt64(data: BigInt64Array | BigUint64Array): BigInt64Array;
export declare function decodeZigZagDeltaFloat64(data: Float64Array): void;
export declare function decodeZigZagRleInt32(data: Uint32Array, numRuns: number, numTotalValues?: number): Int32Array;
export declare function decodeZigZagRleInt64(data: BigUint64Array, numRuns: number, numTotalValues?: number): BigInt64Array;
export declare function decodeZigZagRleFloat64(data: Float64Array, numRuns: number, numTotalValues: number): Float64Array;
export declare function fastInverseDelta(data: Uint32Array | Int32Array): void;
export declare function inverseDelta(data: Uint32Array): void;
export declare function decodeComponentwiseDeltaVec2(data: Uint32Array): Int32Array;
export declare function decodeComponentwiseDeltaVec2Scaled(data: Uint32Array, scale: number, min: number, max: number): Int32Array;
export declare function decodeZigZagDeltaOfDeltaInt32(data: Uint32Array): Uint32Array;
export declare function decodeZigZagRleDeltaInt32(data: Uint32Array, numRuns: number, numTotalValues: number): Int32Array;
export declare function decodeRleDeltaInt32(data: Uint32Array, numRuns: number, numTotalValues: number): Uint32Array;
/**
 * Decode Delta-RLE with multiple runs by fully reconstructing values.
 *
 * @param data RLE encoded data: [run1, run2, ..., value1, value2, ...]
 * @param numRuns Number of runs in the RLE encoding
 * @param numValues Total number of values to reconstruct
 * @returns Reconstructed values with deltas applied
 */
export declare function decodeDeltaRleInt32(data: Uint32Array, numRuns: number, numValues: number): Int32Array;
/**
 * Decode Delta-RLE with multiple runs for 64-bit integers.
 */
export declare function decodeDeltaRleInt64(data: BigUint64Array, numRuns: number, numValues: number): BigInt64Array;
/* Unsigned (wrap-around) variants of the delta decodings. */
export declare function decodeUnsignedZigZagDeltaInt32(data: Uint32Array): Uint32Array;
export declare function decodeUnsignedZigZagDeltaInt64(data: BigUint64Array): BigUint64Array;
export declare function decodeUnsignedComponentwiseDeltaVec2(data: Uint32Array): Uint32Array;
export declare function decodeUnsignedComponentwiseDeltaVec2Scaled(data: Uint32Array, scale: number, min: number, max: number): Uint32Array;
/* Const-RLE / sequence-RLE decodings: a single (or base+delta) value. */
export declare function decodeUnsignedConstRleInt32(data: Int32Array | Uint32Array): number;
export declare function decodeZigZagConstRleInt32(data: Int32Array | Uint32Array): number;
export declare function decodeZigZagSequenceRleInt32(data: Int32Array | Uint32Array): [baseValue: number, delta: number];
export declare function decodeUnsignedConstRleInt64(data: BigInt64Array | BigUint64Array): bigint;
export declare function decodeZigZagConstRleInt64(data: BigInt64Array | BigUint64Array): bigint;
export declare function decodeZigZagSequenceRleInt64(data: BigInt64Array | BigUint64Array): [baseValue: bigint, delta: bigint];

View File

@@ -0,0 +1,598 @@
import { createFastPforWireDecodeWorkspace, decodeFastPforInt32, ensureFastPforWireEncodedWordsCapacity, } from "./fastPforDecoder";
import { decodeBigEndianInt32sInto } from "./bigEndianDecode";
export { createFastPforWireDecodeWorkspace } from "./fastPforDecoder";
//based on https://github.com/mapbox/pbf/blob/main/index.js
/*
 * Decodes `numValues` LEB128 varints (at most 5 bytes each; only the low 4 bits
 * of a 5th byte are used) starting at `bufferOffset`, which is advanced past
 * the consumed bytes.
 */
export function decodeVarintInt32(buf, bufferOffset, numValues) {
    const dst = new Uint32Array(numValues);
    let pos = bufferOffset.get();
    for (let i = 0; i < numValues; i++) {
        let value = 0;
        let shift = 0;
        let byte;
        do {
            byte = buf[pos++];
            /* A 5th byte (shift 28) contributes only its low 4 bits. */
            value |= (byte & (shift === 28 ? 0x0f : 0x7f)) << shift;
            shift += 7;
        } while (!(byte < 0x80) && shift <= 28);
        dst[i] = value;
    }
    bufferOffset.set(pos);
    return dst;
}
/*
 * Decodes `numValues` varint-encoded 64-bit values into a BigUint64Array,
 * advancing `offset` past the consumed bytes as a side effect.
 */
export function decodeVarintInt64(src, offset, numValues) {
    const values = new BigUint64Array(numValues);
    let i = 0;
    while (i < values.length) {
        values[i++] = decodeVarintInt64Value(src, offset);
    }
    return values;
}
// Source: https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/util/VarInt.java
/*
 * Reads a single LEB128 varint (up to 64 bits) from `bytes` starting at `pos`,
 * advancing `pos` past the consumed bytes, and returns the unsigned value as a
 * BigInt. Throws once the shift reaches 64 bits (i.e. too many continuation
 * bytes). Note: a buffer that ends mid-varint terminates the loop silently and
 * returns the partial value.
 */
function decodeVarintInt64Value(bytes, pos) {
    let value = 0n;
    let shift = 0;
    let index = pos.get();
    while (index < bytes.length) {
        const b = bytes[index++];
        value |= BigInt(b & 0x7f) << BigInt(shift);
        if ((b & 0x80) === 0) {
            // Continuation bit clear: this was the last byte of the varint.
            break;
        }
        shift += 7;
        if (shift >= 64) {
            throw new Error("Varint too long");
        }
    }
    pos.set(index);
    return value;
}
/*
 * Decoding Int64 values to BigInt is more than an order of magnitude slower in
 * the tests than using a Float64, so this decoding method limits the max size
 * of a Long value to 53 bits.
 */
export function decodeVarintFloat64(src, offset, numValues) {
    const dst = new Float64Array(numValues);
    let i = 0;
    while (i < numValues) {
        dst[i++] = decodeVarintFloat64Value(src, offset);
    }
    return dst;
}
//based on https://github.com/mapbox/pbf/blob/main/index.js
/*
 * Decodes one varint into a JS number. The low 28 bits are accumulated here;
 * when a 5th byte is present, its low 4 bits complete the low 32-bit word and
 * decodeVarintRemainder re-reads that same byte (note: `offset` is NOT
 * incremented before the call) to extract the high-word bits.
 */
function decodeVarintFloat64Value(buf, offset) {
    let val;
    let b;
    b = buf[offset.get()];
    offset.increment();
    val = b & 0x7f;
    if (b < 0x80)
        return val;
    b = buf[offset.get()];
    offset.increment();
    val |= (b & 0x7f) << 7;
    if (b < 0x80)
        return val;
    b = buf[offset.get()];
    offset.increment();
    val |= (b & 0x7f) << 14;
    if (b < 0x80)
        return val;
    b = buf[offset.get()];
    offset.increment();
    val |= (b & 0x7f) << 21;
    if (b < 0x80)
        return val;
    // 5th byte: low 4 bits finish the low word; the byte is deliberately left
    // unconsumed so decodeVarintRemainder can read its high bits.
    b = buf[offset.get()];
    val |= (b & 0x0f) << 28;
    return decodeVarintRemainder(val, buf, offset);
}
/*
 * Continues decoding the high 32-bit word of a varint. `l` holds the low word;
 * the current byte (the 5th of the varint, already partially consumed by the
 * caller) supplies bits 32..34 via (b & 0x70) >> 4. The result is
 * h * 2^32 + (l >>> 0), which is exact only up to 53 bits of magnitude.
 * Throws if the varint exceeds 10 bytes.
 */
function decodeVarintRemainder(l, buf, offset) {
    let h;
    let b;
    b = buf[offset.get()];
    offset.increment();
    h = (b & 0x70) >> 4;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 3;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 10;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 17;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x7f) << 24;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    b = buf[offset.get()];
    offset.increment();
    h |= (b & 0x01) << 31;
    if (b < 0x80)
        return h * 0x100000000 + (l >>> 0);
    throw new Error("Expected varint not more than 10 bytes");
}
/*
 * One-shot FastPFOR decode: builds a workspace sized in 32-bit words
 * (encodedByteLength >>> 2) and delegates to the workspace variant.
 */
export function decodeFastPfor(encodedBytes, expectedValueCount, encodedByteLength, offset) {
    const oneShotWorkspace = createFastPforWireDecodeWorkspace(encodedByteLength >>> 2);
    return decodeFastPforWithWorkspace(encodedBytes, expectedValueCount, encodedByteLength, offset, oneShotWorkspace);
}
/*
 * Decodes a FastPFOR-compressed big-endian int32 word stream, reusing the
 * caller-provided workspace buffers. Advances `offset` past the payload.
 */
export function decodeFastPforWithWorkspace(encodedBytes, expectedValueCount, encodedByteLength, offset, workspace) {
    const startByte = offset.get();
    /* The wire format is a sequence of int32 words, so the byte length must be
     * word-aligned. */
    if ((encodedByteLength & 3) !== 0) {
        throw new Error(`FastPFOR: invalid encodedByteLength=${encodedByteLength} at offset=${startByte} (encodedBytes.length=${encodedBytes.length}; expected a multiple of 4 bytes for an int32 big-endian word stream)`);
    }
    const wordCount = encodedByteLength >>> 2;
    const words = ensureFastPforWireEncodedWordsCapacity(workspace, wordCount);
    decodeBigEndianInt32sInto(encodedBytes, startByte, encodedByteLength, words);
    const decodedValues = decodeFastPforInt32(words.subarray(0, wordCount), expectedValueCount, workspace.decoderWorkspace);
    offset.add(encodedByteLength);
    return decodedValues;
}
/* ZigZag decode for int32: even -> n/2, odd -> -(n+1)/2, via shift/xor. */
export function decodeZigZagInt32Value(encoded) {
    const sign = -(encoded & 1);
    return (encoded >>> 1) ^ sign;
}
/* ZigZag decode for 64-bit values represented as BigInt. */
export function decodeZigZagInt64Value(encoded) {
    const sign = -(encoded & 1n);
    return (encoded >> 1n) ^ sign;
}
/* ZigZag decode for Float64-backed values; avoids bitwise ops so magnitudes
 * above 2^31 are preserved. */
export function decodeZigZagFloat64Value(encoded) {
    if (encoded % 2 === 1) {
        return (encoded + 1) / -2;
    }
    return encoded / 2;
}
/* Bulk zigzag decode into a new signed Int32Array. */
export function decodeZigZagInt32(encodedData) {
    const decodedValues = new Int32Array(encodedData.length);
    for (let i = 0; i < decodedValues.length; i++) {
        const v = encodedData[i];
        decodedValues[i] = (v >>> 1) ^ -(v & 1);
    }
    return decodedValues;
}
/* Bulk 64-bit zigzag decode into a new signed BigInt64Array. */
export function decodeZigZagInt64(encodedData) {
    const decodedValues = new BigInt64Array(encodedData.length);
    for (let i = 0; i < decodedValues.length; i++) {
        const v = encodedData[i];
        decodedValues[i] = (v >> 1n) ^ -(v & 1n);
    }
    return decodedValues;
}
/* In-place bulk zigzag decode for Float64-backed values. */
export function decodeZigZagFloat64(encodedData) {
    for (let i = 0; i < encodedData.length; i++) {
        const v = encodedData[i];
        encodedData[i] = v % 2 === 1 ? (v + 1) / -2 : v / 2;
    }
}
/*
 * RLE decode; layout is [len1..lenN, value1..valueN]. When numTotalValues is
 * omitted (nullable case) it is derived by summing the run lengths.
 */
export function decodeUnsignedRleInt32(encodedData, numRuns, numTotalValues) {
    if (numTotalValues === undefined) {
        let total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += encodedData[run];
        }
        numTotalValues = total;
    }
    const decodedValues = new Uint32Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = encodedData[run];
        decodedValues.fill(encodedData[run + numRuns], writePos, writePos + runLength);
        writePos += runLength;
    }
    return decodedValues;
}
/*
 * 64-bit RLE decode; layout is [len1..lenN, value1..valueN]. When
 * numTotalValues is omitted it is derived by summing the run lengths.
 */
export function decodeUnsignedRleInt64(encodedData, numRuns, numTotalValues) {
    if (numTotalValues === undefined) {
        let total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += Number(encodedData[run]);
        }
        numTotalValues = total;
    }
    const decodedValues = new BigUint64Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = Number(encodedData[run]);
        decodedValues.fill(encodedData[run + numRuns], writePos, writePos + runLength);
        writePos += runLength;
    }
    return decodedValues;
}
/* Float64 RLE decode; layout is [len1..lenN, value1..valueN]. */
export function decodeUnsignedRleFloat64(encodedData, numRuns, numTotalValues) {
    const decodedValues = new Float64Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = encodedData[run];
        decodedValues.fill(encodedData[run + numRuns], writePos, writePos + runLength);
        writePos += runLength;
    }
    return decodedValues;
}
/*
 * Decodes zigzag-encoded delta values into a NEW Int32Array (despite the
 * upstream "in place" wording, the input is not modified). The loop is 4x
 * unrolled; note that in JS (data.length / 4) * 4 === data.length (float
 * division), so the unrolled loop stops near the end and the scalar tail loop
 * finishes the remainder.
 * Inspired by https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/differential/Delta.java
 */
export function decodeZigZagDeltaInt32(data) {
    const decodedValues = new Int32Array(data.length);
    decodedValues[0] = decodeZigZagInt32Value(data[0]);
    const sz0 = (data.length / 4) * 4;
    let i = 1;
    if (sz0 >= 4) {
        for (; i < sz0 - 4; i += 4) {
            const data1 = data[i];
            const data2 = data[i + 1];
            const data3 = data[i + 2];
            const data4 = data[i + 3];
            decodedValues[i] = decodeZigZagInt32Value(data1) + decodedValues[i - 1];
            decodedValues[i + 1] = decodeZigZagInt32Value(data2) + decodedValues[i];
            decodedValues[i + 2] = decodeZigZagInt32Value(data3) + decodedValues[i + 1];
            decodedValues[i + 3] = decodeZigZagInt32Value(data4) + decodedValues[i + 2];
        }
    }
    // Scalar tail for the elements the unrolled loop did not cover.
    for (; i !== data.length; ++i) {
        decodedValues[i] = decodeZigZagInt32Value(data[i]) + decodedValues[i - 1];
    }
    return decodedValues;
}
/*
 * 64-bit variant of decodeZigZagDeltaInt32: decodes zigzag-encoded deltas into
 * a new BigInt64Array (input untouched), 4x unrolled with a scalar tail.
 */
export function decodeZigZagDeltaInt64(data) {
    const decodedValues = new BigInt64Array(data.length);
    decodedValues[0] = decodeZigZagInt64Value(data[0]);
    const sz0 = (data.length / 4) * 4;
    let i = 1;
    if (sz0 >= 4) {
        for (; i < sz0 - 4; i += 4) {
            const data1 = data[i];
            const data2 = data[i + 1];
            const data3 = data[i + 2];
            const data4 = data[i + 3];
            decodedValues[i] = decodeZigZagInt64Value(data1) + decodedValues[i - 1];
            decodedValues[i + 1] = decodeZigZagInt64Value(data2) + decodedValues[i];
            decodedValues[i + 2] = decodeZigZagInt64Value(data3) + decodedValues[i + 1];
            decodedValues[i + 3] = decodeZigZagInt64Value(data4) + decodedValues[i + 2];
        }
    }
    // Scalar tail for the elements the unrolled loop did not cover.
    for (; i !== decodedValues.length; ++i) {
        decodedValues[i] = decodeZigZagInt64Value(data[i]) + decodedValues[i - 1];
    }
    return decodedValues;
}
/*
 * IN-PLACE zigzag-delta decode for Float64-backed values, 4x unrolled with a
 * scalar tail (same structure as the Int32/Int64 variants above).
 */
export function decodeZigZagDeltaFloat64(data) {
    data[0] = decodeZigZagFloat64Value(data[0]);
    const sz0 = (data.length / 4) * 4;
    let i = 1;
    if (sz0 >= 4) {
        for (; i < sz0 - 4; i += 4) {
            const data1 = data[i];
            const data2 = data[i + 1];
            const data3 = data[i + 2];
            const data4 = data[i + 3];
            data[i] = decodeZigZagFloat64Value(data1) + data[i - 1];
            data[i + 1] = decodeZigZagFloat64Value(data2) + data[i];
            data[i + 2] = decodeZigZagFloat64Value(data3) + data[i + 1];
            data[i + 3] = decodeZigZagFloat64Value(data4) + data[i + 2];
        }
    }
    // Scalar tail for the elements the unrolled loop did not cover.
    for (; i !== data.length; ++i) {
        data[i] = decodeZigZagFloat64Value(data[i]) + data[i - 1];
    }
}
/*
 * RLE decode of zigzag-encoded int32 values; layout is [len1..lenN, value1..valueN].
 * When numTotalValues is omitted it is derived by summing the run lengths.
 */
export function decodeZigZagRleInt32(data, numRuns, numTotalValues) {
    if (numTotalValues === undefined) {
        let total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += data[run];
        }
        numTotalValues = total;
    }
    const decodedValues = new Int32Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const encoded = data[run + numRuns];
        decodedValues.fill((encoded >>> 1) ^ -(encoded & 1), writePos, writePos + runLength);
        writePos += runLength;
    }
    return decodedValues;
}
/*
 * RLE decode of zigzag-encoded 64-bit values; layout is [len1..lenN, value1..valueN].
 * When numTotalValues is omitted it is derived by summing the run lengths.
 */
export function decodeZigZagRleInt64(data, numRuns, numTotalValues) {
    if (numTotalValues === undefined) {
        let total = 0;
        for (let run = 0; run < numRuns; run++) {
            total += Number(data[run]);
        }
        numTotalValues = total;
    }
    const decodedValues = new BigInt64Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = Number(data[run]);
        const encoded = data[run + numRuns];
        decodedValues.fill((encoded >> 1n) ^ -(encoded & 1n), writePos, writePos + runLength);
        writePos += runLength;
    }
    return decodedValues;
}
/* RLE decode of zigzag-encoded Float64-backed values. */
export function decodeZigZagRleFloat64(data, numRuns, numTotalValues) {
    const decodedValues = new Float64Array(numTotalValues);
    let writePos = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const encoded = data[run + numRuns];
        const value = encoded % 2 === 1 ? (encoded + 1) / -2 : encoded / 2;
        decodedValues.fill(value, writePos, writePos + runLength);
        writePos += runLength;
    }
    return decodedValues;
}
/*
 * In-place inverse delta (running prefix sum) over the array, 4x unrolled.
 * Note: (data.length / 4) * 4 is float math in JS and equals data.length, so
 * the unrolled loop stops a few elements early and the while-loop finishes the
 * tail.
 * Inspired by https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/differential/Delta.java
 */
export function fastInverseDelta(data) {
    const sz0 = (data.length / 4) * 4;
    let i = 1;
    if (sz0 >= 4) {
        // `a` carries the running prefix sum across the unrolled iterations.
        for (let a = data[0]; i < sz0 - 4; i += 4) {
            a = data[i] += a;
            a = data[i + 1] += a;
            a = data[i + 2] += a;
            a = data[i + 3] += a;
        }
    }
    // Scalar tail for the elements the unrolled loop did not cover.
    while (i !== data.length) {
        data[i] += data[i - 1];
        ++i;
    }
}
/* Simple in-place inverse delta: every element becomes the running sum. */
export function inverseDelta(data) {
    let i = 0;
    let previous = 0;
    while (i < data.length) {
        data[i] += previous;
        previous = data[i];
        i++;
    }
}
/*
 * Decodes a zigzag componentwise-delta encoded Vec2 stream (x0,y0,x1,y1,...)
 * into a NEW Int32Array (despite the upstream "in place" wording): each x delta
 * is relative to the previous x, each y delta to the previous y. 4x unrolled
 * (two vertices per iteration) with a two-components-per-step tail loop.
 * Inspired by https://github.com/lemire/JavaFastPFOR/blob/master/src/main/java/me/lemire/integercompression/differential/Delta.java
 */
export function decodeComponentwiseDeltaVec2(data) {
    if (data.length < 2)
        return new Int32Array(data);
    const decodedData = new Int32Array(data.length);
    decodedData[0] = decodeZigZagInt32Value(data[0]);
    decodedData[1] = decodeZigZagInt32Value(data[1]);
    const sz0 = (data.length / 4) * 4;
    let i = 2;
    if (sz0 >= 4) {
        for (; i < sz0 - 4; i += 4) {
            const x1 = data[i];
            const y1 = data[i + 1];
            const x2 = data[i + 2];
            const y2 = data[i + 3];
            decodedData[i] = decodeZigZagInt32Value(x1) + decodedData[i - 2];
            decodedData[i + 1] = decodeZigZagInt32Value(y1) + decodedData[i - 1];
            decodedData[i + 2] = decodeZigZagInt32Value(x2) + decodedData[i];
            decodedData[i + 3] = decodeZigZagInt32Value(y2) + decodedData[i + 1];
        }
    }
    // Tail: one vertex (two components) per step.
    for (; i !== data.length; i += 2) {
        decodedData[i] = decodeZigZagInt32Value(data[i]) + decodedData[i - 2];
        decodedData[i + 1] = decodeZigZagInt32Value(data[i + 1]) + decodedData[i - 1];
    }
    return decodedData;
}
/*
 * Decodes a componentwise zigzag-delta encoded Vec2 stream (x0,y0,x1,y1,...)
 * and maps every component through round(value * scale) clamped to [min, max].
 * Returns a new Int32Array; `data` is left untouched.
 *
 * FIX: sz0 was computed as data.length / 16, so the 4x-unrolled fast path
 * practically never ran (and covered only a fraction of the stream when it
 * did). It now mirrors the sibling decodeComponentwiseDeltaVec2 using
 * (data.length / 4) * 4. Produced values are unchanged — the tail loop was
 * already handling the remainder correctly.
 */
export function decodeComponentwiseDeltaVec2Scaled(data, scale, min, max) {
    /* Inline zigzag decode (same formula as decodeZigZagInt32Value). */
    const zigZagDecode = (v) => (v >>> 1) ^ -(v & 1);
    if (data.length < 2)
        return new Int32Array(data);
    const decodedData = new Int32Array(data.length);
    let previousVertexX = zigZagDecode(data[0]);
    let previousVertexY = zigZagDecode(data[1]);
    decodedData[0] = clamp(Math.round(previousVertexX * scale), min, max);
    decodedData[1] = clamp(Math.round(previousVertexY * scale), min, max);
    const sz0 = (data.length / 4) * 4;
    let i = 2;
    if (sz0 >= 4) {
        /* Unrolled fast path: two vertices (four components) per iteration. */
        for (; i < sz0 - 4; i += 4) {
            const currentVertexX = zigZagDecode(data[i]) + previousVertexX;
            const currentVertexY = zigZagDecode(data[i + 1]) + previousVertexY;
            decodedData[i] = clamp(Math.round(currentVertexX * scale), min, max);
            decodedData[i + 1] = clamp(Math.round(currentVertexY * scale), min, max);
            previousVertexX = zigZagDecode(data[i + 2]) + currentVertexX;
            previousVertexY = zigZagDecode(data[i + 3]) + currentVertexY;
            decodedData[i + 2] = clamp(Math.round(previousVertexX * scale), min, max);
            decodedData[i + 3] = clamp(Math.round(previousVertexY * scale), min, max);
        }
    }
    /* Tail: one vertex (two components) per iteration. */
    for (; i !== data.length; i += 2) {
        previousVertexX += zigZagDecode(data[i]);
        previousVertexY += zigZagDecode(data[i + 1]);
        decodedData[i] = clamp(Math.round(previousVertexX * scale), min, max);
        decodedData[i + 1] = clamp(Math.round(previousVertexY * scale), min, max);
    }
    return decodedData;
}
/* Clamps n into the inclusive range [min, max]. */
function clamp(n, min, max) {
    return Math.min(max, Math.max(min, n));
}
/* Transform data to allow util access ------------------------------------------------------------------------ */
/*
 * Reconstructs an offset buffer from zigzag-encoded second-order deltas.
 * The output has one extra leading 0 entry and is returned as a Uint32Array.
 */
export function decodeZigZagDeltaOfDeltaInt32(data) {
    const decodedData = new Int32Array(data.length + 1);
    decodedData[0] = 0;
    let deltaSum = (data[0] >>> 1) ^ -(data[0] & 1);
    decodedData[1] = deltaSum;
    for (let i = 2; i !== decodedData.length; ++i) {
        const encoded = data[i - 1];
        deltaSum += (encoded >>> 1) ^ -(encoded & 1);
        decodedData[i] = decodedData[i - 1] + deltaSum;
    }
    return new Uint32Array(decodedData);
}
/*
 * RLE of zigzag-encoded deltas -> cumulative offsets with a leading 0 entry.
 * Layout of data: [len1..lenN, value1..valueN].
 */
export function decodeZigZagRleDeltaInt32(data, numRuns, numTotalValues) {
    const decodedValues = new Int32Array(numTotalValues + 1);
    let previousValue = 0;
    let writePos = 1;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const encoded = data[run + numRuns];
        const delta = (encoded >>> 1) ^ -(encoded & 1);
        for (let j = 0; j < runLength; j++) {
            decodedValues[writePos] = previousValue + delta;
            previousValue = decodedValues[writePos++];
        }
    }
    return decodedValues;
}
/*
 * RLE of unsigned deltas -> cumulative offsets with a leading 0 entry.
 * Layout of data: [len1..lenN, value1..valueN].
 */
export function decodeRleDeltaInt32(data, numRuns, numTotalValues) {
    const decodedValues = new Uint32Array(numTotalValues + 1);
    let previousValue = 0;
    let writePos = 1;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const delta = data[run + numRuns];
        for (let j = 0; j < runLength; j++) {
            decodedValues[writePos] = previousValue + delta;
            previousValue = decodedValues[writePos++];
        }
    }
    return decodedValues;
}
/**
 * Decode Delta-RLE with multiple runs by fully reconstructing values.
 *
 * @param data RLE encoded data: [run1, run2, ..., value1, value2, ...]
 * @param numRuns Number of runs in the RLE encoding
 * @param numValues Total number of values to reconstruct
 * @returns Reconstructed values with deltas applied
 */
export function decodeDeltaRleInt32(data, numRuns, numValues) {
    const result = new Int32Array(numValues);
    let writePos = 0;
    let current = 0;
    for (let run = 0; run < numRuns; run++) {
        const runLength = data[run];
        const encoded = data[run + numRuns];
        /* Each run repeats the same zigzag-decoded delta runLength times. */
        const delta = (encoded >>> 1) ^ -(encoded & 1);
        for (let j = 0; j < runLength; j++) {
            current += delta;
            result[writePos++] = current;
        }
    }
    return result;
}
/**
 * Decode Delta-RLE with multiple runs for 64-bit integers.
 */
export function decodeDeltaRleInt64(data, numRuns, numValues) {
    const result = new BigInt64Array(numValues);
    let writePos = 0;
    let current = 0n;
    for (let run = 0; run < numRuns; run++) {
        const runLength = Number(data[run]);
        const encoded = data[run + numRuns];
        /* Each run repeats the same zigzag-decoded delta runLength times. */
        const delta = (encoded >> 1n) ^ -(encoded & 1n);
        for (let j = 0; j < runLength; j++) {
            current += delta;
            result[writePos++] = current;
        }
    }
    return result;
}
/* ZigZag-delta decode with wrap-around into the unsigned 32-bit range. */
export function decodeUnsignedZigZagDeltaInt32(data) {
    const decodedValues = new Uint32Array(data.length);
    let previous = 0;
    for (let i = 0; i < data.length; i++) {
        const delta = (data[i] >>> 1) ^ -(data[i] & 1);
        previous = (previous + delta) >>> 0;
        decodedValues[i] = previous;
    }
    return decodedValues;
}
export function decodeUnsignedZigZagDeltaInt64(data) {
const decodedValues = new BigUint64Array(data.length);
decodedValues[0] = BigInt.asUintN(64, decodeZigZagInt64Value(data[0]));
for (let i = 1; i < data.length; i++) {
decodedValues[i] = BigInt.asUintN(64, decodedValues[i - 1] + decodeZigZagInt64Value(data[i]));
}
return decodedValues;
}
export function decodeUnsignedComponentwiseDeltaVec2(data) {
if (data.length < 2) {
return new Uint32Array(data);
}
const decodedData = new Uint32Array(data.length);
decodedData[0] = decodeZigZagInt32Value(data[0]) >>> 0;
decodedData[1] = decodeZigZagInt32Value(data[1]) >>> 0;
for (let i = 2; i < data.length; i += 2) {
decodedData[i] = (decodedData[i - 2] + decodeZigZagInt32Value(data[i])) >>> 0;
decodedData[i + 1] = (decodedData[i - 1] + decodeZigZagInt32Value(data[i + 1])) >>> 0;
}
return decodedData;
}
export function decodeUnsignedComponentwiseDeltaVec2Scaled(data, scale, min, max) {
const scaledValues = decodeComponentwiseDeltaVec2Scaled(data, scale, min, max);
return new Uint32Array(scaledValues);
}
/** Const-RLE streams are laid out [runLength, value]; only the value matters. */
export function decodeUnsignedConstRleInt32(data) {
    const [, constValue] = data;
    return constValue;
}
export function decodeZigZagConstRleInt32(data) {
return decodeZigZagInt32Value(data[1]);
}
export function decodeZigZagSequenceRleInt32(data) {
/* base value and delta value are equal */
if (data.length === 2) {
const value = decodeZigZagInt32Value(data[1]);
return [value, value];
}
/* base value and delta value are not equal -> 2 runs and 2 values*/
const base = decodeZigZagInt32Value(data[2]);
const delta = decodeZigZagInt32Value(data[3]);
return [base, delta];
}
/** 64-bit const-RLE: [runLength, value] — only the value matters. */
export function decodeUnsignedConstRleInt64(data) {
    const [, constValue] = data;
    return constValue;
}
export function decodeZigZagConstRleInt64(data) {
return decodeZigZagInt64Value(data[1]);
}
export function decodeZigZagSequenceRleInt64(data) {
/* base value and delta value are equal */
if (data.length === 2) {
const value = decodeZigZagInt64Value(data[1]);
return [value, value];
}
/* base value and delta value are not equal -> 2 runs and 2 values*/
const base = decodeZigZagInt64Value(data[2]);
const delta = decodeZigZagInt64Value(data[3]);
return [base, delta];
}
//# sourceMappingURL=integerDecodingUtils.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,19 @@
import IntWrapper from "./intWrapper";
import type { StreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import BitVector from "../vector/flat/bitVector";
import { VectorType } from "../vector/vectorType";
import type GeometryScaling from "./geometryScaling";
/*
 * Integer stream decoders. Each function reads one encoded stream from `data`
 * starting at `offset` (an IntWrapper cursor advanced in place) and interprets
 * it according to `streamMetadata`. The int64 variants are varint-decoded at
 * the physical level.
 */
/** Decodes a signed int32 stream; `nullabilityBuffer` re-expands nullable columns. */
export declare function decodeSignedInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, scalingData?: GeometryScaling, nullabilityBuffer?: BitVector): Int32Array;
/** Decodes an unsigned int32 stream (values are not zigzag-decoded on the NONE path). */
export declare function decodeUnsignedInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, scalingData?: GeometryScaling, nullabilityBuffer?: BitVector): Uint32Array;
/** Decodes a length stream into an offset (prefix-sum) buffer with a leading 0. */
export declare function decodeLengthStreamToOffsetBuffer(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): Uint32Array;
/** Decodes a constant stream (single value or const-RLE) as a signed int32. */
export declare function decodeSignedConstInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): number;
/** Decodes a constant stream (single value or const-RLE) as an unsigned int32. */
export declare function decodeUnsignedConstInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): number;
/** Decodes a sequence (arithmetic progression) stream to its base value and delta. */
export declare function decodeSequenceInt32Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): [baseValue: number, delta: number];
/** 64-bit variant of decodeSequenceInt32Stream. */
export declare function decodeSequenceInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): [baseValue: bigint, delta: bigint];
/** Decodes a signed int64 stream. */
export declare function decodeSignedInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, nullabilityBuffer?: BitVector): BigInt64Array;
/** Decodes an unsigned int64 stream. */
export declare function decodeUnsignedInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata, nullabilityBuffer?: BitVector): BigUint64Array;
/** Decodes a signed 64-bit integer stream keeping values as JS numbers (Float64Array). */
export declare function decodeSignedInt64AsFloat64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): Float64Array;
/** Decodes an unsigned 64-bit integer stream keeping values as JS numbers (Float64Array). */
export declare function decodeUnsignedInt64AsFloat64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): Float64Array;
/** Decodes a constant int64 stream as a signed value. */
export declare function decodeSignedConstInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): bigint;
/** Decodes a constant int64 stream as an unsigned value. */
export declare function decodeUnsignedConstInt64Stream(data: Uint8Array, offset: IntWrapper, streamMetadata: StreamMetadata): bigint;
/**
 * Classifies a stream as CONST, SEQUENCE or FLAT from its metadata; for
 * two-run delta-RLE streams it peeks at the encoded deltas without advancing
 * `offset` to detect sequences.
 */
export declare function getVectorType(streamMetadata: StreamMetadata, sizeOrNullabilityBuffer: number | BitVector, data: Uint8Array, offset: IntWrapper, varintWidth?: "int32" | "int64"): VectorType;

View File

@@ -0,0 +1,339 @@
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
import IntWrapper from "./intWrapper";
import { decodeComponentwiseDeltaVec2, decodeComponentwiseDeltaVec2Scaled, decodeDeltaRleInt32, decodeDeltaRleInt64, decodeFastPfor, decodeUnsignedComponentwiseDeltaVec2, decodeUnsignedComponentwiseDeltaVec2Scaled, decodeUnsignedConstRleInt32, decodeUnsignedConstRleInt64, decodeUnsignedRleInt32, decodeUnsignedRleInt64, decodeUnsignedRleFloat64, decodeUnsignedZigZagDeltaInt32, decodeUnsignedZigZagDeltaInt64, decodeVarintInt32, decodeVarintInt64, decodeVarintFloat64, decodeZigZagInt32, decodeZigZagInt64, decodeZigZagFloat64, decodeZigZagConstRleInt32, decodeZigZagConstRleInt64, decodeZigZagDeltaInt32, decodeZigZagDeltaInt64, decodeZigZagDeltaFloat64, decodeZigZagSequenceRleInt32, decodeZigZagSequenceRleInt64, decodeZigZagInt32Value, decodeZigZagInt64Value, fastInverseDelta, inverseDelta, decodeRleDeltaInt32, decodeZigZagDeltaOfDeltaInt32, decodeZigZagRleDeltaInt32, decodeZigZagRleInt32, decodeZigZagRleInt64, decodeZigZagRleFloat64, } from "./integerDecodingUtils";
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
import BitVector from "../vector/flat/bitVector";
import { VectorType } from "../vector/vectorType";
import { unpackNullable } from "./unpackNullableUtils";
export function decodeSignedInt32Stream(data, offset, streamMetadata, scalingData, nullabilityBuffer) {
const values = decodePhysicalLevelTechnique(data, offset, streamMetadata);
return decodeSignedInt32(values, streamMetadata, scalingData, nullabilityBuffer);
}
export function decodeUnsignedInt32Stream(data, offset, streamMetadata, scalingData, nullabilityBuffer) {
const values = decodePhysicalLevelTechnique(data, offset, streamMetadata);
return decodeUnsignedInt32(values, streamMetadata, scalingData, nullabilityBuffer);
}
export function decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata) {
const values = decodePhysicalLevelTechnique(data, offset, streamMetadata);
return decodeLengthToOffsetBuffer(values, streamMetadata);
}
function decodePhysicalLevelTechnique(data, offset, streamMetadata) {
const physicalLevelTechnique = streamMetadata.physicalLevelTechnique;
switch (physicalLevelTechnique) {
case PhysicalLevelTechnique.FAST_PFOR:
return decodeFastPfor(data, streamMetadata.numValues, streamMetadata.byteLength, offset);
case PhysicalLevelTechnique.VARINT:
return decodeVarintInt32(data, offset, streamMetadata.numValues);
case PhysicalLevelTechnique.NONE: {
const dataOffset = offset.get();
const byteLength = streamMetadata.byteLength;
offset.add(byteLength);
const slice = data.subarray(dataOffset, offset.get());
return new Uint32Array(slice);
}
default:
throw new Error(`Specified physicalLevelTechnique ${physicalLevelTechnique} is not supported (yet).`);
}
}
export function decodeSignedConstInt32Stream(data, offset, streamMetadata) {
const values = decodePhysicalLevelTechnique(data, offset, streamMetadata);
if (values.length === 1) {
return decodeZigZagInt32Value(values[0]);
}
return decodeZigZagConstRleInt32(values);
}
export function decodeUnsignedConstInt32Stream(data, offset, streamMetadata) {
const values = decodePhysicalLevelTechnique(data, offset, streamMetadata);
if (values.length === 1) {
return values[0];
}
return decodeUnsignedConstRleInt32(values);
}
export function decodeSequenceInt32Stream(data, offset, streamMetadata) {
const values = decodePhysicalLevelTechnique(data, offset, streamMetadata);
return decodeZigZagSequenceRleInt32(values);
}
export function decodeSequenceInt64Stream(data, offset, streamMetadata) {
const values = decodeVarintInt64(data, offset, streamMetadata.numValues);
return decodeZigZagSequenceRleInt64(values);
}
export function decodeSignedInt64Stream(data, offset, streamMetadata, nullabilityBuffer) {
const values = decodeVarintInt64(data, offset, streamMetadata.numValues);
return decodeSignedInt64(values, streamMetadata, nullabilityBuffer);
}
export function decodeUnsignedInt64Stream(data, offset, streamMetadata, nullabilityBuffer) {
const values = decodeVarintInt64(data, offset, streamMetadata.numValues);
return decodeUnsignedInt64(values, streamMetadata, nullabilityBuffer);
}
export function decodeSignedInt64AsFloat64Stream(data, offset, streamMetadata) {
const values = decodeVarintFloat64(data, offset, streamMetadata.numValues);
return decodeFloat64Values(values, streamMetadata, true);
}
export function decodeUnsignedInt64AsFloat64Stream(data, offset, streamMetadata) {
const values = decodeVarintFloat64(data, offset, streamMetadata.numValues);
return decodeFloat64Values(values, streamMetadata, false);
}
export function decodeSignedConstInt64Stream(data, offset, streamMetadata) {
const values = decodeVarintInt64(data, offset, streamMetadata.numValues);
if (values.length === 1) {
return decodeZigZagInt64Value(values[0]);
}
return decodeZigZagConstRleInt64(values);
}
export function decodeUnsignedConstInt64Stream(data, offset, streamMetadata) {
const values = decodeVarintInt64(data, offset, streamMetadata.numValues);
if (values.length === 1) {
return values[0];
}
return decodeUnsignedConstRleInt64(values);
}
/**
 * Logical-level decode of a signed int32 stream.
 * The encoder uses only fixed combinations of encodings, and for performance
 * the decoder mirrors them:
 * - Morton Delta -> always sorted, so no ZigZag encoding needed
 * - Delta -> currently always in combination with ZigZag encoding
 * - Rle -> in combination with ZigZag encoding if the data type is signed
 * - Delta Rle
 * - Componentwise Delta -> always ZigZag encoded
 *
 * When `nullabilityBuffer` is present the dense values are re-expanded via
 * `unpackNullable` (third argument 0 — presumably the fill for null slots;
 * confirm in unpackNullableUtils).
 */
function decodeSignedInt32(values, streamMetadata, scalingData, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                if (!nullabilityBuffer) {
                    // Fast path: fused RLE expansion + zigzag + delta in one pass.
                    return decodeDeltaRleInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
                }
                values = decodeUnsignedRleInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeZigZagDeltaInt32(values);
            }
            else {
                decodedValues = decodeZigZagDeltaInt32(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeZigZagRleInt32(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.MORTON:
            // Plain (non-zigzag) delta; fastInverseDelta mutates `values` in place.
            fastInverseDelta(values);
            decodedValues = new Int32Array(values);
            break;
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            if (scalingData && !nullabilityBuffer) {
                // Apply the scaling transform while decoding the vec2 deltas.
                return decodeComponentwiseDeltaVec2Scaled(values, scalingData.scale, scalingData.min, scalingData.max);
            }
            decodedValues = decodeComponentwiseDeltaVec2(values);
            break;
        case LogicalLevelTechnique.NONE:
            decodedValues = decodeZigZagInt32(values);
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        return unpackNullable(decodedValues, nullabilityBuffer, 0);
    }
    return decodedValues;
}
/**
 * Logical-level decode of an unsigned int32 stream.
 * Mirrors decodeSignedInt32 but keeps results unsigned: the MORTON and NONE
 * paths return the (possibly mutated) input buffer directly without zigzag.
 */
function decodeUnsignedInt32(values, streamMetadata, scalingData, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                // RLE expansion first, then zigzag-delta reconstruction.
                const deltaValues = decodeUnsignedRleInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeUnsignedZigZagDeltaInt32(deltaValues);
            }
            else {
                decodedValues = decodeUnsignedZigZagDeltaInt32(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeUnsignedRleInt32(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.MORTON:
            // fastInverseDelta mutates `values` in place; the same buffer is returned.
            fastInverseDelta(values);
            decodedValues = values;
            break;
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            if (scalingData && !nullabilityBuffer) {
                decodedValues = decodeUnsignedComponentwiseDeltaVec2Scaled(values, scalingData.scale, scalingData.min, scalingData.max);
            }
            else {
                decodedValues = decodeUnsignedComponentwiseDeltaVec2(values);
            }
            break;
        case LogicalLevelTechnique.NONE:
            decodedValues = values;
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        return unpackNullable(decodedValues, nullabilityBuffer, 0);
    }
    return decodedValues;
}
/**
 * Logical-level decode of a signed int64 stream (BigInt counterpart of
 * decodeSignedInt32; no Morton/componentwise paths for int64).
 */
function decodeSignedInt64(values, streamMetadata, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                if (!nullabilityBuffer) {
                    // Fast path: fused RLE expansion + zigzag + delta in one pass.
                    return decodeDeltaRleInt64(values, rleMetadata.runs, rleMetadata.numRleValues);
                }
                values = decodeUnsignedRleInt64(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeZigZagDeltaInt64(values);
            }
            else {
                decodedValues = decodeZigZagDeltaInt64(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeZigZagRleInt64(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.NONE:
            decodedValues = decodeZigZagInt64(values);
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        return unpackNullable(decodedValues, nullabilityBuffer, 0n);
    }
    return decodedValues;
}
/**
 * Logical-level decode of an unsigned int64 stream; the NONE path returns
 * the input buffer unchanged (no zigzag for unsigned data).
 */
function decodeUnsignedInt64(values, streamMetadata, nullabilityBuffer) {
    let decodedValues;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                const deltaValues = decodeUnsignedRleInt64(values, rleMetadata.runs, rleMetadata.numRleValues);
                decodedValues = decodeUnsignedZigZagDeltaInt64(deltaValues);
            }
            else {
                decodedValues = decodeUnsignedZigZagDeltaInt64(values);
            }
            break;
        case LogicalLevelTechnique.RLE:
            decodedValues = decodeUnsignedRleInt64(values, streamMetadata.runs, streamMetadata.numRleValues);
            break;
        case LogicalLevelTechnique.NONE:
            decodedValues = values;
            break;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
    if (nullabilityBuffer) {
        return unpackNullable(decodedValues, nullabilityBuffer, 0n);
    }
    return decodedValues;
}
/**
 * Logical-level decode for 64-bit integers carried as JS numbers.
 * NOTE: the DELTA and NONE paths mutate `values` in place
 * (decodeZigZagDeltaFloat64 / decodeZigZagFloat64) and return the same buffer.
 */
function decodeFloat64Values(values, streamMetadata, isSigned) {
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const rleMetadata = streamMetadata;
                values = decodeUnsignedRleFloat64(values, rleMetadata.runs, rleMetadata.numRleValues);
            }
            decodeZigZagDeltaFloat64(values);
            return values;
        case LogicalLevelTechnique.RLE:
            return decodeRleFloat64(values, streamMetadata, isSigned);
        case LogicalLevelTechnique.NONE:
            if (isSigned) {
                decodeZigZagFloat64(values);
            }
            return values;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
}
/**
 * Converts a decoded length stream into an offset buffer (prefix sums with a
 * leading 0), specialized per logical-level technique combination.
 * Unsupported combinations throw.
 */
function decodeLengthToOffsetBuffer(values, streamMetadata) {
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.DELTA &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.NONE) {
        // Zigzag delta-of-delta: lengths are deltas, offsets need a second integration.
        return decodeZigZagDeltaOfDeltaInt32(values);
    }
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.RLE &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.NONE) {
        const rleMetadata = streamMetadata;
        return decodeRleDeltaInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
    }
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.NONE &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.NONE) {
        //TODO: use fastInverseDelta again and check what are the performance problems in zoom 14
        //fastInverseDelta(values);
        inverseDelta(values);
        // Prepend the leading 0 required by offset-buffer consumers.
        const offsets = new Uint32Array(streamMetadata.numValues + 1);
        offsets[0] = 0;
        offsets.set(values, 1);
        return offsets;
    }
    if (streamMetadata.logicalLevelTechnique1 === LogicalLevelTechnique.DELTA &&
        streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
        const rleMetadata = streamMetadata;
        const decodedValues = decodeZigZagRleDeltaInt32(values, rleMetadata.runs, rleMetadata.numRleValues);
        fastInverseDelta(decodedValues); // mutates decodedValues in place
        return new Uint32Array(decodedValues);
    }
    throw new Error("Only delta encoding is supported for transforming length to offset streams yet.");
}
/**
 * Classifies a stream as CONST, SEQUENCE or FLAT from its metadata.
 * For two-run delta-RLE streams it peeks at the first four encoded words to
 * check whether both deltas are zigzag-encoded 1 (an arithmetic sequence);
 * `offset` is restored afterwards so the stream can still be decoded normally.
 */
export function getVectorType(streamMetadata, sizeOrNullabilityBuffer, data, offset, varintWidth = "int32") {
    const logicalLevelTechnique1 = streamMetadata.logicalLevelTechnique1;
    if (logicalLevelTechnique1 === LogicalLevelTechnique.RLE) {
        return streamMetadata.runs === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    if (logicalLevelTechnique1 !== LogicalLevelTechnique.DELTA ||
        streamMetadata.logicalLevelTechnique2 !== LogicalLevelTechnique.RLE) {
        return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    const numFeatures = sizeOrNullabilityBuffer instanceof BitVector ? sizeOrNullabilityBuffer.size() : sizeOrNullabilityBuffer;
    const rleMetadata = streamMetadata;
    if (rleMetadata.numRleValues !== numFeatures) {
        return VectorType.FLAT;
    }
    // Single run is always a sequence
    if (rleMetadata.runs === 1) {
        return VectorType.SEQUENCE;
    }
    if (rleMetadata.runs !== 2) {
        return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    // Two runs can be a sequence if both deltas are equal to 1
    const savedOffset = offset.get();
    if (streamMetadata.physicalLevelTechnique === PhysicalLevelTechnique.VARINT) {
        if (isDeltaRleSequenceVarintWidth(data, offset, varintWidth)) {
            return VectorType.SEQUENCE;
        }
        return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
    }
    const byteOffset = offset.get();
    // NOTE(review): this creates an Int32Array view directly over the stream
    // bytes, which requires data.byteOffset + byteOffset to be 4-byte aligned
    // and reads platform-endian words — confirm the non-varint encodings
    // guarantee both, otherwise this can throw RangeError or misread.
    const values = new Int32Array(data.buffer, data.byteOffset + byteOffset, 4);
    offset.set(savedOffset);
    // Check if both deltas are encoded 1
    const zigZagOne = 2;
    if (values[2] === zigZagOne && values[3] === zigZagOne) {
        return VectorType.SEQUENCE;
    }
    return streamMetadata.numValues === 1 ? VectorType.CONST : VectorType.FLAT;
}
function isDeltaRleSequenceVarintWidth(data, offset, varintWidth) {
const peekOffset = new IntWrapper(offset.get());
if (varintWidth === "int64") {
const values = decodeVarintInt64(data, peekOffset, 4);
return values[2] === 2n && values[3] === 2n;
}
const values = decodeVarintInt32(data, peekOffset, 4);
return values[2] === 2 && values[3] === 2;
}
function decodeRleFloat64(data, streamMetadata, isSigned) {
return isSigned
? decodeZigZagRleFloat64(data, streamMetadata.runs, streamMetadata.numRleValues)
: decodeUnsignedRleFloat64(data, streamMetadata.runs, streamMetadata.numRleValues);
}
//# sourceMappingURL=integerStreamDecoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,4 @@
import type IntWrapper from "./intWrapper";
import { type Column } from "../metadata/tileset/tilesetMetadata";
import type Vector from "../vector/vector";
/**
 * Decodes one property column starting at `offset` (advanced in place).
 * Scalar columns yield a single Vector; shared-dictionary (struct) columns
 * yield one Vector per child.
 * NOTE(review): the implementation also returns null for skipped columns
 * (name not in `propertyColumnNames`) and for columns with zero streams,
 * but null is missing from the declared return type — confirm and widen.
 */
export declare function decodePropertyColumn(data: Uint8Array, offset: IntWrapper, columnMetadata: Column, numStreams: number, numFeatures: number, propertyColumnNames?: Set<string>): Vector | Vector[];

View File

@@ -0,0 +1,132 @@
import { ScalarType } from "../metadata/tileset/tilesetMetadata";
import BitVector from "../vector/flat/bitVector";
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import { VectorType } from "../vector/vectorType";
import { BooleanFlatVector } from "../vector/flat/booleanFlatVector";
import { DoubleFlatVector } from "../vector/flat/doubleFlatVector";
import { FloatFlatVector } from "../vector/flat/floatFlatVector";
import { Int64ConstVector } from "../vector/constant/int64ConstVector";
import { Int64FlatVector } from "../vector/flat/int64FlatVector";
import { Int32FlatVector } from "../vector/flat/int32FlatVector";
import { Int32ConstVector } from "../vector/constant/int32ConstVector";
import { decodeBooleanRle, decodeDoublesLE, decodeFloatsLE, skipColumn } from "./decodingUtils";
import { decodeSignedConstInt32Stream, decodeSignedConstInt64Stream, decodeSignedInt32Stream, decodeSignedInt64Stream, decodeUnsignedInt32Stream, decodeUnsignedConstInt32Stream, decodeUnsignedConstInt64Stream, decodeUnsignedInt64Stream, decodeSequenceInt32Stream, decodeSequenceInt64Stream, getVectorType, } from "./integerStreamDecoder";
import { Int32SequenceVector } from "../vector/sequence/int32SequenceVector";
import { Int64SequenceVector } from "../vector/sequence/int64SequenceVector";
import { decodeSharedDictionary, decodeString } from "./stringDecoder";
/**
 * Decodes one property column. Scalar columns that are not in
 * `propertyColumnNames` are skipped (their streams consumed, null returned);
 * non-scalar columns are decoded through the shared dictionary path.
 * Returns null for skipped columns and for columns with zero streams.
 */
export function decodePropertyColumn(data, offset, columnMetadata, numStreams, numFeatures, propertyColumnNames) {
    if (columnMetadata.type === "scalarType") {
        if (propertyColumnNames && !propertyColumnNames.has(columnMetadata.name)) {
            // Column filtered out: still advance the offset past its streams.
            skipColumn(numStreams, data, offset);
            return null;
        }
        return decodeScalarPropertyColumn(numStreams, data, offset, numFeatures, columnMetadata.scalarType, columnMetadata);
    }
    if (numStreams === 0) {
        return null;
    }
    return decodeSharedDictionary(data, offset, columnMetadata, numFeatures, propertyColumnNames);
}
/**
 * Decodes a scalar property column: optionally reads the PRESENT
 * (nullability) stream first, then dispatches on the physical scalar type.
 * Returns null when the column has no streams.
 */
function decodeScalarPropertyColumn(numStreams, data, offset, numFeatures, column, columnMetadata) {
    let nullabilityBuffer = null;
    if (numStreams === 0) {
        return null;
    }
    if (columnMetadata.nullable) {
        const presentStreamMetadata = decodeStreamMetadata(data, offset);
        const numValues = presentStreamMetadata.numValues;
        const streamDataStart = offset.get();
        const presentVector = decodeBooleanRle(data, numValues, presentStreamMetadata.byteLength, offset);
        // Re-anchor the offset to the declared stream length rather than
        // trusting how far the boolean-RLE decoder advanced it.
        offset.set(streamDataStart + presentStreamMetadata.byteLength);
        nullabilityBuffer = new BitVector(presentVector, presentStreamMetadata.numValues);
    }
    // Downstream decoders take either a feature count or a presence bitmap.
    const sizeOrNullabilityBuffer = nullabilityBuffer ?? numFeatures;
    const scalarType = column.physicalType;
    switch (scalarType) {
        case ScalarType.UINT_32:
        case ScalarType.INT_32:
            return decodeInt32Column(data, offset, columnMetadata, column, sizeOrNullabilityBuffer);
        case ScalarType.STRING: {
            // In embedded format: numStreams includes nullability stream if column is nullable
            const stringDataStreams = columnMetadata.nullable ? numStreams - 1 : numStreams;
            return decodeString(columnMetadata.name, data, offset, stringDataStreams, nullabilityBuffer);
        }
        case ScalarType.BOOLEAN:
            return decodeBooleanColumn(data, offset, columnMetadata, numFeatures, sizeOrNullabilityBuffer);
        case ScalarType.UINT_64:
        case ScalarType.INT_64:
            return decodeInt64Column(data, offset, columnMetadata, sizeOrNullabilityBuffer, column);
        case ScalarType.FLOAT:
            return decodeFloatColumn(data, offset, columnMetadata, sizeOrNullabilityBuffer);
        case ScalarType.DOUBLE:
            return decodeDoubleColumn(data, offset, columnMetadata, sizeOrNullabilityBuffer);
        default:
            throw new Error(`The specified data type for the field is currently not supported: ${column}`);
    }
}
/**
 * Decodes a boolean column stored as boolean-RLE into a BooleanFlatVector.
 * The offset is re-anchored to the declared byteLength after decoding.
 */
function decodeBooleanColumn(data, offset, column, _numFeatures, sizeOrNullabilityBuffer) {
    const dataStreamMetadata = decodeStreamMetadata(data, offset);
    const numValues = dataStreamMetadata.numValues;
    const streamDataStart = offset.get();
    // Pass the presence bitmap (if any) so the RLE decoder can expand nulls.
    const nullabilityBuffer = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
    const dataStream = decodeBooleanRle(data, numValues, dataStreamMetadata.byteLength, offset, nullabilityBuffer);
    offset.set(streamDataStart + dataStreamMetadata.byteLength);
    const dataVector = new BitVector(dataStream, numValues);
    return new BooleanFlatVector(column.name, dataVector, sizeOrNullabilityBuffer);
}
function decodeFloatColumn(data, offset, column, sizeOrNullabilityBuffer) {
const dataStreamMetadata = decodeStreamMetadata(data, offset);
const nullabilityBuffer = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
const dataStream = decodeFloatsLE(data, offset, dataStreamMetadata.numValues, nullabilityBuffer);
return new FloatFlatVector(column.name, dataStream, sizeOrNullabilityBuffer);
}
function decodeDoubleColumn(data, offset, column, sizeOrNullabilityBuffer) {
const dataStreamMetadata = decodeStreamMetadata(data, offset);
const nullabilityBuffer = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
const dataStream = decodeDoublesLE(data, offset, dataStreamMetadata.numValues, nullabilityBuffer);
return new DoubleFlatVector(column.name, dataStream, sizeOrNullabilityBuffer);
}
/**
 * Decodes a 64-bit integer column into a FLAT, SEQUENCE or CONST vector,
 * depending on how getVectorType classifies the data stream.
 */
function decodeInt64Column(data, offset, column, sizeOrNullabilityBuffer, scalarColumn) {
    const dataStreamMetadata = decodeStreamMetadata(data, offset);
    const vectorType = getVectorType(dataStreamMetadata, sizeOrNullabilityBuffer, data, offset, "int64");
    const isSigned = scalarColumn.physicalType === ScalarType.INT_64;
    if (vectorType === VectorType.FLAT) {
        const nullabilityBuffer = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
        const dataStream = isSigned
            ? decodeSignedInt64Stream(data, offset, dataStreamMetadata, nullabilityBuffer)
            : decodeUnsignedInt64Stream(data, offset, dataStreamMetadata, nullabilityBuffer);
        return new Int64FlatVector(column.name, dataStream, sizeOrNullabilityBuffer);
    }
    if (vectorType === VectorType.SEQUENCE) {
        // Arithmetic sequence: store only [base, delta] instead of all values.
        const id = decodeSequenceInt64Stream(data, offset, dataStreamMetadata);
        return new Int64SequenceVector(column.name, id[0], id[1], dataStreamMetadata.numRleValues);
    }
    const constValue = isSigned
        ? decodeSignedConstInt64Stream(data, offset, dataStreamMetadata)
        : decodeUnsignedConstInt64Stream(data, offset, dataStreamMetadata);
    return new Int64ConstVector(column.name, constValue, sizeOrNullabilityBuffer, isSigned);
}
/**
 * Decodes a 32-bit integer column into a FLAT, SEQUENCE or CONST vector,
 * depending on how getVectorType classifies the data stream.
 */
function decodeInt32Column(data, offset, column, scalarColumn, sizeOrNullabilityBuffer) {
    const dataStreamMetadata = decodeStreamMetadata(data, offset);
    const vectorType = getVectorType(dataStreamMetadata, sizeOrNullabilityBuffer, data, offset);
    const isSigned = scalarColumn.physicalType === ScalarType.INT_32;
    if (vectorType === VectorType.FLAT) {
        const nullabilityBuffer = isNullabilityBuffer(sizeOrNullabilityBuffer) ? sizeOrNullabilityBuffer : undefined;
        const dataStream = isSigned
            ? decodeSignedInt32Stream(data, offset, dataStreamMetadata, undefined, nullabilityBuffer)
            : decodeUnsignedInt32Stream(data, offset, dataStreamMetadata, undefined, nullabilityBuffer);
        return new Int32FlatVector(column.name, dataStream, sizeOrNullabilityBuffer);
    }
    if (vectorType === VectorType.SEQUENCE) {
        // Arithmetic sequence: store only [base, delta] instead of all values.
        const id = decodeSequenceInt32Stream(data, offset, dataStreamMetadata);
        return new Int32SequenceVector(column.name, id[0], id[1], dataStreamMetadata.numRleValues);
    }
    const constValue = isSigned
        ? decodeSignedConstInt32Stream(data, offset, dataStreamMetadata)
        : decodeUnsignedConstInt32Stream(data, offset, dataStreamMetadata);
    return new Int32ConstVector(column.name, constValue, sizeOrNullabilityBuffer, isSigned);
}
/** A number means "feature count"; a BitVector is a per-feature presence map. */
function isNullabilityBuffer(sizeOrNullabilityBuffer) {
    const isBitVector = sizeOrNullabilityBuffer instanceof BitVector;
    return isBitVector;
}
//# sourceMappingURL=propertyDecoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,6 @@
import type IntWrapper from "./intWrapper";
import BitVector from "../vector/flat/bitVector";
import type Vector from "../vector/vector";
import { type Column } from "../metadata/tileset/tilesetMetadata";
/**
 * Decodes a string column from `numStreams` consecutive streams at `offset`
 * (advanced in place) into an FSST-dictionary, dictionary, or plain flat
 * vector, whichever the stream set describes. An externally supplied
 * `bitVector` takes precedence over an embedded PRESENT stream.
 */
export declare function decodeString(name: string, data: Uint8Array, offset: IntWrapper, numStreams: number, bitVector?: BitVector): Vector;
/** Decodes a struct column whose children share one string dictionary, one Vector per child. */
export declare function decodeSharedDictionary(data: Uint8Array, offset: IntWrapper, column: Column, numFeatures: number, propertyColumnNames?: Set<string>): Vector[];

View File

@@ -0,0 +1,174 @@
import { decodeStreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import { StringFlatVector } from "../vector/flat/stringFlatVector";
import { StringDictionaryVector } from "../vector/dictionary/stringDictionaryVector";
import BitVector from "../vector/flat/bitVector";
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
import { DictionaryType } from "../metadata/tile/dictionaryType";
import { LengthType } from "../metadata/tile/lengthType";
import { decodeUnsignedInt32Stream, decodeLengthStreamToOffsetBuffer } from "./integerStreamDecoder";
import { ScalarType } from "../metadata/tileset/tilesetMetadata";
import { decodeVarintInt32 } from "./integerDecodingUtils";
import { decodeBooleanRle, skipColumn } from "./decodingUtils";
import { StringFsstDictionaryVector } from "../vector/fsst-dictionary/stringFsstDictionaryVector";
/**
 * Decodes a string column by collecting its physical streams (PRESENT,
 * OFFSET, LENGTH, DATA) and then building the most specific vector the
 * collected streams allow: FSST dictionary, plain dictionary, or flat.
 *
 * @param name Column/vector name
 * @param data Tile buffer
 * @param offset Cursor into `data`, advanced past all consumed streams
 * @param numStreams Number of streams belonging to this column
 * @param bitVector Externally decoded nullability; wins over an embedded PRESENT stream
 */
export function decodeString(name, data, offset, numStreams, bitVector) {
    let dictionaryLengthStream = null;
    let offsetStream = null;
    let dictionaryStream = null;
    let symbolLengthStream = null;
    let symbolTableStream = null;
    let nullabilityBuffer = bitVector ?? null;
    let plainLengthStream = null;
    let plainDataStream = null;
    for (let i = 0; i < numStreams; i++) {
        const streamMetadata = decodeStreamMetadata(data, offset);
        switch (streamMetadata.physicalStreamType) {
            case PhysicalStreamType.PRESENT: {
                // Decode (and consume) the embedded PRESENT stream, but keep
                // the caller-supplied bitVector if one was provided.
                const presentData = decodeBooleanRle(data, streamMetadata.numValues, streamMetadata.byteLength, offset);
                const presentStream = new BitVector(presentData, streamMetadata.numValues);
                nullabilityBuffer = bitVector ?? presentStream;
                break;
            }
            case PhysicalStreamType.OFFSET: {
                offsetStream = decodeUnsignedInt32Stream(data, offset, streamMetadata, undefined, nullabilityBuffer);
                break;
            }
            case PhysicalStreamType.LENGTH: {
                // Length streams are converted to offset (prefix-sum) buffers.
                const lengthStream = decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata);
                if (LengthType.DICTIONARY === streamMetadata.logicalStreamType.lengthType) {
                    dictionaryLengthStream = lengthStream;
                }
                else if (LengthType.SYMBOL === streamMetadata.logicalStreamType.lengthType) {
                    symbolLengthStream = lengthStream;
                }
                else {
                    // Plain string encoding uses VAR_BINARY length type
                    plainLengthStream = lengthStream;
                }
                break;
            }
            case PhysicalStreamType.DATA: {
                // Raw bytes are referenced via subarray (no copy), then the
                // cursor is advanced past them.
                const dataStream = data.subarray(offset.get(), offset.get() + streamMetadata.byteLength);
                offset.add(streamMetadata.byteLength);
                const dictType = streamMetadata.logicalStreamType.dictionaryType;
                if (DictionaryType.FSST === dictType) {
                    symbolTableStream = dataStream;
                }
                else if (DictionaryType.SINGLE === dictType || DictionaryType.SHARED === dictType) {
                    dictionaryStream = dataStream;
                }
                else if (DictionaryType.NONE === dictType) {
                    plainDataStream = dataStream;
                }
                break;
            }
        }
    }
    // Most specific representation wins: FSST, then dictionary, then plain.
    return (decodeFsstDictionaryVector(name, symbolTableStream, offsetStream, dictionaryLengthStream, dictionaryStream, symbolLengthStream, nullabilityBuffer) ??
        decodeDictionaryVector(name, dictionaryStream, offsetStream, dictionaryLengthStream, nullabilityBuffer) ??
        decodePlainStringVector(name, plainLengthStream, plainDataStream, offsetStream, nullabilityBuffer));
}
/**
 * Builds an FSST-compressed string dictionary vector, or returns null when
 * the tile carried no FSST symbol table stream (callers fall through to the
 * next decoding strategy).
 */
function decodeFsstDictionaryVector(name, symbolTableStream, offsetStream, dictionaryLengthStream, dictionaryStream, symbolLengthStream, nullabilityBuffer) {
    return symbolTableStream
        ? new StringFsstDictionaryVector(name, offsetStream, dictionaryLengthStream, dictionaryStream, symbolLengthStream, symbolTableStream, nullabilityBuffer)
        : null;
}
/**
 * Builds a plain string dictionary vector, or returns null when the tile
 * carried no dictionary data stream.
 */
function decodeDictionaryVector(name, dictionaryStream, offsetStream, dictionaryLengthStream, nullabilityBuffer) {
    if (!dictionaryStream) {
        return null;
    }
    if (nullabilityBuffer) {
        return new StringDictionaryVector(name, offsetStream, dictionaryLengthStream, dictionaryStream, nullabilityBuffer);
    }
    return new StringDictionaryVector(name, offsetStream, dictionaryLengthStream, dictionaryStream);
}
/**
 * Builds a string vector from plain (non-dictionary) length/data streams.
 * Returns null when either plain stream is missing so the caller can try the
 * next decoding strategy.
 */
function decodePlainStringVector(name, plainLengthStream, plainDataStream, offsetStream, nullabilityBuffer) {
    if (!plainLengthStream || !plainDataStream) {
        return null;
    }
    // An explicit offset stream means values are index-addressed; reuse the
    // dictionary-vector machinery with the plain streams in place of a dictionary.
    if (offsetStream) {
        return nullabilityBuffer
            ? new StringDictionaryVector(name, offsetStream, plainLengthStream, plainDataStream, nullabilityBuffer)
            : new StringDictionaryVector(name, offsetStream, plainLengthStream, plainDataStream);
    }
    // plainLengthStream appears to be an offset buffer with (numValues + 1)
    // entries (see the `length - 1` comparison). If the nullability vector
    // covers more positions than there are values, the data is sparse:
    // synthesize an offset stream mapping each present position to its value
    // index. Null slots point at value 0 but are masked by nullabilityBuffer.
    if (nullabilityBuffer && nullabilityBuffer.size() !== plainLengthStream.length - 1) {
        const sparseOffsetStream = new Uint32Array(nullabilityBuffer.size());
        let valueIndex = 0;
        for (let i = 0; i < nullabilityBuffer.size(); i++) {
            if (nullabilityBuffer.get(i)) {
                sparseOffsetStream[i] = valueIndex++;
            }
            else {
                sparseOffsetStream[i] = 0;
            }
        }
        return new StringDictionaryVector(name, sparseOffsetStream, plainLengthStream, plainDataStream, nullabilityBuffer);
    }
    // Dense case: one value per position, so a flat vector suffices.
    return nullabilityBuffer
        ? new StringFlatVector(name, plainLengthStream, plainDataStream, nullabilityBuffer)
        : new StringFlatVector(name, plainLengthStream, plainDataStream);
}
/**
 * Decodes a struct column whose string children share one dictionary.
 *
 * First consumes the shared-dictionary streams (length/offset buffers, an
 * optional FSST symbol table, and the dictionary data stream, which
 * terminates the loop), then decodes one present + offset stream pair per
 * child field and wires each child to the shared buffers.
 *
 * @param data Raw tile bytes.
 * @param offset Mutable read cursor into `data` (advanced as streams are consumed).
 * @param column Column metadata; children are read from `column.complexType.children`.
 * @param numFeatures Feature count used to detect nullable children.
 * @param propertyColumnNames Optional filter; children not in the set are skipped.
 * @returns Array of string (FSST-)dictionary vectors, one per decoded child.
 * @throws Error for any child that is not a 2-stream nullable string field.
 */
export function decodeSharedDictionary(data, offset, column, numFeatures, propertyColumnNames) {
    let dictionaryOffsetBuffer = null;
    let dictionaryBuffer = null;
    let symbolOffsetBuffer = null;
    let symbolTableBuffer = null;
    let dictionaryStreamDecoded = false;
    // The dictionary DATA stream is the last shared stream; seeing it ends the loop.
    while (!dictionaryStreamDecoded) {
        const streamMetadata = decodeStreamMetadata(data, offset);
        switch (streamMetadata.physicalStreamType) {
            case PhysicalStreamType.LENGTH:
                // DICTIONARY lengths describe dictionary entries; any other
                // length stream here belongs to the FSST symbol table.
                if (LengthType.DICTIONARY === streamMetadata.logicalStreamType.lengthType) {
                    dictionaryOffsetBuffer = decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata);
                }
                else {
                    symbolOffsetBuffer = decodeLengthStreamToOffsetBuffer(data, offset, streamMetadata);
                }
                break;
            case PhysicalStreamType.DATA:
                if (DictionaryType.SINGLE === streamMetadata.logicalStreamType.dictionaryType ||
                    DictionaryType.SHARED === streamMetadata.logicalStreamType.dictionaryType) {
                    // Zero-copy view into the tile buffer.
                    dictionaryBuffer = data.subarray(offset.get(), offset.get() + streamMetadata.byteLength);
                    dictionaryStreamDecoded = true;
                }
                else {
                    symbolTableBuffer = data.subarray(offset.get(), offset.get() + streamMetadata.byteLength);
                }
                offset.add(streamMetadata.byteLength);
                break;
        }
    }
    const childFields = column.complexType.children;
    const stringDictionaryVectors = [];
    let i = 0;
    for (const childField of childFields) {
        const numStreams = decodeVarintInt32(data, offset, 1)[0];
        if (numStreams === 0) {
            /* Column is not present in the tile */
            continue;
        }
        const columnName = childField.name ? `${column.name}${childField.name}` : column.name;
        if (propertyColumnNames) {
            if (!propertyColumnNames.has(columnName)) {
                //TODO: add size of sub column to Mlt for faster skipping
                skipColumn(numStreams, data, offset);
                continue;
            }
        }
        if (numStreams !== 2 ||
            childField.type !== "scalarField" ||
            childField.scalarField.physicalType !== ScalarType.STRING) {
            throw new Error("Currently only optional string fields are implemented for a struct.");
        }
        // Stream 1: present (nullability) bitmap.
        const presentStreamMetadata = decodeStreamMetadata(data, offset);
        const presentStream = decodeBooleanRle(data, presentStreamMetadata.numValues, presentStreamMetadata.byteLength, offset);
        // Stream 2: per-feature offsets into the shared dictionary.
        const offsetStreamMetadata = decodeStreamMetadata(data, offset);
        const offsetCount = offsetStreamMetadata.decompressedCount;
        // Fewer offsets than features means null slots were omitted.
        const isNullable = offsetCount !== numFeatures;
        const offsetStream = decodeUnsignedInt32Stream(data, offset, offsetStreamMetadata, undefined, isNullable ? new BitVector(presentStream, presentStreamMetadata.numValues) : undefined);
        // Presence of a symbol table selects the FSST-compressed variant.
        stringDictionaryVectors[i++] = symbolTableBuffer
            ? new StringFsstDictionaryVector(columnName, offsetStream, dictionaryOffsetBuffer, dictionaryBuffer, symbolOffsetBuffer, symbolTableBuffer, new BitVector(presentStream, presentStreamMetadata.numValues))
            : new StringDictionaryVector(columnName, offsetStream, dictionaryOffsetBuffer, dictionaryBuffer, new BitVector(presentStream, presentStreamMetadata.numValues));
    }
    return stringDictionaryVectors;
}
//# sourceMappingURL=stringDecoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,25 @@
import BitVector from "../vector/flat/bitVector.js";
/**
 * Type constraint for TypedArray constructors whose instances can be unpacked.
 */
export type TypedArrayConstructor = Int32ArrayConstructor | Uint32ArrayConstructor | BigInt64ArrayConstructor | BigUint64ArrayConstructor | Float32ArrayConstructor | Float64ArrayConstructor;
/** Instance-side counterpart of {@link TypedArrayConstructor}. */
export type TypedArrayInstance = Int32Array | Uint32Array | BigInt64Array | BigUint64Array | Float32Array | Float64Array;
/**
 * Generic unpacking function.
 * Reconstructs the full array by inserting default values at null positions.
 *
 * @param dataStream The compact data stream containing only non-null values
 * @param presentBits BitVector indicating which positions have values (null if non-nullable)
 * @param defaultValue The default value to insert at null positions (0, 0n, etc.)
 * @returns Full array with default values at null positions; when `presentBits`
 * is null the input `dataStream` is returned unchanged (no copy)
 */
export declare function unpackNullable<T extends TypedArrayInstance>(dataStream: T, presentBits: BitVector | null, defaultValue: number | bigint): T;
/**
 * Special case for boolean columns because BitVector is not directly compatible with TypedArray.
 *
 * @param dataStream The compact BitVector data containing only non-null boolean values
 * @param dataStreamSize The number of actual values in dataStream
 * @param presentBits BitVector indicating which positions have values (null if non-nullable)
 * @returns Uint8Array buffer for BitVector with false at null positions; when
 * `presentBits` is null the input `dataStream` is returned unchanged
 */
export declare function unpackNullableBoolean(dataStream: Uint8Array, dataStreamSize: number, presentBits: BitVector | null): Uint8Array;

View File

@@ -0,0 +1,51 @@
import BitVector from "../vector/flat/bitVector.js";
/**
* Generic unpacking function.
* Reconstructs the full array by inserting default values at null positions.
*
* @param dataStream The compact data stream containing only non-null values
* @param presentBits BitVector indicating which positions have values (null if non-nullable)
* @param defaultValue The default value to insert at null positions (0, 0n, etc.)
* @returns Full array with default values at null positions
*/
/**
 * Reconstructs a full-length array from a compact stream of non-null values.
 *
 * When `presentBits` is absent the column is non-nullable and the input is
 * returned as-is (no copy). Otherwise a new array of the same typed-array
 * class is allocated with one slot per position: present positions consume
 * the next compact value, absent positions receive `defaultValue`.
 *
 * @param dataStream Compact stream holding only the non-null values
 * @param presentBits Bit vector marking positions that carry a value, or null
 * @param defaultValue Filler written at null positions (0, 0n, etc.)
 * @returns Full-size array of the same type as `dataStream`
 */
export function unpackNullable(dataStream, presentBits, defaultValue) {
    if (!presentBits) {
        return dataStream;
    }
    const totalSize = presentBits.size();
    const ArrayType = dataStream.constructor;
    const unpacked = new ArrayType(totalSize);
    let readPos = 0;
    for (let position = 0; position < totalSize; position++) {
        unpacked[position] = presentBits.get(position)
            ? dataStream[readPos++]
            : defaultValue;
    }
    return unpacked;
}
/**
* Special case for boolean columns because BitVector is not directly compatible with TypedArray.
*
* @param dataStream The compact BitVector data containing only non-null boolean values
* @param dataStreamSize The number of actual values in dataStream
* @param presentBits BitVector indicating which positions have values (null if non-nullable)
* @returns Uint8Array buffer for BitVector with false at null positions
*/
/**
 * Boolean counterpart of `unpackNullable`: BitVector is not a TypedArray, so
 * the bits are copied one by one through BitVector get/set.
 *
 * @param dataStream Packed bits holding only the non-null boolean values
 * @param dataStreamSize Number of valid values in `dataStream`
 * @param presentBits Bit vector marking positions that carry a value, or null
 * @returns Packed buffer sized for `presentBits.size()` positions, with false
 * at null positions; the input buffer when `presentBits` is null
 */
export function unpackNullableBoolean(dataStream, dataStreamSize, presentBits) {
    if (!presentBits) {
        return dataStream;
    }
    const total = presentBits.size();
    const compact = new BitVector(dataStream, dataStreamSize);
    const unpacked = new BitVector(new Uint8Array(Math.ceil(total / 8)), total);
    let readPos = 0;
    for (let position = 0; position < total; position++) {
        unpacked.set(position, presentBits.get(position) ? compact.get(readPos++) : false);
    }
    return unpacked.getBuffer();
}
//# sourceMappingURL=unpackNullableUtils.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"unpackNullableUtils.js","sourceRoot":"","sources":["../../src/decoding/unpackNullableUtils.ts"],"names":[],"mappings":"AAAA,OAAO,SAAS,MAAM,6BAA6B,CAAC;AAqBpD;;;;;;;;GAQG;AACH,MAAM,UAAU,cAAc,CAC1B,UAAa,EACb,WAA6B,EAC7B,YAA6B;IAE7B,8CAA8C;IAC9C,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,UAAU,CAAC;IACtB,CAAC;IAED,MAAM,IAAI,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC;IAChC,+CAA+C;IAC/C,MAAM,WAAW,GAAG,UAAU,CAAC,WAAoC,CAAC;IACpE,MAAM,MAAM,GAAG,IAAI,WAAW,CAAC,IAAI,CAAM,CAAC;IAE1C,IAAI,OAAO,GAAG,CAAC,CAAC;IAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5B,wEAAwE;QACxE,MAAM,CAAC,CAAC,CAAC,GAAG,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,UAAU,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,CAAE,YAAoB,CAAC;IACnF,CAAC;IAED,OAAO,MAAM,CAAC;AAClB,CAAC;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,qBAAqB,CACjC,UAAsB,EACtB,cAAsB,EACtB,WAA6B;IAE7B,oBAAoB;IACpB,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,UAAU,CAAC;IACtB,CAAC;IAED,MAAM,WAAW,GAAG,WAAW,CAAC,IAAI,EAAE,CAAC;IACvC,MAAM,SAAS,GAAG,IAAI,SAAS,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;IAC5D,MAAM,MAAM,GAAG,IAAI,SAAS,CAAC,IAAI,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,GAAG,CAAC,CAAC,CAAC,EAAE,WAAW,CAAC,CAAC;IAEtF,IAAI,OAAO,GAAG,CAAC,CAAC;IAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,WAAW,EAAE,CAAC,EAAE,EAAE,CAAC;QACnC,sEAAsE;QACtE,MAAM,KAAK,GAAG,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,GAAG,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;QACpE,MAAM,CAAC,GAAG,CAAC,CAAC,EAAE,KAAK,CAAC,CAAC;IACzB,CAAC;IAED,OAAO,MAAM,CAAC,SAAS,EAAE,CAAC;AAC9B,CAAC","sourcesContent":["import BitVector from \"../vector/flat/bitVector.js\";\n\n/**\n * Type constraint for TypedArray types that can be unpacked\n */\nexport type TypedArrayConstructor =\n | Int32ArrayConstructor\n | Uint32ArrayConstructor\n | BigInt64ArrayConstructor\n | BigUint64ArrayConstructor\n | Float32ArrayConstructor\n | Float64ArrayConstructor;\n\nexport type TypedArrayInstance =\n | Int32Array\n | Uint32Array\n | BigInt64Array\n | BigUint64Array\n | Float32Array\n | Float64Array;\n\n/**\n * Generic 
unpacking function.\n * Reconstructs the full array by inserting default values at null positions.\n *\n * @param dataStream The compact data stream containing only non-null values\n * @param presentBits BitVector indicating which positions have values (null if non-nullable)\n * @param defaultValue The default value to insert at null positions (0, 0n, etc.)\n * @returns Full array with default values at null positions\n */\nexport function unpackNullable<T extends TypedArrayInstance>(\n dataStream: T,\n presentBits: BitVector | null,\n defaultValue: number | bigint,\n): T {\n // Non-nullable case: return data stream as-is\n if (!presentBits) {\n return dataStream;\n }\n\n const size = presentBits.size();\n // Create new array of same type with full size\n const constructor = dataStream.constructor as TypedArrayConstructor;\n const result = new constructor(size) as T;\n\n let counter = 0;\n for (let i = 0; i < size; i++) {\n // If position has a value, take from data stream; otherwise use default\n result[i] = presentBits.get(i) ? 
dataStream[counter++] : (defaultValue as any);\n }\n\n return result;\n}\n\n/**\n * Special case for boolean columns because BitVector is not directly compatible with TypedArray.\n *\n * @param dataStream The compact BitVector data containing only non-null boolean values\n * @param dataStreamSize The number of actual values in dataStream\n * @param presentBits BitVector indicating which positions have values (null if non-nullable)\n * @returns Uint8Array buffer for BitVector with false at null positions\n */\nexport function unpackNullableBoolean(\n dataStream: Uint8Array,\n dataStreamSize: number,\n presentBits: BitVector | null,\n): Uint8Array {\n // Non-nullable case\n if (!presentBits) {\n return dataStream;\n }\n\n const numFeatures = presentBits.size();\n const bitVector = new BitVector(dataStream, dataStreamSize);\n const result = new BitVector(new Uint8Array(Math.ceil(numFeatures / 8)), numFeatures);\n\n let counter = 0;\n for (let i = 0; i < numFeatures; i++) {\n // If position has a value, take from data stream; otherwise use false\n const value = presentBits.get(i) ? bitVector.get(counter++) : false;\n result.set(i, value);\n }\n\n return result.getBuffer();\n}\n"]}

View File

@@ -0,0 +1,7 @@
/**
 * Serializes a `Uint32Array` of int32 words to a big-endian byte stream.
 *
 * @param values - Words to serialize.
 * @returns Big-endian byte stream (`values.length * 4` bytes).
 */
export declare function encodeBigEndianInt32s(values: Uint32Array): Uint8Array;

View File

@@ -0,0 +1,16 @@
import { bswap32 } from "../decoding/fastPforShared";
/**
* Serializes an `Int32Array` to a big-endian byte stream.
*
* @param values - Int32 words to serialize.
* @returns Big-endian byte stream (`values.length * 4` bytes).
*/
/**
 * Serializes a `Uint32Array` of int32 words to a big-endian byte stream.
 *
 * Allocates the output, then byte-swaps each word through a `Uint32Array`
 * view over the freshly allocated buffer (always aligned, byteOffset 0).
 * NOTE(review): this relies on `bswap32` reversing byte order; on a
 * little-endian host the result is big-endian — confirm big-endian hosts
 * are out of scope.
 *
 * @param values - Words to serialize.
 * @returns Big-endian byte stream (`values.length * 4` bytes).
 */
export function encodeBigEndianInt32s(values) {
    const bytes = new Uint8Array(values.length * 4);
    const u32 = new Uint32Array(bytes.buffer, bytes.byteOffset, values.length);
    for (let i = 0; i < values.length; i++) {
        u32[i] = bswap32(values[i]);
    }
    return bytes;
}
//# sourceMappingURL=bigEndianEncode.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"bigEndianEncode.js","sourceRoot":"","sources":["../../src/encoding/bigEndianEncode.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,4BAA4B,CAAC;AAErD;;;;;GAKG;AACH,MAAM,UAAU,qBAAqB,CAAC,MAAmB;IACrD,MAAM,KAAK,GAAG,IAAI,UAAU,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;IAChD,MAAM,GAAG,GAAG,IAAI,WAAW,CAAC,KAAK,CAAC,MAAM,EAAE,KAAK,CAAC,UAAU,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC;IAE3E,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QACrC,GAAG,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;IAChC,CAAC;IACD,OAAO,KAAK,CAAC;AACjB,CAAC","sourcesContent":["import { bswap32 } from \"../decoding/fastPforShared\";\n\n/**\n * Serializes an `Int32Array` to a big-endian byte stream.\n *\n * @param values - Int32 words to serialize.\n * @returns Big-endian byte stream (`values.length * 4` bytes).\n */\nexport function encodeBigEndianInt32s(values: Uint32Array): Uint8Array {\n const bytes = new Uint8Array(values.length * 4);\n const u32 = new Uint32Array(bytes.buffer, bytes.byteOffset, values.length);\n\n for (let i = 0; i < values.length; i++) {\n u32[i] = bswap32(values[i]);\n }\n return bytes;\n}\n"]}

View File

@@ -0,0 +1,19 @@
import type { GeometryVector, MortonSettings } from "../vector/geometry/geometryVector";
/** Morton settings shared by all encoders below (numBits: 16, coordinateShift: 0). */
export declare const DEFAULT_MORTON_SETTINGS: MortonSettings;
/** Morton-encodes an (x, y) pair using DEFAULT_MORTON_SETTINGS. */
export declare function encode(x: number, y: number): number;
/* Builders for ConstGeometryVector instances of each geometry type.
 * The plain variants use a VEC_2 vertex buffer; "WithOffset(s)" variants add
 * a vertex-offset buffer; "WithMorton…" variants store morton codes instead
 * of (x, y) pairs. Presumably used as test fixtures — confirm with callers. */
export declare function encodePointGeometryVector(x: number, y: number): GeometryVector;
export declare function encodePointGeometryVectorWithOffset(x: number, y: number): GeometryVector;
export declare function encodePointGeometryVectorWithMortonEncoding(x: number, y: number): GeometryVector;
/** `points` is a flat [x0, y0, x1, y1, …] coordinate list. */
export declare function encodePointsGeometryVector(points: number[]): GeometryVector;
export declare function encodeMultiPointGeometryVector(points: number[][]): GeometryVector;
/** `lines` is the vertex list of a single line string. */
export declare function encodeLineStringGeometryVector(lines: [number, number][]): GeometryVector;
export declare function encodeLineStringGeometryVectorWithMortonEncoding(line: [number, number][]): GeometryVector;
export declare function encodePolygonGeometryVector(polygon: [number, number][][]): GeometryVector;
export declare function encodePolygonGeometryVectorWithOffsets(polygon: [number, number][][]): GeometryVector;
export declare function encodePolygonGeometryVectorWithMortonOffsets(polygon: [number, number][][]): GeometryVector;
export declare function encodeMultiLineStringGeometryVector(lines: [number, number][][]): GeometryVector;
export declare function encodeMultiLineStringGeometryVectorWithOffsets(lines: [number, number][][]): GeometryVector;
export declare function encodeMultiLineStringGeometryVectorWithMortonOffsets(lines: [number, number][][]): GeometryVector;
export declare function encodeMultiPolygonGeometryVector(polygons: [number, number][][][]): GeometryVector;
export declare function encodeMultiPolygonGeometryVectorWithOffsets(polygons: [number, number][][][]): GeometryVector;
export declare function encodeMultiPolygonGeometryVectorWithMortonOffsets(polygons: [number, number][][][]): GeometryVector;

View File

@@ -0,0 +1,248 @@
import { ConstGeometryVector } from "../vector/geometry/constGeometryVector";
import { GEOMETRY_TYPE } from "../vector/geometry/geometryType";
import { VertexBufferType } from "../vector/geometry/vertexBufferType";
import { encodeZOrderCurve } from "./zOrderCurveEncoder";
/** Morton settings shared by every encoder in this module (numBits: 16, coordinateShift: 0). */
export const DEFAULT_MORTON_SETTINGS = { numBits: 16, coordinateShift: 0 };
/** Encodes an (x, y) pair to its z-order (Morton) index using DEFAULT_MORTON_SETTINGS. */
export function encode(x, y) {
    return encodeZOrderCurve(x, y, DEFAULT_MORTON_SETTINGS.numBits, DEFAULT_MORTON_SETTINGS.coordinateShift);
}
/** Builds a one-point constant geometry vector in plain VEC_2 layout. */
export function encodePointGeometryVector(x, y) {
    const topology = {
        geometryOffsets: new Uint32Array([0]),
        partOffsets: new Uint32Array([0]),
        ringOffsets: new Uint32Array([0]),
    };
    const vertices = new Int32Array([x, y]);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.POINT, VertexBufferType.VEC_2, topology, undefined, vertices);
}
/**
 * Builds a one-point geometry vector addressed through a vertex-offset buffer:
 * slot 0 of the vertex buffer holds a filler vertex (99, 99) and the offset
 * buffer points at slot 1, where the real point lives.
 */
export function encodePointGeometryVectorWithOffset(x, y) {
    const topology = {
        geometryOffsets: new Uint32Array([0]),
        partOffsets: new Uint32Array([0]),
        ringOffsets: new Uint32Array([0]),
    };
    const vertices = new Int32Array([99, 99, x, y]);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.POINT, VertexBufferType.VEC_2, topology, new Uint32Array([1]), vertices);
}
/** Builds a one-point geometry vector whose vertex buffer stores a morton code. */
export function encodePointGeometryVectorWithMortonEncoding(x, y) {
    const mortonCode = encode(x, y);
    const topology = {
        geometryOffsets: new Uint32Array([0]),
        partOffsets: new Uint32Array([0]),
        ringOffsets: new Uint32Array([0]),
    };
    return new ConstGeometryVector(1, GEOMETRY_TYPE.POINT, VertexBufferType.MORTON, topology, new Uint32Array([0]), new Int32Array([mortonCode]), DEFAULT_MORTON_SETTINGS);
}
/**
 * Builds a point geometry vector from a flat [x0, y0, x1, y1, …] coordinate
 * list; the geometry count is the number of coordinate pairs.
 */
export function encodePointsGeometryVector(points) {
    const numPoints = points.length / 2;
    const topology = {
        geometryOffsets: new Uint32Array([0]),
        partOffsets: new Uint32Array([0]),
        ringOffsets: new Uint32Array([0]),
    };
    return new ConstGeometryVector(numPoints, GEOMETRY_TYPE.POINT, VertexBufferType.VEC_2, topology, undefined, new Int32Array(points));
}
/** Builds a single multi-point geometry vector from [x, y] pairs. */
export function encodeMultiPointGeometryVector(points) {
    const coords = [];
    for (const [px, py] of points) {
        coords.push(px, py);
    }
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOINT, VertexBufferType.VEC_2, {
        geometryOffsets: new Uint32Array([0, points.length]),
        partOffsets: undefined,
        ringOffsets: undefined,
    }, undefined, new Int32Array(coords));
}
/**
 * Builds a single line-string geometry vector. Note: despite the name,
 * `lines` is the vertex list of ONE line string ([x, y] pairs).
 */
export function encodeLineStringGeometryVector(lines) {
    const coords = [];
    for (const [px, py] of lines) {
        coords.push(px, py);
    }
    const vertexBuffer = new Int32Array(coords);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.LINESTRING, VertexBufferType.VEC_2, {
        geometryOffsets: undefined,
        partOffsets: new Uint32Array([0, vertexBuffer.length / 2]),
        ringOffsets: undefined,
    }, undefined, vertexBuffer);
}
/**
 * Builds a single line-string geometry vector with morton-coded vertices and
 * an identity vertex-offset buffer (slot i holds vertex i).
 */
export function encodeLineStringGeometryVectorWithMortonEncoding(line) {
    const vertexCount = line.length;
    const mortonCodes = new Int32Array(vertexCount);
    const vertexOffsets = new Uint32Array(vertexCount);
    for (let v = 0; v < vertexCount; v++) {
        mortonCodes[v] = encode(line[v][0], line[v][1]);
        vertexOffsets[v] = v;
    }
    return new ConstGeometryVector(1, GEOMETRY_TYPE.LINESTRING, VertexBufferType.MORTON, {
        geometryOffsets: undefined,
        partOffsets: new Uint32Array([0, vertexCount]),
        ringOffsets: undefined,
    }, vertexOffsets, mortonCodes, DEFAULT_MORTON_SETTINGS);
}
/**
 * Builds a single polygon geometry vector. `ringOffsets` holds cumulative
 * vertex counts per ring (offset buffer with polygon.length + 1 entries).
 */
export function encodePolygonGeometryVector(polygon) {
    const coords = [];
    const ringOffsets = new Uint32Array(polygon.length + 1);
    let vertexTotal = 0;
    polygon.forEach((ring, r) => {
        for (const [px, py] of ring) {
            coords.push(px, py);
        }
        vertexTotal += ring.length;
        ringOffsets[r + 1] = vertexTotal;
    });
    return new ConstGeometryVector(1, GEOMETRY_TYPE.POLYGON, VertexBufferType.VEC_2, {
        geometryOffsets: undefined,
        partOffsets: new Uint32Array([0, polygon.length]),
        ringOffsets,
    }, undefined, new Int32Array(coords));
}
/**
 * Builds a single polygon geometry vector with an identity vertex-offset
 * buffer (one entry per vertex, slot i → vertex i).
 */
export function encodePolygonGeometryVectorWithOffsets(polygon) {
    const coords = [];
    const ringOffsets = new Uint32Array(polygon.length + 1);
    let vertexTotal = 0;
    polygon.forEach((ring, r) => {
        for (const [px, py] of ring) {
            coords.push(px, py);
        }
        vertexTotal += ring.length;
        ringOffsets[r + 1] = vertexTotal;
    });
    const vertexBuffer = new Int32Array(coords);
    const offsetBuffer = Uint32Array.from({ length: vertexBuffer.length / 2 }, (_, i) => i);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.POLYGON, VertexBufferType.VEC_2, {
        geometryOffsets: undefined,
        partOffsets: new Uint32Array([0, polygon.length]),
        ringOffsets,
    }, offsetBuffer, vertexBuffer);
}
/**
 * Builds a single polygon geometry vector with morton-coded vertices (one
 * int32 per vertex) and an identity vertex-offset buffer.
 */
export function encodePolygonGeometryVectorWithMortonOffsets(polygon) {
    const mortonCodes = [];
    const ringOffsets = new Uint32Array(polygon.length + 1);
    let vertexTotal = 0;
    polygon.forEach((ring, r) => {
        for (const [px, py] of ring) {
            mortonCodes.push(encode(px, py));
        }
        vertexTotal += ring.length;
        ringOffsets[r + 1] = vertexTotal;
    });
    const vertexBuffer = new Int32Array(mortonCodes);
    const offsetBuffer = Uint32Array.from({ length: vertexBuffer.length }, (_, i) => i);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.POLYGON, VertexBufferType.MORTON, {
        geometryOffsets: undefined,
        partOffsets: new Uint32Array([0, polygon.length]),
        ringOffsets,
    }, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
}
/**
 * Builds a single multi-line-string geometry vector. `partOffsets` holds
 * cumulative vertex counts per line.
 */
export function encodeMultiLineStringGeometryVector(lines) {
    const coords = [];
    const partOffsets = new Uint32Array(lines.length + 1);
    let vertexTotal = 0;
    lines.forEach((line, l) => {
        for (const [px, py] of line) {
            coords.push(px, py);
        }
        vertexTotal += line.length;
        partOffsets[l + 1] = vertexTotal;
    });
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTILINESTRING, VertexBufferType.VEC_2, {
        geometryOffsets: new Uint32Array([0, lines.length]),
        partOffsets,
        ringOffsets: undefined,
    }, undefined, new Int32Array(coords));
}
/**
 * Builds a single multi-line-string geometry vector with an identity
 * vertex-offset buffer (one entry per vertex).
 */
export function encodeMultiLineStringGeometryVectorWithOffsets(lines) {
    const coords = [];
    const partOffsets = new Uint32Array(lines.length + 1);
    let vertexTotal = 0;
    lines.forEach((line, l) => {
        for (const [px, py] of line) {
            coords.push(px, py);
        }
        vertexTotal += line.length;
        partOffsets[l + 1] = vertexTotal;
    });
    const vertexBuffer = new Int32Array(coords);
    const offsetBuffer = Uint32Array.from({ length: vertexBuffer.length / 2 }, (_, i) => i);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTILINESTRING, VertexBufferType.VEC_2, {
        geometryOffsets: new Uint32Array([0, lines.length]),
        partOffsets,
        ringOffsets: undefined,
    }, offsetBuffer, vertexBuffer);
}
/**
 * Builds a single multi-line-string geometry vector with morton-coded
 * vertices and an identity vertex-offset buffer.
 */
export function encodeMultiLineStringGeometryVectorWithMortonOffsets(lines) {
    const mortonCodes = [];
    const partOffsets = new Uint32Array(lines.length + 1);
    let vertexTotal = 0;
    lines.forEach((line, l) => {
        for (const [px, py] of line) {
            mortonCodes.push(encode(px, py));
        }
        vertexTotal += line.length;
        partOffsets[l + 1] = vertexTotal;
    });
    const vertexBuffer = new Int32Array(mortonCodes);
    const offsetBuffer = Uint32Array.from({ length: vertexBuffer.length }, (_, i) => i);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTILINESTRING, VertexBufferType.MORTON, {
        geometryOffsets: new Uint32Array([0, lines.length]),
        partOffsets,
        ringOffsets: undefined,
    }, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
}
/**
 * Builds a single multi-polygon geometry vector. `ringOffsets` holds
 * cumulative vertex counts per ring; `partOffsets` cumulative ring counts
 * per polygon.
 */
export function encodeMultiPolygonGeometryVector(polygons) {
    const coords = [];
    const totalRings = polygons.reduce((acc, polygon) => acc + polygon.length, 0);
    const ringOffsets = new Uint32Array(totalRings + 1);
    const partOffsets = new Uint32Array(polygons.length + 1);
    let ringSlot = 1;
    let vertexTotal = 0;
    polygons.forEach((polygon, p) => {
        for (const ring of polygon) {
            for (const [px, py] of ring) {
                coords.push(px, py);
            }
            vertexTotal += ring.length;
            ringOffsets[ringSlot++] = vertexTotal;
        }
        partOffsets[p + 1] = partOffsets[p] + polygon.length;
    });
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOLYGON, VertexBufferType.VEC_2, {
        geometryOffsets: new Uint32Array([0, polygons.length]),
        partOffsets,
        ringOffsets,
    }, undefined, new Int32Array(coords));
}
/**
 * Builds a single multi-polygon geometry vector with an identity
 * vertex-offset buffer (one entry per vertex).
 */
export function encodeMultiPolygonGeometryVectorWithOffsets(polygons) {
    const coords = [];
    const totalRings = polygons.reduce((acc, polygon) => acc + polygon.length, 0);
    const ringOffsets = new Uint32Array(totalRings + 1);
    const partOffsets = new Uint32Array(polygons.length + 1);
    let ringSlot = 1;
    let vertexTotal = 0;
    polygons.forEach((polygon, p) => {
        for (const ring of polygon) {
            for (const [px, py] of ring) {
                coords.push(px, py);
            }
            vertexTotal += ring.length;
            ringOffsets[ringSlot++] = vertexTotal;
        }
        partOffsets[p + 1] = partOffsets[p] + polygon.length;
    });
    const vertexBuffer = new Int32Array(coords);
    const offsetBuffer = Uint32Array.from({ length: vertexBuffer.length / 2 }, (_, i) => i);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOLYGON, VertexBufferType.VEC_2, {
        geometryOffsets: new Uint32Array([0, polygons.length]),
        partOffsets,
        ringOffsets,
    }, offsetBuffer, vertexBuffer);
}
/**
 * Builds a single multi-polygon geometry vector with morton-coded vertices
 * and an identity vertex-offset buffer.
 */
export function encodeMultiPolygonGeometryVectorWithMortonOffsets(polygons) {
    const mortonCodes = [];
    const totalRings = polygons.reduce((acc, polygon) => acc + polygon.length, 0);
    const ringOffsets = new Uint32Array(totalRings + 1);
    const partOffsets = new Uint32Array(polygons.length + 1);
    let ringSlot = 1;
    let vertexTotal = 0;
    polygons.forEach((polygon, p) => {
        for (const ring of polygon) {
            for (const [px, py] of ring) {
                mortonCodes.push(encode(px, py));
            }
            vertexTotal += ring.length;
            ringOffsets[ringSlot++] = vertexTotal;
        }
        partOffsets[p + 1] = partOffsets[p] + polygon.length;
    });
    const vertexBuffer = new Int32Array(mortonCodes);
    const offsetBuffer = Uint32Array.from({ length: vertexBuffer.length }, (_, i) => i);
    return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOLYGON, VertexBufferType.MORTON, {
        geometryOffsets: new Uint32Array([0, polygons.length]),
        partOffsets,
        ringOffsets,
    }, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
}
//# sourceMappingURL=constGeometryVectorEncoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,16 @@
/**
 * Encodes a single typeCode as a varint.
 */
export declare function encodeTypeCode(typeCode: number): Uint8Array;
/**
 * Encodes a field name as a length-prefixed UTF-8 string
 * (varint byte length followed by the UTF-8 bytes).
 */
export declare function encodeFieldName(name: string): Uint8Array;
/**
 * Encodes a child count as a varint.
 */
export declare function encodeChildCount(count: number): Uint8Array;
/**
 * Computes typeCode for a scalar field:
 * `10 + scalarType * 2 + (nullable ? 1 : 0)`.
 */
export declare function scalarTypeCode(scalarType: number, nullable: boolean): number;

View File

@@ -0,0 +1,40 @@
import IntWrapper from "../decoding/intWrapper";
import { encodeVarintInt32Value } from "./integerEncodingUtils";
import { concatenateBuffers } from "../decoding/decodingTestUtils";
/**
* Encodes a single typeCode as a varint.
*/
/**
 * Encodes a single typeCode as a varint (at most 5 bytes for int32).
 */
export function encodeTypeCode(typeCode) {
    const scratch = new Uint8Array(5);
    const cursor = new IntWrapper(0);
    encodeVarintInt32Value(typeCode, scratch, cursor);
    return scratch.slice(0, cursor.get());
}
/**
* Encodes a field name as a length-prefixed UTF-8 string.
*/
/**
 * Encodes a field name as a length-prefixed UTF-8 string:
 * varint byte length followed by the UTF-8 bytes.
 */
export function encodeFieldName(name) {
    const nameBytes = new TextEncoder().encode(name);
    const scratch = new Uint8Array(5);
    const cursor = new IntWrapper(0);
    encodeVarintInt32Value(nameBytes.length, scratch, cursor);
    const lengthPrefix = scratch.slice(0, cursor.get());
    return concatenateBuffers(lengthPrefix, nameBytes);
}
/**
* Encodes a child count as a varint.
*/
/**
 * Encodes a child count as a varint (at most 5 bytes for int32).
 */
export function encodeChildCount(count) {
    const scratch = new Uint8Array(5);
    const cursor = new IntWrapper(0);
    encodeVarintInt32Value(count, scratch, cursor);
    return scratch.slice(0, cursor.get());
}
/**
* Computes typeCode for a scalar field.
*/
/**
 * Computes the typeCode for a scalar field:
 * codes start at 10 and come in (non-nullable, nullable) pairs per type.
 */
export function scalarTypeCode(scalarType, nullable) {
    const nullableBit = nullable ? 1 : 0;
    return 10 + 2 * scalarType + nullableBit;
}
//# sourceMappingURL=embeddedTilesetMetadataEncoder.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"embeddedTilesetMetadataEncoder.js","sourceRoot":"","sources":["../../src/encoding/embeddedTilesetMetadataEncoder.ts"],"names":[],"mappings":"AAAA,OAAO,UAAU,MAAM,wBAAwB,CAAC;AAChD,OAAO,EAAE,sBAAsB,EAAE,MAAM,wBAAwB,CAAC;AAChE,OAAO,EAAE,kBAAkB,EAAE,MAAM,+BAA+B,CAAC;AAEnE;;GAEG;AACH,MAAM,UAAU,cAAc,CAAC,QAAgB;IAC3C,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,sBAAsB,CAAC,QAAQ,EAAE,MAAM,EAAE,MAAM,CAAC,CAAC;IACjD,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC;AACzC,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,eAAe,CAAC,IAAY;IACxC,MAAM,WAAW,GAAG,IAAI,WAAW,EAAE,CAAC;IACtC,MAAM,SAAS,GAAG,WAAW,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC;IAC3C,MAAM,SAAS,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACpC,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,sBAAsB,CAAC,SAAS,CAAC,MAAM,EAAE,SAAS,EAAE,MAAM,CAAC,CAAC;IAC5D,MAAM,WAAW,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC;IACrD,OAAO,kBAAkB,CAAC,WAAW,EAAE,SAAS,CAAC,CAAC;AACtD,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,gBAAgB,CAAC,KAAa;IAC1C,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,sBAAsB,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9C,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC;AACzC,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,cAAc,CAAC,UAAkB,EAAE,QAAiB;IAChE,OAAO,EAAE,GAAG,UAAU,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,CAAC","sourcesContent":["import IntWrapper from \"../decoding/intWrapper\";\nimport { encodeVarintInt32Value } from \"./integerEncodingUtils\";\nimport { concatenateBuffers } from \"../decoding/decodingTestUtils\";\n\n/**\n * Encodes a single typeCode as a varint.\n */\nexport function encodeTypeCode(typeCode: number): Uint8Array {\n const buffer = new Uint8Array(5);\n const offset = new IntWrapper(0);\n encodeVarintInt32Value(typeCode, buffer, offset);\n return buffer.slice(0, offset.get());\n}\n\n/**\n * Encodes a field name as a 
length-prefixed UTF-8 string.\n */\nexport function encodeFieldName(name: string): Uint8Array {\n const textEncoder = new TextEncoder();\n const nameBytes = textEncoder.encode(name);\n const lengthBuf = new Uint8Array(5);\n const offset = new IntWrapper(0);\n encodeVarintInt32Value(nameBytes.length, lengthBuf, offset);\n const lengthSlice = lengthBuf.slice(0, offset.get());\n return concatenateBuffers(lengthSlice, nameBytes);\n}\n\n/**\n * Encodes a child count as a varint.\n */\nexport function encodeChildCount(count: number): Uint8Array {\n const buffer = new Uint8Array(5);\n const offset = new IntWrapper(0);\n encodeVarintInt32Value(count, buffer, offset);\n return buffer.slice(0, offset.get());\n}\n\n/**\n * Computes typeCode for a scalar field.\n */\nexport function scalarTypeCode(scalarType: number, nullable: boolean): number {\n return 10 + scalarType * 2 + (nullable ? 1 : 0);\n}\n"]}

View File

@@ -0,0 +1,7 @@
export declare function encodeFloatsLE(values: Float32Array): Uint8Array;
export declare function encodeDoubleLE(values: Float64Array): Uint8Array;
export declare function encodeBooleanRle(values: boolean[]): Uint8Array;
export declare function encodeByteRle(values: Uint8Array): Uint8Array;
export declare function encodeStrings(strings: string[]): Uint8Array;
export declare function createStringLengths(strings: string[]): Uint32Array;
export declare function concatenateBuffers(...buffers: Uint8Array[]): Uint8Array;

View File

@@ -0,0 +1,107 @@
/**
 * Serializes 32-bit floats to little-endian bytes, 4 bytes per value.
 */
export function encodeFloatsLE(values) {
    const out = new Uint8Array(values.length * 4);
    const view = new DataView(out.buffer);
    let byteOffset = 0;
    for (const value of values) {
        view.setFloat32(byteOffset, value, true);
        byteOffset += 4;
    }
    return out;
}
/**
 * Serializes 64-bit floats to little-endian bytes, 8 bytes per value.
 */
export function encodeDoubleLE(values) {
    const bytesPerValue = Float64Array.BYTES_PER_ELEMENT;
    const out = new Uint8Array(values.length * bytesPerValue);
    const view = new DataView(out.buffer);
    values.forEach((value, index) => {
        view.setFloat64(index * bytesPerValue, value, true);
    });
    return out;
}
/**
 * Bit-packs booleans (8 per byte, LSB-first) and emits them as ORC-style
 * RLE literal groups.
 *
 * A literal header byte encodes -(numLiterals) as `256 - n`, which is only
 * valid for 1..128 literals. The previous implementation wrote a single
 * `256 - numBytes` header: for empty input it emitted a bogus `0` (run)
 * header with no payload, and for more than 128 packed bytes the header
 * wrapped into the run-header range. Packed bytes are now split into groups
 * of at most 128, and empty input yields an empty buffer.
 *
 * @param values booleans to encode
 * @returns RLE-framed packed bytes
 */
export function encodeBooleanRle(values) {
    // Pack booleans into bytes (8 booleans per byte, LSB-first).
    const numBytes = Math.ceil(values.length / 8);
    const packed = new Uint8Array(numBytes);
    for (let i = 0; i < values.length; i++) {
        if (values[i]) {
            packed[i >> 3] |= 1 << (i & 7);
        }
    }
    // Frame the packed bytes as literal groups of at most 128 bytes each.
    const parts = [];
    for (let start = 0; start < numBytes; start += 128) {
        const end = Math.min(start + 128, numBytes);
        parts.push(256 - (end - start)); // literal header: 256 - n, n in 1..128
        for (let i = start; i < end; i++) {
            parts.push(packed[i]);
        }
    }
    return new Uint8Array(parts);
}
/**
 * Encodes bytes with an ORC-style byte RLE scheme: runs of 3..130 identical
 * bytes become [runLength - 3, byte]; other bytes are grouped into literal
 * sections [256 - numLiterals, bytes...] of at most 128 literals.
 *
 * Bug fix: the run scan previously allowed runLength to reach 131 while the
 * header was clamped to 127 (a 130-value run), so the cursor advanced one
 * byte further than the header described and that byte was silently lost.
 * Runs are now capped at 130, the longest encodable run.
 *
 * @param values bytes to encode
 * @returns RLE-encoded bytes
 */
export function encodeByteRle(values) {
    const result = [];
    let i = 0;
    while (i < values.length) {
        const currentByte = values[i];
        // Measure the run at i; a run header stores (runLength - 3) in one
        // byte with values 0..127, so the maximum run is 130.
        let runLength = 1;
        while (i + runLength < values.length && values[i + runLength] === currentByte && runLength < 130) {
            runLength++;
        }
        if (runLength >= 3) {
            result.push(runLength - 3);
            result.push(currentByte);
            i += runLength;
        }
        else {
            // Literal mode: gather bytes until a run of 3 starts or the
            // 128-literal group limit is hit.
            const literalStart = i;
            while (i < values.length) {
                let nextRunLength = 1;
                if (i + 1 < values.length) {
                    while (i + nextRunLength < values.length &&
                        values[i + nextRunLength] === values[i] &&
                        nextRunLength < 3) {
                        nextRunLength++;
                    }
                }
                if (nextRunLength >= 3) {
                    break;
                }
                i++;
                if (i - literalStart >= 128) {
                    break;
                }
            }
            const numLiterals = i - literalStart;
            // Literal header encodes -numLiterals as an unsigned byte.
            result.push(256 - numLiterals);
            for (let j = literalStart; j < i; j++) {
                result.push(values[j]);
            }
        }
    }
    return new Uint8Array(result);
}
/**
 * Concatenates the UTF-8 encodings of all strings into one buffer
 * (no separators; lengths must be tracked separately).
 */
export function encodeStrings(strings) {
    const textEncoder = new TextEncoder();
    const chunks = strings.map((value) => textEncoder.encode(value));
    let total = 0;
    for (const chunk of chunks) {
        total += chunk.length;
    }
    const out = new Uint8Array(total);
    let cursor = 0;
    for (const chunk of chunks) {
        out.set(chunk, cursor);
        cursor += chunk.length;
    }
    return out;
}
/**
 * Returns the UTF-8 byte length of each input string.
 */
export function createStringLengths(strings) {
    const textEncoder = new TextEncoder();
    return Uint32Array.from(strings, (value) => textEncoder.encode(value).length);
}
/**
 * Concatenates byte buffers in argument order into a single new buffer.
 */
export function concatenateBuffers(...buffers) {
    let total = 0;
    for (const buffer of buffers) {
        total += buffer.length;
    }
    const merged = new Uint8Array(total);
    let cursor = 0;
    for (const buffer of buffers) {
        merged.set(buffer, cursor);
        cursor += buffer.length;
    }
    return merged;
}
//# sourceMappingURL=encodingUtils.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,18 @@
/**
 * Internal workspace for the FastPFOR encoder.
 * Exposed so callers can avoid allocations.
 * Use one workspace per concurrent encode call.
 */
export type FastPforEncoderWorkspace = {
    dataToBePacked: Array<Uint32Array | undefined>;
    dataPointers: Int32Array;
    byteContainer: Uint8Array;
    bitWidthFrequencies: Int32Array;
    bestBitWidthPlan: Int32Array;
};
/**
 * Bit-packs 32 values from `inValues` (starting at `inPos`) into `out`
 * (starting at word `outPos`), using `bitWidth` bits per value; writes
 * `bitWidth` output words.
 */
export declare function fastPack32(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number, bitWidth: number): void;
/** Allocates the reusable scratch buffers for one encoder instance. */
export declare function createFastPforEncoderWorkspace(): FastPforEncoderWorkspace;
/**
 * Encodes an int32 stream using the FastPFOR wire format (pages + VByte tail).
 */
export declare function encodeFastPforInt32WithWorkspace(values: Uint32Array, workspace: FastPforEncoderWorkspace): Uint32Array;

View File

@@ -0,0 +1,310 @@
import { MASKS, DEFAULT_PAGE_SIZE, BLOCK_SIZE, greatestMultiple, roundUpToMultipleOf32, normalizePageSize, } from "../decoding/fastPforShared";
const EXCEPTION_OVERHEAD_BITS = 8;
const MAX_BIT_WIDTH = 32;
const BIT_WIDTH_SLOTS = MAX_BIT_WIDTH + 1;
const PAGE_SIZE = normalizePageSize(DEFAULT_PAGE_SIZE);
const INITIAL_PACKED_BUFFER_SIZE_WORDS = (PAGE_SIZE / 32) * 4;
const BYTE_CONTAINER_SIZE = ((3 * PAGE_SIZE) / BLOCK_SIZE + PAGE_SIZE) | 0;
// Number of significant bits in the unsigned 32-bit view of `value`
// (0 for value === 0).
function requiredBits(value) {
    const unsigned = value >>> 0;
    return unsigned === 0 ? 0 : 32 - Math.clz32(unsigned);
}
// Returns `buffer` if it can hold `requiredLength` words; otherwise returns a
// copy grown by doubling until it fits (minimum capacity 1).
function ensureInt32Capacity(buffer, requiredLength) {
    if (buffer.length >= requiredLength) {
        return buffer;
    }
    let capacity = Math.max(1, buffer.length);
    while (capacity < requiredLength) {
        capacity *= 2;
    }
    const grown = new Uint32Array(capacity);
    grown.set(buffer);
    return grown;
}
// Returns `buffer` if it can hold `requiredLength` bytes; otherwise returns a
// copy grown by doubling until it fits (minimum capacity 1).
function ensureUint8Capacity(buffer, requiredLength) {
    if (buffer.length >= requiredLength) {
        return buffer;
    }
    let capacity = Math.max(1, buffer.length);
    while (capacity < requiredLength) {
        capacity *= 2;
    }
    const grown = new Uint8Array(capacity);
    grown.set(buffer);
    return grown;
}
/**
 * Bit-packs 32 consecutive values from `inValues` (starting at `inPos`) into
 * `out` (starting at word `outPos`), using exactly `bitWidth` bits per value,
 * so `bitWidth` output words are written. Input values are masked down to
 * `bitWidth` bits before packing.
 */
export function fastPack32(inValues, inPos, out, outPos, bitWidth) {
    // Width 0: every value packs to zero bits — nothing to store.
    if (bitWidth === 0)
        return;
    // Width 32: values are copied verbatim, one word each.
    if (bitWidth === 32) {
        out.set(inValues.subarray(inPos, inPos + 32), outPos);
        return;
    }
    const mask = MASKS[bitWidth] >>> 0;
    let outputWordIndex = outPos;
    let bitOffset = 0; // write position (in bits) inside the current word
    let currentWord = 0; // accumulator for the word being assembled
    for (let i = 0; i < 32; i++) {
        const value = (inValues[inPos + i] >>> 0) & mask;
        if (bitOffset + bitWidth <= 32) {
            // Value fits entirely within the current output word.
            currentWord |= value << bitOffset;
            bitOffset += bitWidth;
            if (bitOffset === 32) {
                out[outputWordIndex++] = currentWord | 0;
                bitOffset = 0;
                currentWord = 0;
            }
        }
        else {
            // Value straddles a word boundary: the low bits complete the
            // current word, the high bits seed the next one.
            const lowBits = 32 - bitOffset;
            const lowMask = MASKS[lowBits] >>> 0;
            currentWord |= (value & lowMask) << bitOffset;
            out[outputWordIndex++] = currentWord | 0;
            currentWord = value >>> lowBits;
            bitOffset = bitWidth - lowBits;
        }
    }
}
/**
 * Allocates the reusable scratch buffers for one FastPFOR encoder instance.
 * Exception buffers are indexed by bit width (1..32); slot 0 stays unused.
 */
export function createFastPforEncoderWorkspace() {
    const dataToBePacked = new Array(BIT_WIDTH_SLOTS);
    for (let bitWidth = 1; bitWidth < BIT_WIDTH_SLOTS; bitWidth++) {
        dataToBePacked[bitWidth] = new Uint32Array(INITIAL_PACKED_BUFFER_SIZE_WORDS);
    }
    const workspace = {
        dataToBePacked,
        dataPointers: new Int32Array(BIT_WIDTH_SLOTS),
        byteContainer: new Uint8Array(BYTE_CONTAINER_SIZE),
        bitWidthFrequencies: new Int32Array(BIT_WIDTH_SLOTS),
        bestBitWidthPlan: new Int32Array(3),
    };
    return workspace;
}
/**
 * Picks the cheapest base bit width for one block by weighing the cost of
 * storing wide values as exceptions against the savings of a narrower base
 * width. Writes [bestBitWidth, exceptionCount, maxBitWidth] into
 * workspace.bestBitWidthPlan.
 */
function computeBestBitWidthPlan(inValues, pos, workspace) {
    const bitWidthFrequencies = workspace.bitWidthFrequencies;
    const bestBitWidthPlan = workspace.bestBitWidthPlan;
    bitWidthFrequencies.fill(0);
    // Histogram of required bit widths over the block.
    for (let k = pos, kEnd = pos + BLOCK_SIZE; k < kEnd; k++) {
        bitWidthFrequencies[requiredBits(inValues[k])]++;
    }
    // Widest width actually present in the block.
    let maxBitWidth = MAX_BIT_WIDTH;
    while (bitWidthFrequencies[maxBitWidth] === 0)
        maxBitWidth--;
    let bestBitWidth = maxBitWidth;
    let bestCost = maxBitWidth * BLOCK_SIZE;
    let exceptionCount = 0;
    let bestExceptionCount = exceptionCount;
    // Try narrower candidate widths; each step down turns the values counted
    // at (candidate + 1) bits into exceptions.
    for (let candidateBitWidth = maxBitWidth - 1; candidateBitWidth >= 0; candidateBitWidth--) {
        exceptionCount += bitWidthFrequencies[candidateBitWidth + 1];
        if (exceptionCount === BLOCK_SIZE)
            break;
        // Cost model (bits): per-exception position overhead + exception
        // payload bits + packed base bits + fixed block-header bits.
        let candidateCost = exceptionCount * EXCEPTION_OVERHEAD_BITS +
            exceptionCount * (maxBitWidth - candidateBitWidth) +
            candidateBitWidth * BLOCK_SIZE +
            8;
        // 1-bit-wide exceptions carry no payload (only a position byte — see
        // recordBlockExceptions), so their payload cost is refunded.
        if (maxBitWidth - candidateBitWidth === 1)
            candidateCost -= exceptionCount;
        if (candidateCost < bestCost) {
            bestCost = candidateCost;
            bestBitWidth = candidateBitWidth;
            bestExceptionCount = exceptionCount;
        }
    }
    bestBitWidthPlan[0] = bestBitWidth;
    bestBitWidthPlan[1] = bestExceptionCount;
    bestBitWidthPlan[2] = maxBitWidth;
}
// Appends one byte to the workspace byte container, growing it on demand.
// Returns the next write position.
function writeByte(workspace, byteContainerPos, byteValue) {
    if (byteContainerPos >= workspace.byteContainer.length) {
        workspace.byteContainer = ensureUint8Capacity(workspace.byteContainer, byteContainerPos + 1);
    }
    workspace.byteContainer[byteContainerPos] = byteValue & 0xff;
    const nextPos = byteContainerPos + 1;
    return nextPos;
}
/**
 * Grows the per-width exception value buffer so it can accept
 * `exceptionCount` more values. Width-1 exceptions store no value (their
 * payload is implied by position — see recordBlockExceptions), so nothing
 * is needed for that width.
 */
function ensureExceptionValuesCapacity(dataToBePacked, dataPointers, exceptionBitWidth, exceptionCount) {
    if (exceptionBitWidth === 1)
        return;
    const needed = dataPointers[exceptionBitWidth] + exceptionCount;
    const currentExceptionValues = dataToBePacked[exceptionBitWidth];
    if (!currentExceptionValues || needed >= currentExceptionValues.length) {
        // Double the requirement and round up to a multiple of 32 so
        // fastPack32 can always read full 32-value groups.
        let newSize = 2 * needed;
        newSize = roundUpToMultipleOf32(newSize);
        const next = new Uint32Array(newSize);
        if (currentExceptionValues)
            next.set(currentExceptionValues);
        dataToBePacked[exceptionBitWidth] = next;
    }
}
// Writes a block header into the byte container: base bit width, exception
// count, and (only when exceptions exist) the block's maximum bit width.
// Returns the updated byte container position.
function writeBlockHeader(workspace, byteContainerPos, bitWidth, exceptionCount, maxBitWidth) {
    let pos = writeByte(workspace, byteContainerPos, bitWidth);
    pos = writeByte(workspace, pos, exceptionCount);
    return exceptionCount > 0 ? writeByte(workspace, pos, maxBitWidth) : pos;
}
/**
 * Writes the position byte of every exception in the block into the byte
 * container and appends each exception's high bits (value >>> bitWidth) to
 * the per-width exception buffer. Width-1 exceptions record only the
 * position. Returns the updated byte container position.
 */
function recordBlockExceptions(workspace, inValues, blockPos, bitWidth, exceptionCount, exceptionBitWidth, byteContainerPos) {
    if (exceptionCount === 0)
        return byteContainerPos;
    const dataToBePacked = workspace.dataToBePacked;
    const dataPointers = workspace.dataPointers;
    ensureExceptionValuesCapacity(dataToBePacked, dataPointers, exceptionBitWidth, exceptionCount);
    for (let k = 0; k < BLOCK_SIZE; k++) {
        const value = inValues[blockPos + k] >>> 0;
        // A value is an exception when it does not fit in bitWidth bits.
        if (value >>> bitWidth !== 0) {
            byteContainerPos = writeByte(workspace, byteContainerPos, k);
            if (exceptionBitWidth !== 1) {
                const exceptionValues = dataToBePacked[exceptionBitWidth];
                exceptionValues[dataPointers[exceptionBitWidth]++] = (value >>> bitWidth) | 0;
            }
        }
    }
    return byteContainerPos;
}
// Bit-packs one block (BLOCK_SIZE values) into the output stream, 32 values
// at a time; each 32-value group consumes `bitWidth` output words.
function packBlock(inValues, blockPos, bitWidth, state) {
    let lane = 0;
    while (lane < BLOCK_SIZE) {
        state.out = ensureInt32Capacity(state.out, state.outPos + bitWidth);
        fastPack32(inValues, blockPos + lane, state.out, state.outPos, bitWidth);
        state.outPos += bitWidth;
        lane += 32;
    }
}
// Appends zero bytes until the byte container position is word-aligned.
// Returns the aligned position.
function padByteContainerToInt32(workspace, byteContainerPos) {
    let pos = byteContainerPos;
    while (pos % 4 !== 0) {
        pos = writeByte(workspace, pos, 0);
    }
    return pos;
}
// Copies the (word-aligned) byte container into the output stream, packing
// bytes little-endian into 32-bit words.
function writeByteContainerInts(workspace, state, byteContainerPos) {
    const wordCount = byteContainerPos / 4;
    state.out = ensureInt32Capacity(state.out, state.outPos + wordCount);
    const bytes = workspace.byteContainer;
    let readPos = 0;
    for (let word = 0; word < wordCount; word++) {
        const packedWord = (bytes[readPos] |
            (bytes[readPos + 1] << 8) |
            (bytes[readPos + 2] << 16) |
            (bytes[readPos + 3] << 24)) | 0;
        state.out[state.outPos + word] = packedWord;
        readPos += 4;
    }
    state.outPos += wordCount;
}
// Builds a 32-bit bitmap of which exception bit widths (2..32) have pending
// values; width k sets bit (k - 1).
function computeExceptionBitmap(dataPointers) {
    let bitmap = 0;
    for (let bitWidth = 2; bitWidth <= MAX_BIT_WIDTH; bitWidth++) {
        if (dataPointers[bitWidth] === 0) {
            continue;
        }
        bitmap |= bitWidth === MAX_BIT_WIDTH ? 0x80000000 : 1 << (bitWidth - 1);
    }
    return bitmap;
}
/**
 * Appends the exception section of a page: a 32-bit bitmap of present
 * exception widths, then for each present width its value count followed by
 * the values bit-packed in 32-value groups.
 */
function writeExceptionStreams(workspace, state) {
    const dataPointers = workspace.dataPointers;
    const dataToBePacked = workspace.dataToBePacked;
    const bitmap = computeExceptionBitmap(dataPointers);
    state.out = ensureInt32Capacity(state.out, state.outPos + 1);
    state.out[state.outPos++] = bitmap;
    for (let k = 2; k <= MAX_BIT_WIDTH; k++) {
        const size = dataPointers[k];
        if (size !== 0) {
            state.out = ensureInt32Capacity(state.out, state.outPos + 1);
            state.out[state.outPos++] = size | 0;
            let j = 0;
            // Pack in full 32-value groups; the final group may extend past
            // `size` into buffer padding.
            for (; j < size; j += 32) {
                const exceptionValues = dataToBePacked[k];
                state.out = ensureInt32Capacity(state.out, state.outPos + k);
                fastPack32(exceptionValues, j, state.out, state.outPos, k);
                state.outPos += k;
            }
            // Rewind the words that contain only padding values beyond
            // `size`: overflow values * k bits each = (overflow * k) / 32 words.
            const overflow = j - size;
            state.outPos -= (overflow * k) >>> 5;
        }
    }
}
/**
 * Encodes one page (up to PAGE_SIZE values) starting at state.inPos:
 * a length-prefix word, the bit-packed blocks, the byte container (block
 * headers + exception positions, word-packed), then the exception value
 * streams. Advances state.inPos by thisSize.
 */
function encodePage(inValues, thisSize, state, workspace) {
    // Reserve one word for the page length prefix; patched below.
    const headerPos = state.outPos;
    state.out = ensureInt32Capacity(state.out, headerPos + 1);
    state.outPos = (state.outPos + 1) | 0;
    const dataPointers = workspace.dataPointers;
    dataPointers.fill(0);
    let byteContainerPos = 0;
    let tmpInPos = state.inPos;
    const finalInPos = tmpInPos + thisSize - BLOCK_SIZE;
    for (; tmpInPos <= finalInPos; tmpInPos += BLOCK_SIZE) {
        computeBestBitWidthPlan(inValues, tmpInPos, workspace);
        const bestBitWidthPlan = workspace.bestBitWidthPlan;
        const bitWidth = bestBitWidthPlan[0];
        const exceptionCount = bestBitWidthPlan[1];
        const maxBitWidth = bestBitWidthPlan[2];
        const exceptionBitWidth = exceptionCount > 0 ? maxBitWidth - bitWidth : 0;
        byteContainerPos = writeBlockHeader(workspace, byteContainerPos, bitWidth, exceptionCount, maxBitWidth);
        byteContainerPos = recordBlockExceptions(workspace, inValues, tmpInPos, bitWidth, exceptionCount, exceptionBitWidth, byteContainerPos);
        packBlock(inValues, tmpInPos, bitWidth, state);
    }
    // Patch the prefix with the distance (in words) from the prefix to the
    // end of the packed block data.
    const pageEndOutPos = state.outPos;
    state.inPos = tmpInPos;
    state.out[headerPos] = (pageEndOutPos - headerPos) | 0;
    // Byte container: write the unpadded byte size first, then the bytes
    // packed little-endian into whole words.
    const byteSize = byteContainerPos;
    byteContainerPos = padByteContainerToInt32(workspace, byteContainerPos);
    state.out = ensureInt32Capacity(state.out, state.outPos + 1);
    state.out[state.outPos++] = byteSize | 0;
    writeByteContainerInts(workspace, state, byteContainerPos);
    writeExceptionStreams(workspace, state);
}
// Splits the block-aligned prefix of the input into pages of at most
// PAGE_SIZE values and encodes each in turn.
function encodeAlignedPages(inValues, inLength, state, workspace) {
    const endInPos = state.inPos + greatestMultiple(inLength, BLOCK_SIZE);
    while (state.inPos !== endInPos) {
        const pageSize = Math.min(PAGE_SIZE, endInPos - state.inPos);
        encodePage(inValues, pageSize, state, workspace);
    }
}
// Writes the aligned-length prefix word, then encodes all full blocks.
// Leftover values (inLength % BLOCK_SIZE) are handled separately via VByte.
function encode(inValues, inLength, state, workspace) {
    const alignedLength = greatestMultiple(inLength, BLOCK_SIZE);
    state.out = ensureInt32Capacity(state.out, state.outPos + 1);
    state.out[state.outPos++] = alignedLength;
    if (alignedLength !== 0) {
        encodeAlignedPages(inValues, alignedLength, state, workspace);
    }
}
/**
 * VByte encoding for FastPFOR tail values (MSB=1 terminator).
 * Note: Inverts standard Protobuf Varint (MSB=0 terminator), so we cannot
 * reuse generic methods. Encoded bytes are zero-padded to a word boundary
 * and packed little-endian into output words.
 */
function encodeVByte(inValues, inLength, state, workspace) {
    if (inLength === 0)
        return;
    // Worst case 5 bytes per 32-bit value, plus up to 3 padding bytes.
    const requiredBytes = inLength * 5 + 3;
    workspace.byteContainer = ensureUint8Capacity(workspace.byteContainer, requiredBytes);
    const start = state.inPos;
    let bytePos = 0;
    for (let k = start; k < start + inLength; k++) {
        let value = inValues[k] >>> 0;
        // Continuation bytes keep MSB clear; the final byte sets MSB.
        while (value >= 0x80) {
            workspace.byteContainer[bytePos++] = value & 0x7f;
            value >>>= 7;
        }
        workspace.byteContainer[bytePos++] = (value | 0x80) & 0xff;
    }
    // Zero-pad to a whole number of 32-bit words.
    while ((bytePos & 3) !== 0)
        workspace.byteContainer[bytePos++] = 0;
    const intsToWrite = bytePos / 4;
    state.out = ensureInt32Capacity(state.out, state.outPos + intsToWrite);
    let outIdx = state.outPos;
    for (let i = 0; i < bytePos; i += 4) {
        const packedWord = workspace.byteContainer[i] |
            (workspace.byteContainer[i + 1] << 8) |
            (workspace.byteContainer[i + 2] << 16) |
            (workspace.byteContainer[i + 3] << 24) |
            0;
        state.out[outIdx++] = packedWord;
    }
    state.outPos = outIdx;
    state.inPos = (state.inPos + inLength) | 0;
}
/**
 * Encodes an int32 stream using the FastPFOR wire format: block-aligned
 * pages first, then the unaligned tail as VByte.
 */
export function encodeFastPforInt32WithWorkspace(values, workspace) {
    const state = {
        inPos: 0,
        outPos: 0,
        out: new Uint32Array(values.length + 1024),
    };
    encode(values, values.length, state, workspace);
    const tailCount = values.length - state.inPos;
    encodeVByte(values, tailCount, state, workspace);
    return state.out.subarray(0, state.outPos);
}
//# sourceMappingURL=fastPforEncoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,21 @@
/**
* Create symbol table from string array
*
* @param symbolStrings Array of symbol strings
* @returns Symbol table buffer and lengths
*/
export declare function createSymbolTable(symbolStrings: string[]): {
symbols: Uint8Array;
symbolLengths: Uint32Array;
};
/**
* Encode data using FSST compression with pre-defined symbol table
* Encoder requires pre-defined symbol table. Real FSST learns optimal symbols from data. This
* implementation is for testing decoder only.
*
* @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
* @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
* @param uncompressedData Data to compress
* @returns FSST compressed data, where each entry is an index to the symbols array
*/
export declare function encodeFsst(symbols: Uint8Array, symbolLengths: Uint32Array, uncompressedData: Uint8Array): Uint8Array;

View File

@@ -0,0 +1,78 @@
/**
 * Create symbol table from string array.
 *
 * @param symbolStrings Array of symbol strings
 * @returns Concatenated UTF-8 symbol buffer plus per-symbol byte lengths
 */
export function createSymbolTable(symbolStrings) {
    const textEncoder = new TextEncoder();
    const encodedSymbols = symbolStrings.map((s) => textEncoder.encode(s));
    let totalLength = 0;
    for (const encoded of encodedSymbols) {
        totalLength += encoded.length;
    }
    const symbols = new Uint8Array(totalLength);
    const symbolLengths = new Uint32Array(encodedSymbols.length);
    let writePos = 0;
    encodedSymbols.forEach((encoded, index) => {
        symbolLengths[index] = encoded.length;
        symbols.set(encoded, writePos);
        writePos += encoded.length;
    });
    return { symbols, symbolLengths };
}
/**
 * Encode data using FSST compression with pre-defined symbol table.
 * Encoder requires pre-defined symbol table. Real FSST learns optimal symbols
 * from data. This implementation is for testing decoder only.
 *
 * At each position the longest matching symbol wins (first symbol of that
 * length on ties); bytes with no match are emitted as an escape pair
 * (255 followed by the literal byte).
 *
 * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
 * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
 * @param uncompressedData Data to compress
 * @returns FSST compressed data, where each entry is an index to the symbols array
 */
export function encodeFsst(symbols, symbolLengths, uncompressedData) {
    if (uncompressedData.length === 0) {
        return new Uint8Array(0);
    }
    // Prefix-sum of lengths gives each symbol's start offset in `symbols`.
    const symbolOffsets = new Array(symbolLengths.length).fill(0);
    for (let i = 1; i < symbolLengths.length; i++) {
        symbolOffsets[i] = symbolOffsets[i - 1] + symbolLengths[i - 1];
    }
    // Byte-wise comparison of one symbol against the data at `pos`.
    const matchesAt = (pos, symbolOffset, symbolLength) => {
        if (pos + symbolLength > uncompressedData.length) {
            return false;
        }
        for (let i = 0; i < symbolLength; i++) {
            if (uncompressedData[pos + i] !== symbols[symbolOffset + i]) {
                return false;
            }
        }
        return true;
    };
    const output = [];
    let pos = 0;
    while (pos < uncompressedData.length) {
        let bestIndex = -1;
        let bestLength = 0;
        for (let s = 0; s < symbolLengths.length; s++) {
            const candidateLength = symbolLengths[s];
            if (candidateLength > bestLength && matchesAt(pos, symbolOffsets[s], candidateLength)) {
                bestIndex = s;
                bestLength = candidateLength;
            }
        }
        if (bestIndex >= 0) {
            output.push(bestIndex);
            pos += bestLength;
        }
        else {
            // Escape sequence: 255 followed by the literal byte.
            output.push(255, uncompressedData[pos]);
            pos++;
        }
    }
    return new Uint8Array(output);
}
//# sourceMappingURL=fsstEncoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,68 @@
import IntWrapper from "../decoding/intWrapper";
export declare function encodeVarintInt32Value(value: number, dst: Uint8Array, offset: IntWrapper): void;
export declare function encodeVarintInt32(values: Uint32Array): Uint8Array;
export declare function encodeVarintInt64(values: BigUint64Array): Uint8Array;
export declare function encodeVarintFloat64(values: Float64Array): Uint8Array;
export declare function encodeFastPfor(values: Uint32Array): Uint8Array;
export declare function encodeZigZagInt32Value(value: number): number;
export declare function encodeZigZagInt64Value(value: bigint): bigint;
export declare function encodeZigZagFloat64Value(n: number): number;
export declare function encodeZigZagInt32(data: Int32Array): Uint32Array;
export declare function encodeZigZagInt64(data: BigInt64Array): BigUint64Array;
export declare function encodeZigZagFloat64(data: Float64Array): void;
export declare function encodeUnsignedRleInt32(input: Uint32Array): {
data: Uint32Array;
runs: number;
};
export declare function encodeUnsignedRleInt64(input: BigInt64Array): {
data: BigUint64Array;
runs: number;
};
export declare function encodeUnsignedRleFloat64(input: Float64Array): {
data: Float64Array;
runs: number;
};
export declare function encodeZigZagDeltaInt32(data: Int32Array): Uint32Array;
export declare function encodeZigZagDeltaInt64(data: BigInt64Array): BigUint64Array;
export declare function encodeZigZagDeltaFloat64(data: Float64Array): void;
export declare function encodeZigZagRleInt32(input: Int32Array): {
data: Uint32Array;
runs: number;
numTotalValues: number;
};
export declare function encodeZigZagRleInt64(input: BigInt64Array): {
data: BigUint64Array;
runs: number;
numTotalValues: number;
};
export declare function encodeZigZagRleFloat64(input: Float64Array): {
data: Float64Array;
runs: number;
numTotalValues: number;
};
/**
* This is not really a encode, but more of a decode method...
*/
export declare function encodeDeltaInt32(data: Int32Array | Uint32Array): void;
export declare function encodeComponentwiseDeltaVec2(data: Int32Array): Uint32Array;
export declare function encodeComponentwiseDeltaVec2Scaled(data: Int32Array, scale: number): Uint32Array;
export declare function encodeZigZagRleDeltaInt32(values: Int32Array | number[]): {
data: Uint32Array;
runs: number;
numTotalValues: number;
};
export declare function encodeRleDeltaInt32(values: Uint32Array | number[]): {
data: Uint32Array;
runs: number;
numTotalValues: number;
};
export declare function encodeDeltaRleInt32(input: Int32Array): {
data: Uint32Array;
runs: number;
numValues: number;
};
export declare function encodeDeltaRleInt64(input: BigInt64Array): {
data: BigUint64Array;
runs: number;
numValues: number;
};

View File

@@ -0,0 +1,671 @@
import IntWrapper from "../decoding/intWrapper";
import { createFastPforEncoderWorkspace, encodeFastPforInt32WithWorkspace } from "./fastPforEncoder";
import { encodeBigEndianInt32s } from "./bigEndianEncode";
// Writes one unsigned value as a Protobuf-style varint (MSB = continuation)
// into `dst`, advancing `offset` by the number of bytes written.
export function encodeVarintInt32Value(value, dst, offset) {
    let remaining = value;
    while (remaining > 0x7f) {
        dst[offset.get()] = 0x80 | (remaining & 0x7f);
        offset.increment();
        remaining >>>= 7;
    }
    dst[offset.get()] = remaining & 0x7f;
    offset.increment();
}
// Varint-encodes an entire Uint32Array; worst case is 5 bytes per value, and
// the scratch buffer is trimmed to the bytes actually written.
export function encodeVarintInt32(values) {
    const scratch = new Uint8Array(values.length * 5);
    const cursor = new IntWrapper(0);
    values.forEach((value) => encodeVarintInt32Value(value, scratch, cursor));
    return scratch.slice(0, cursor.get());
}
// Varint-encodes an entire BigUint64Array; worst case is 10 bytes per value,
// and the scratch buffer is trimmed to the bytes actually written.
export function encodeVarintInt64(values) {
    const scratch = new Uint8Array(values.length * 10);
    const cursor = new IntWrapper(0);
    values.forEach((value) => encodeVarintInt64Value(value, scratch, cursor));
    return scratch.slice(0, cursor.get());
}
// Writes one bigint as a Protobuf-style varint (MSB = continuation) into
// `dst`, advancing `offset` by the number of bytes written.
function encodeVarintInt64Value(value, dst, offset) {
    let remaining = value;
    while (remaining > 0x7fn) {
        dst[offset.get()] = Number(remaining & 0x7fn) | 0x80;
        offset.increment();
        remaining >>= 7n;
    }
    dst[offset.get()] = Number(remaining & 0x7fn);
    offset.increment();
}
// Varint-encodes float-stored integers. A first pass computes the exact
// encoded size (negatives are sized as zero, matching the value encoder's
// one-byte output for them) so the result buffer needs no trimming.
export function encodeVarintFloat64(values) {
    // Pass 1: exact byte count.
    let totalBytes = 0;
    for (const raw of values) {
        const value = raw < 0 ? 0 : Math.floor(raw);
        if (value === 0) {
            totalBytes += 1;
            continue;
        }
        let remaining = value;
        while (remaining > 0) {
            totalBytes += 1;
            remaining = Math.floor(remaining / 128);
        }
    }
    // Pass 2: encode into the exactly-sized buffer.
    const dst = new Uint8Array(totalBytes);
    const cursor = new IntWrapper(0);
    for (const raw of values) {
        encodeVarintFloat64Value(raw, dst, cursor);
    }
    return dst;
}
/**
 * Encodes a single number into the buffer at the given offset using Varint
 * encoding. Handles numbers up to 2^53 (MAX_SAFE_INTEGER) correctly by using
 * division/modulo instead of 32-bit shifts.
 */
function encodeVarintFloat64Value(val, buf, offset) {
    let remaining = Math.floor(val);
    // Zero is a single 0x00 byte.
    if (remaining === 0) {
        buf[offset.get()] = 0;
        offset.increment();
        return;
    }
    while (remaining >= 128) {
        // 7 data bits plus the continuation bit.
        buf[offset.get()] = (remaining % 128) | 0x80;
        offset.increment();
        remaining = Math.floor(remaining / 128);
    }
    // Final byte carries no continuation bit.
    buf[offset.get()] = remaining;
    offset.increment();
}
// FastPFOR-encodes the values with a throwaway workspace, then serializes
// the resulting words as big-endian bytes.
export function encodeFastPfor(values) {
    const workspace = createFastPforEncoderWorkspace();
    return encodeBigEndianInt32s(encodeFastPforInt32WithWorkspace(values, workspace));
}
// ZigZag-maps a signed 32-bit value to unsigned: 0,-1,1,-2,... -> 0,1,2,3,...
export function encodeZigZagInt32Value(value) {
    const signMask = value >> 31;
    return (value << 1) ^ signMask;
}
// ZigZag-maps a signed 64-bit bigint to unsigned: 0,-1,1,-2,... -> 0,1,2,3,...
export function encodeZigZagInt64Value(value) {
    const signMask = value >> 63n;
    return (value << 1n) ^ signMask;
}
// Float-arithmetic ZigZag: non-negative n -> 2n, negative n -> -2n - 1.
export function encodeZigZagFloat64Value(n) {
    if (n >= 0) {
        return 2 * n;
    }
    return -2 * n - 1;
}
// ZigZag-maps every element of a signed array into a new unsigned array.
export function encodeZigZagInt32(data) {
    const encoded = new Uint32Array(data.length);
    for (let i = 0; i < data.length; i++) {
        const value = data[i];
        encoded[i] = (value << 1) ^ (value >> 31);
    }
    return encoded;
}
// ZigZag-maps every element of a signed 64-bit array into a new unsigned array.
export function encodeZigZagInt64(data) {
    const encoded = new BigUint64Array(data.length);
    for (let i = 0; i < data.length; i++) {
        const value = data[i];
        encoded[i] = (value << 1n) ^ (value >> 63n);
    }
    return encoded;
}
// ZigZag-maps every element in place using float arithmetic.
export function encodeZigZagFloat64(data) {
    for (let i = 0; i < data.length; i++) {
        const value = data[i];
        data[i] = value >= 0 ? value * 2 : value * -2 - 1;
    }
}
// Run-length encodes unsigned values. Output layout: the first `runs` slots
// hold run lengths, the next `runs` slots hold the corresponding values.
export function encodeUnsignedRleInt32(input) {
    if (input.length === 0) {
        return { data: new Uint32Array(0), runs: 0 };
    }
    const lengths = [];
    const runValues = [];
    let currentValue = input[0];
    let currentLength = 0;
    for (const next of input) {
        if (next === currentValue) {
            currentLength++;
        }
        else {
            // Close the current run and open a new one.
            lengths.push(currentLength);
            runValues.push(currentValue);
            currentValue = next;
            currentLength = 1;
        }
    }
    // Close the trailing run.
    lengths.push(currentLength);
    runValues.push(currentValue);
    const runs = lengths.length;
    const data = new Uint32Array(runs * 2);
    data.set(lengths, 0);
    data.set(runValues, runs);
    return { data, runs };
}
// Run-length encodes 64-bit values. Output layout: the first `runs` slots
// hold run lengths (as bigints), the next `runs` slots hold the values.
export function encodeUnsignedRleInt64(input) {
    if (input.length === 0) {
        return { data: new BigUint64Array(0), runs: 0 };
    }
    const lengths = [];
    const runValues = [];
    let currentValue = input[0];
    let currentLength = 0;
    for (const next of input) {
        if (next === currentValue) {
            currentLength++;
        }
        else {
            // Close the current run and open a new one.
            lengths.push(currentLength);
            runValues.push(currentValue);
            currentValue = next;
            currentLength = 1;
        }
    }
    // Close the trailing run.
    lengths.push(currentLength);
    runValues.push(currentValue);
    const runs = lengths.length;
    const data = new BigUint64Array(runs * 2);
    // Run lengths are plain numbers; convert for bigint storage.
    for (let i = 0; i < runs; i++) {
        data[i] = BigInt(lengths[i]);
    }
    data.set(runValues, runs);
    return { data, runs };
}
// Run-length encodes float values. Output layout: the first `runs` slots
// hold run lengths, the next `runs` slots hold the corresponding values.
export function encodeUnsignedRleFloat64(input) {
    if (input.length === 0) {
        return { data: new Float64Array(0), runs: 0 };
    }
    const lengths = [];
    const runValues = [];
    let currentValue = input[0];
    let currentLength = 0;
    for (const next of input) {
        if (next === currentValue) {
            currentLength++;
        }
        else {
            // Close the current run and open a new one.
            lengths.push(currentLength);
            runValues.push(currentValue);
            currentValue = next;
            currentLength = 1;
        }
    }
    // Close the trailing run.
    lengths.push(currentLength);
    runValues.push(currentValue);
    const runs = lengths.length;
    const data = new Float64Array(runs * 2);
    data.set(lengths, 0);
    data.set(runValues, runs);
    return { data, runs };
}
// Delta-encodes then ZigZag-maps: the first output is zigzag(data[0]); each
// subsequent output is zigzag(data[i] - data[i-1]).
export function encodeZigZagDeltaInt32(data) {
    if (data.length === 0) {
        return new Uint32Array(0);
    }
    const zigzag = (v) => (v << 1) ^ (v >> 31);
    const encoded = new Uint32Array(data.length);
    let previous = data[0];
    encoded[0] = zigzag(previous);
    for (let i = 1; i < data.length; i++) {
        const current = data[i];
        encoded[i] = zigzag(current - previous);
        previous = current;
    }
    return encoded;
}
// Delta-encodes then ZigZag-maps 64-bit values: the first output is
// zigzag(data[0]); each subsequent output is zigzag(data[i] - data[i-1]).
export function encodeZigZagDeltaInt64(data) {
    if (data.length === 0) {
        return new BigUint64Array(0);
    }
    const zigzag = (v) => (v << 1n) ^ (v >> 63n);
    const encoded = new BigUint64Array(data.length);
    let previous = data[0];
    encoded[0] = zigzag(previous);
    for (let i = 1; i < data.length; i++) {
        const current = data[i];
        encoded[i] = zigzag(current - previous);
        previous = current;
    }
    return encoded;
}
// In-place delta + float ZigZag: data[0] becomes zigzag(data[0]); each later
// slot becomes zigzag(original[i] - original[i-1]).
export function encodeZigZagDeltaFloat64(data) {
    if (data.length === 0) {
        return;
    }
    const zigzag = (v) => (v >= 0 ? v * 2 : v * -2 - 1);
    let previous = data[0];
    data[0] = zigzag(previous);
    for (let i = 1; i < data.length; i++) {
        const current = data[i];
        data[i] = zigzag(current - previous);
        previous = current;
    }
}
// ZigZag-maps every value, then run-length encodes the mapped stream.
// Output layout: first `runs` slots are run lengths, next `runs` slots are
// the zigzag values; numTotalValues is the original element count.
export function encodeZigZagRleInt32(input) {
    if (input.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numTotalValues: 0 };
    }
    // Step 1: zigzag-map the whole input.
    const zigzagged = Array.from(input, (value) => (value << 1) ^ (value >> 31));
    // Step 2: RLE over the zigzag stream.
    const lengths = [];
    const runValues = [];
    let currentValue = zigzagged[0];
    let currentLength = 0;
    for (const next of zigzagged) {
        if (next === currentValue) {
            currentLength++;
        }
        else {
            lengths.push(currentLength);
            runValues.push(currentValue);
            currentValue = next;
            currentLength = 1;
        }
    }
    lengths.push(currentLength);
    runValues.push(currentValue);
    // Step 3: pack as [lengths..., zigzag values...].
    const runs = lengths.length;
    const data = new Uint32Array(runs * 2);
    data.set(lengths, 0);
    data.set(runValues, runs);
    return {
        data,
        runs,
        numTotalValues: input.length,
    };
}
// ZigZag-maps every 64-bit value, then run-length encodes the mapped stream.
// Output layout: first `runs` slots are run lengths (as bigints), next
// `runs` slots are the zigzag values; numTotalValues is the input count.
export function encodeZigZagRleInt64(input) {
    if (input.length === 0) {
        return { data: new BigUint64Array(0), runs: 0, numTotalValues: 0 };
    }
    // Step 1: zigzag-map the whole input.
    const zigzagged = [];
    for (const value of input) {
        zigzagged.push((value << 1n) ^ (value >> 63n));
    }
    // Step 2: RLE over the zigzag stream.
    const lengths = [];
    const runValues = [];
    let currentValue = zigzagged[0];
    let currentLength = 0;
    for (const next of zigzagged) {
        if (next === currentValue) {
            currentLength++;
        }
        else {
            lengths.push(currentLength);
            runValues.push(currentValue);
            currentValue = next;
            currentLength = 1;
        }
    }
    lengths.push(currentLength);
    runValues.push(currentValue);
    // Step 3: pack as [lengths..., zigzag values...].
    const runs = lengths.length;
    const data = new BigUint64Array(runs * 2);
    for (let i = 0; i < runs; i++) {
        data[i] = BigInt(lengths[i]);
    }
    data.set(runValues, runs);
    return {
        data,
        runs,
        numTotalValues: input.length,
    };
}
/**
 * Zigzag-encodes every float64 value (floats acting as integers), then
 * run-length encodes the resulting stream.
 * Output layout: [run lengths | zigzagged run values] in one Float64Array.
 *
 * @param input - Indexable sequence of float64 values.
 * @returns `{ data, runs, numTotalValues }` where `data` has length `runs * 2`.
 */
export function encodeZigZagRleFloat64(input) {
    if (input.length === 0) {
        return { data: new Float64Array(0), runs: 0, numTotalValues: 0 };
    }
    // Pass 1: zigzag-encode all values into an intermediate stream.
    const zigzagged = Array.from(input, (v) => encodeZigZagFloat64Value(v));
    // Pass 2: collapse consecutive equal values into (length, value) runs.
    const lengths = [];
    const runValues = [];
    let runValue = zigzagged[0];
    let runLength = 0;
    for (const v of zigzagged) {
        if (v === runValue) {
            runLength++;
        } else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Pass 3: pack lengths followed by values into a single Float64Array.
    const runs = lengths.length;
    const packed = new Float64Array(runs * 2);
    packed.set(lengths, 0);
    packed.set(runValues, runs);
    return { data: packed, runs, numTotalValues: input.length };
}
/**
 * In-place delta transform: replaces each element (except the first) with
 * the difference to its predecessor.
 *
 * Note: despite the "encode" name, the original author remarks this is used
 * in the decode direction as well; the transform itself is symmetric in use.
 *
 * @param data - Mutable indexable numeric array; modified in place.
 */
export function encodeDeltaInt32(data) {
    if (data.length === 0) {
        return;
    }
    // Walk backwards so every subtraction reads a still-unmodified predecessor.
    for (let i = data.length - 1; i > 0; i--) {
        data[i] -= data[i - 1];
    }
}
/**
 * Componentwise delta + zigzag encoding for interleaved vec2 data
 * ([x0, y0, x1, y1, ...]). The first vertex is zigzag-encoded verbatim;
 * every later vertex stores the zigzagged delta to its predecessor.
 *
 * @param data - Interleaved x/y coordinates; left unmodified.
 * @returns New Uint32Array with the encoded stream.
 */
export function encodeComponentwiseDeltaVec2(data) {
    if (data.length < 2) {
        return new Uint32Array(data);
    }
    const out = new Uint32Array(data.length);
    // Process from the tail; source values in `data` stay untouched throughout.
    for (let i = data.length - 2; i >= 2; i -= 2) {
        out[i] = encodeZigZagInt32Value(data[i] - data[i - 2]);
        out[i + 1] = encodeZigZagInt32Value(data[i + 1] - data[i - 1]);
    }
    // The leading vertex carries absolute (zigzagged) coordinates.
    out[0] = encodeZigZagInt32Value(data[0]);
    out[1] = encodeZigZagInt32Value(data[1]);
    return out;
}
/**
 * Like `encodeComponentwiseDeltaVec2`, but first maps every coordinate from
 * tile space back to original space by dividing by `scale` (rounded), then
 * applies componentwise delta + zigzag encoding.
 *
 * NOTE(review): intermediate values are held in a Uint32Array, so negative
 * rounded coordinates wrap to large unsigned values before the delta step —
 * confirm this matches the paired decoder's expectations.
 *
 * @param data - Interleaved x/y coordinates in tile space; left unmodified.
 * @param scale - Scale factor previously applied to the coordinates.
 * @returns New Uint32Array with the encoded stream.
 */
export function encodeComponentwiseDeltaVec2Scaled(data, scale) {
    if (data.length < 2) {
        return new Uint32Array(data);
    }
    const out = new Uint32Array(data.length);
    // Stage 1: inverse-scale every component (tile space -> original space).
    for (let i = 0; i < data.length; i++) {
        out[i] = Math.round(data[i] / scale);
    }
    // Stage 2: componentwise deltas, back-to-front so the predecessor values
    // are still un-delta'd when read.
    for (let i = out.length - 2; i >= 2; i -= 2) {
        const dx = out[i] - out[i - 2];
        const dy = out[i + 1] - out[i - 1];
        out[i] = encodeZigZagInt32Value(dx);
        out[i + 1] = encodeZigZagInt32Value(dy);
    }
    // Stage 3: zigzag the leading (absolute) vertex last.
    out[0] = encodeZigZagInt32Value(out[0]);
    out[1] = encodeZigZagInt32Value(out[1]);
    return out;
}
// TODO: add support for zigzag delta-of-delta decoding (zigZagDeltaOfDeltaDecoding).
/**
 * Delta-encodes `values` (with an implicit previous value of 0, matching the
 * decoder which starts from `previousValue = 0`), run-length encodes the
 * deltas, and zigzag-encodes each run's delta.
 * Output layout: [run lengths | zigzagged run deltas].
 *
 * @returns `{ data, runs, numTotalValues }` where `data` has length `runs * 2`.
 */
export function encodeZigZagRleDeltaInt32(values) {
    if (values.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numTotalValues: 0 };
    }
    const lengths = [];
    const zigzaggedDeltas = [];
    // The decoder initializes previousValue = 0, so the first delta is taken
    // against 0 rather than against values[0].
    let previous = 0;
    let runDelta = null;
    let runLength = 0;
    for (let i = 0; i < values.length; i++) {
        const delta = values[i] - previous;
        previous = values[i];
        if (runDelta === null) {
            // Open the very first run.
            runDelta = delta;
            runLength = 1;
        } else if (delta === runDelta) {
            runLength++;
        } else {
            // Delta changed: flush the finished run and open a new one.
            lengths.push(runLength);
            zigzaggedDeltas.push(encodeZigZagInt32Value(runDelta));
            runDelta = delta;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    if (runDelta !== null) {
        lengths.push(runLength);
        zigzaggedDeltas.push(encodeZigZagInt32Value(runDelta));
    }
    // Pack [lengths | values] into one Uint32Array, as the decoder expects.
    const runs = lengths.length;
    const data = new Uint32Array(runs * 2);
    data.set(lengths, 0);
    data.set(zigzaggedDeltas, runs);
    return { data, runs, numTotalValues: values.length };
}
/**
 * Delta-encodes `values` (with an implicit previous value of 0, matching the
 * decoder which starts from `previousValue = 0`) and run-length encodes the
 * raw deltas (no zigzag step).
 * Output layout: [run lengths | run delta values].
 *
 * @returns `{ data, runs, numTotalValues }` where `data` has length `runs * 2`.
 */
export function encodeRleDeltaInt32(values) {
    if (values.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numTotalValues: 0 };
    }
    const lengths = [];
    const runDeltas = [];
    // The decoder initializes previousValue = 0; mirror that here.
    let previous = 0;
    let runDelta = null;
    let runLength = 0;
    for (let i = 0; i < values.length; i++) {
        const delta = values[i] - previous;
        previous = values[i];
        if (runDelta === null) {
            // Open the very first run.
            runDelta = delta;
            runLength = 1;
        } else if (delta === runDelta) {
            runLength++;
        } else {
            // Delta changed: flush the finished run and open a new one.
            lengths.push(runLength);
            runDeltas.push(runDelta);
            runDelta = delta;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    if (runDelta !== null) {
        lengths.push(runLength);
        runDeltas.push(runDelta);
    }
    // Pack [lengths | deltas] into one Uint32Array.
    const runs = lengths.length;
    const data = new Uint32Array(runs * 2);
    data.set(lengths, 0);
    data.set(runDeltas, runs);
    return { data, runs, numTotalValues: values.length };
}
/**
 * Delta-encodes `input` (with an implicit previous value of 0), zigzag-encodes
 * each delta, then run-length encodes the resulting stream.
 * Output layout: [run lengths | zigzagged run values].
 *
 * @returns `{ data, runs, numValues }` where `data` has length `runs * 2`.
 */
export function encodeDeltaRleInt32(input) {
    if (input.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numValues: 0 };
    }
    // Stage 1: zigzagged deltas relative to an implicit leading 0.
    const zigzaggedDeltas = [];
    let previous = 0;
    for (let i = 0; i < input.length; i++) {
        zigzaggedDeltas.push(encodeZigZagInt32Value(input[i] - previous));
        previous = input[i];
    }
    // Stage 2: run-length encode the zigzagged delta stream.
    const lengths = [];
    const runValues = [];
    let runValue = zigzaggedDeltas[0];
    let runLength = 0;
    for (const v of zigzaggedDeltas) {
        if (v === runValue) {
            runLength++;
        } else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Stage 3: pack [lengths | values] into one Uint32Array.
    const runs = lengths.length;
    const packed = new Uint32Array(runs * 2);
    packed.set(lengths, 0);
    packed.set(runValues, runs);
    return { data: packed, runs, numValues: input.length };
}
/**
 * Delta-encodes 64-bit `input` (with an implicit previous value of 0n),
 * zigzag-encodes each delta, then run-length encodes the resulting stream.
 * Output layout: [run lengths | zigzagged run values] as unsigned 64-bit words.
 *
 * @returns `{ data, runs, numValues }` where `data` has length `runs * 2`.
 */
export function encodeDeltaRleInt64(input) {
    if (input.length === 0) {
        return { data: new BigUint64Array(0), runs: 0, numValues: 0 };
    }
    // Stage 1: zigzagged deltas relative to an implicit leading 0n.
    const zigzaggedDeltas = [];
    let previous = 0n;
    for (let i = 0; i < input.length; i++) {
        zigzaggedDeltas.push(encodeZigZagInt64Value(input[i] - previous));
        previous = input[i];
    }
    // Stage 2: run-length encode the zigzagged delta stream.
    const lengths = [];
    const runValues = [];
    let runValue = zigzaggedDeltas[0];
    let runLength = 0;
    for (const v of zigzaggedDeltas) {
        if (v === runValue) {
            runLength++;
        } else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Stage 3: pack [lengths | values]; lengths are plain numbers and must be
    // converted to BigInt for the 64-bit output array.
    const runs = lengths.length;
    const packed = new BigUint64Array(runs * 2);
    for (let i = 0; i < runs; i++) {
        packed[i] = BigInt(lengths[i]);
        packed[i + runs] = runValues[i];
    }
    return { data: packed, runs, numValues: input.length };
}
//# sourceMappingURL=integerEncodingUtils.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,28 @@
import type { StreamMetadata } from "../metadata/tile/streamMetadataDecoder";
import type BitVector from "../vector/flat/bitVector";
import type GeometryScaling from "../decoding/geometryScaling";
export declare function encodeSignedInt32Stream(values: Int32Array, metadata: StreamMetadata, bitVector?: BitVector, scalingData?: GeometryScaling): Uint8Array;
export declare function encodeUnsignedInt32Stream(values: Uint32Array, metadata: StreamMetadata, bitVector?: BitVector, scalingData?: GeometryScaling): Uint8Array;
export declare function encodeFloat64(values: Float64Array, streamMetadata: StreamMetadata, isSigned: boolean): Float64Array;
/**
* Encodes BigInt64 values with zigzag encoding and varint compression
*/
export declare function encodeInt64SignedNone(values: BigInt64Array): Uint8Array;
/**
* Encodes BigInt64 values with delta encoding, zigzag, and varint
*/
export declare function encodeInt64SignedDelta(values: BigInt64Array): Uint8Array;
/**
* Encodes BigInt64 values with RLE, zigzag, and varint
* @param runs - Array of [runLength, value] pairs
*/
export declare function encodeInt64SignedRle(runs: Array<[number, bigint]>): Uint8Array;
/**
* Encodes BigInt64 values with delta+RLE, zigzag, and varint
* @param runs - Array of [runLength, deltaValue] pairs representing RLE-encoded delta values
*/
export declare function encodeInt64SignedDeltaRle(runs: Array<[number, bigint]>): Uint8Array;
/**
* Encodes unsigned BigInt64 values with varint compression (no zigzag)
*/
export declare function encodeInt64UnsignedNone(values: BigInt64Array): Uint8Array;

View File

@@ -0,0 +1,173 @@
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
import { encodeDeltaRleInt32, encodeZigZagInt32, encodeZigZagRleInt32, encodeUnsignedRleInt32, encodeDeltaInt32, encodeUnsignedRleFloat64, encodeZigZagDeltaFloat64, encodeZigZagFloat64, encodeZigZagRleFloat64, encodeVarintInt32, encodeVarintInt64, encodeZigZagInt64Value, encodeFastPfor, encodeComponentwiseDeltaVec2, encodeComponentwiseDeltaVec2Scaled, encodeZigZagDeltaInt32, } from "./integerEncodingUtils";
import { packNullable } from "./packNullableUtils";
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
/**
 * Encodes a signed int32 stream: applies the logical-level technique from
 * `metadata` first, then the physical-level technique (FastPFor/varint/none).
 */
export function encodeSignedInt32Stream(values, metadata, bitVector, scalingData) {
    const logical = encodeSignedInt32(values, metadata, bitVector, scalingData);
    return encodePhysicalLevelTechnique(logical.data, metadata);
}
/**
 * Encodes an unsigned int32 stream: applies the logical-level technique from
 * `metadata` first, then the physical-level technique (FastPFor/varint/none).
 */
export function encodeUnsignedInt32Stream(values, metadata, bitVector, scalingData) {
    const logical = encodeUnsignedInt32(values, metadata, bitVector, scalingData);
    return encodePhysicalLevelTechnique(logical.data, metadata);
}
/**
 * Applies the physical-level compression technique declared in the stream
 * metadata to an already logically-encoded Uint32Array.
 *
 * @throws Error for techniques other than FAST_PFOR, VARINT, and NONE.
 */
function encodePhysicalLevelTechnique(data, streamMetadata) {
    switch (streamMetadata.physicalLevelTechnique) {
        case PhysicalLevelTechnique.FAST_PFOR:
            return encodeFastPfor(data);
        case PhysicalLevelTechnique.VARINT:
            return encodeVarintInt32(data);
        case PhysicalLevelTechnique.NONE:
            // NOTE(review): constructing a Uint8Array from a Uint32Array view
            // copies element values (truncated to bytes), not raw bytes —
            // confirm this matches the NONE decoder's expectations.
            return new Uint8Array(data.subarray(0, streamMetadata.byteLength));
        default:
            throw new Error("Specified physicalLevelTechnique is not supported (yet).");
    }
}
/**
 * Applies the logical-level technique(s) from `streamMetadata` to signed
 * int32 data. Nullable inputs are compacted through `bitVector` first;
 * otherwise the input is copied so the caller's array is never mutated.
 *
 * @returns `{ data, runs? }` — `runs` is present only for RLE-based outputs.
 * @throws Error for unsupported logical-level techniques.
 */
function encodeSignedInt32(values, streamMetadata, bitVector, scalingData) {
    const input = bitVector ? packNullable(values, bitVector) : new Int32Array(values);
    const technique = streamMetadata.logicalLevelTechnique1;
    switch (technique) {
        case LogicalLevelTechnique.DELTA: {
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const { data, runs } = encodeDeltaRleInt32(input);
                return { data, runs };
            }
            return { data: encodeZigZagDeltaInt32(input) };
        }
        case LogicalLevelTechnique.RLE: {
            const { data, runs } = encodeZigZagRleInt32(input);
            return { data, runs };
        }
        case LogicalLevelTechnique.MORTON:
            // Morton codes are delta-encoded in place, then copied into an
            // unsigned view for the physical stage.
            encodeDeltaInt32(input);
            return { data: new Uint32Array(input) };
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            // The scaled variant only applies to non-nullable geometry streams.
            if (scalingData && !bitVector) {
                return { data: encodeComponentwiseDeltaVec2Scaled(input, scalingData.scale) };
            }
            return { data: encodeComponentwiseDeltaVec2(input) };
        case LogicalLevelTechnique.NONE:
            return { data: encodeZigZagInt32(input) };
        default:
            throw new Error(`The specified Logical level technique is not supported: ${technique}`);
    }
}
/**
 * Applies the logical-level technique(s) from `streamMetadata` to unsigned
 * int32 data. Nullable inputs are compacted through `bitVector` first;
 * otherwise the input is copied so the caller's array is never mutated.
 *
 * For DELTA the buffer is reinterpreted as Int32Array (same bytes) so the
 * signed delta helpers can operate on it.
 *
 * @returns `{ data, runs? }` — `runs` is present only for RLE-based outputs.
 * @throws Error for unsupported logical-level techniques.
 */
function encodeUnsignedInt32(values, streamMetadata, bitVector, scalingData) {
    const input = bitVector ? packNullable(values, bitVector) : new Uint32Array(values);
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA: {
            // Reinterpret the same underlying bytes as signed words.
            const signedView = new Int32Array(input.buffer, input.byteOffset, input.length);
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const { data, runs } = encodeDeltaRleInt32(signedView);
                return { data, runs };
            }
            return { data: encodeZigZagDeltaInt32(signedView) };
        }
        case LogicalLevelTechnique.RLE: {
            const { data, runs } = encodeUnsignedRleInt32(input);
            return { data, runs };
        }
        case LogicalLevelTechnique.MORTON:
            encodeDeltaInt32(input);
            return { data: input };
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            // Element-wise copy into signed storage (not a byte reinterpretation),
            // matching the signed componentwise-delta helpers.
            if (scalingData && !bitVector) {
                return { data: encodeComponentwiseDeltaVec2Scaled(new Int32Array(input), scalingData.scale) };
            }
            return { data: encodeComponentwiseDeltaVec2(new Int32Array(input)) };
        case LogicalLevelTechnique.NONE:
            return { data: input };
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
}
/**
 * Applies the logical-level technique(s) from `streamMetadata` to float64
 * data. DELTA and NONE (signed) variants mutate `values` in place; RLE
 * variants return a freshly packed array.
 *
 * @throws Error for unsupported logical-level techniques.
 */
export function encodeFloat64(values, streamMetadata, isSigned) {
    const technique = streamMetadata.logicalLevelTechnique1;
    if (technique === LogicalLevelTechnique.DELTA) {
        // In-place zigzag-delta, optionally followed by RLE.
        encodeZigZagDeltaFloat64(values);
        if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
            return encodeUnsignedRleFloat64(values).data;
        }
        return values;
    }
    if (technique === LogicalLevelTechnique.RLE) {
        return encodeRleFloat64(values, isSigned);
    }
    if (technique === LogicalLevelTechnique.NONE) {
        if (isSigned) {
            encodeZigZagFloat64(values);
        }
        return values;
    }
    throw new Error(`The specified Logical level technique is not supported: ${technique}`);
}
/**
 * RLE-encodes float64 data, zigzagging first when the values are signed.
 */
function encodeRleFloat64(data, isSigned) {
    if (isSigned) {
        return encodeZigZagRleFloat64(data).data;
    }
    return encodeUnsignedRleFloat64(data).data;
}
/**
 * Encodes BigInt64 values with zigzag encoding and varint compression.
 *
 * @param values - BigInt64Array of signed 64-bit values.
 * @returns Varint-compressed byte stream.
 */
export function encodeInt64SignedNone(values) {
    const zigzagged = new BigUint64Array(values.length);
    for (let i = 0; i < values.length; i++) {
        zigzagged[i] = encodeZigZagInt64Value(values[i]);
    }
    return encodeVarintInt64(zigzagged);
}
/**
 * Encodes BigInt64 values with delta encoding, zigzag, and varint.
 * The first element is stored verbatim; each subsequent element is stored as
 * the difference to its predecessor. Empty input yields an empty varint
 * stream (previously the function threw a TypeError on empty input).
 */
export function encodeInt64SignedDelta(values) {
    // Guard: values[0] on an empty BigInt64Array is `undefined`, and writing
    // undefined into a BigInt64Array throws a TypeError (ToBigInt fails).
    if (values.length === 0) {
        return encodeVarintInt64(new BigUint64Array(0));
    }
    const deltaEncoded = new BigInt64Array(values.length);
    deltaEncoded[0] = values[0];
    for (let i = 1; i < values.length; i++) {
        deltaEncoded[i] = values[i] - values[i - 1];
    }
    // Zigzag each delta so negative deltas map to small unsigned values.
    const zigzagEncoded = new BigUint64Array(deltaEncoded.length);
    for (let i = 0; i < deltaEncoded.length; i++) {
        zigzagEncoded[i] = encodeZigZagInt64Value(deltaEncoded[i]);
    }
    return encodeVarintInt64(zigzagEncoded);
}
/**
 * Encodes BigInt64 values with RLE, zigzag, and varint.
 * Output layout before varint: [run lengths | zigzagged run values].
 *
 * @param runs - Array of [runLength, value] pairs.
 */
export function encodeInt64SignedRle(runs) {
    const count = runs.length;
    const packed = new BigUint64Array(count * 2);
    for (let i = 0; i < count; i++) {
        const [runLength, value] = runs[i];
        packed[i] = BigInt(runLength);
        packed[i + count] = encodeZigZagInt64Value(value);
    }
    return encodeVarintInt64(packed);
}
/**
 * Encodes BigInt64 values with delta+RLE, zigzag, and varint.
 * Output layout before varint: [run lengths | zigzagged run deltas].
 *
 * @param runs - Array of [runLength, deltaValue] pairs representing
 *               RLE-encoded delta values.
 */
export function encodeInt64SignedDeltaRle(runs) {
    const lengthWords = runs.map(([runLength]) => BigInt(runLength));
    const valueWords = runs.map(([, delta]) => encodeZigZagInt64Value(delta));
    return encodeVarintInt64(new BigUint64Array([...lengthWords, ...valueWords]));
}
/**
 * Encodes unsigned BigInt64 values with varint compression (no zigzag step).
 */
export function encodeInt64UnsignedNone(values) {
    // Copy into unsigned storage and varint-compress directly.
    const unsigned = new BigUint64Array(values);
    return encodeVarintInt64(unsigned);
}
//# sourceMappingURL=integerStreamEncoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,4 @@
import type { TypedArrayInstance } from "../decoding/unpackNullableUtils";
import BitVector from "../vector/flat/bitVector";
export declare function packNullable<T extends TypedArrayInstance>(data: T, presentBits: BitVector | null): T;
export declare function packNullableBoolean(data: Uint8Array, dataSize: number, presentBits: BitVector | null): Uint8Array;

View File

@@ -0,0 +1,55 @@
import BitVector from "../vector/flat/bitVector";
/**
 * Compacts a nullable stream: keeps only the elements whose bit is set in
 * `presentBits`, preserving order and the input's concrete TypedArray type.
 * With no mask the input is returned unchanged (already packed).
 *
 * @param data - Source TypedArray.
 * @param presentBits - Presence mask, or null/undefined for non-nullable data.
 * @returns New TypedArray of the same type containing only present elements,
 *          or `data` itself when no mask was given.
 */
export function packNullable(data, presentBits) {
    // Non-nullable case: nothing to filter out.
    if (!presentBits) {
        return data;
    }
    const total = data.length;
    // First pass: count survivors so the output can be allocated exactly.
    let kept = 0;
    for (let i = 0; i < total; i++) {
        if (presentBits.get(i)) {
            kept++;
        }
    }
    // Allocate an array of the same concrete TypedArray type as the input.
    const TypedArrayCtor = data.constructor;
    const packed = new TypedArrayCtor(kept);
    // Second pass: copy each present element into the next free slot.
    let write = 0;
    for (let i = 0; i < total; i++) {
        if (presentBits.get(i)) {
            packed[write++] = data[i];
        }
    }
    return packed;
}
/**
 * Compacts a nullable boolean (bit-packed) stream: keeps only the bits whose
 * position is set in `presentBits`, preserving order. With no mask the input
 * buffer is returned unchanged (already packed).
 *
 * @param data - Bit-packed boolean buffer.
 * @param dataSize - Number of logical bits in `data`.
 * @param presentBits - Presence mask, or null/undefined for non-nullable data.
 * @returns Bit-packed buffer containing only the present bits.
 */
export function packNullableBoolean(data, dataSize, presentBits) {
    // Non-nullable case: nothing to filter out.
    if (!presentBits) {
        return data;
    }
    const source = new BitVector(data, dataSize);
    // First pass: count present bits to size the packed output buffer.
    let presentCount = 0;
    for (let i = 0; i < dataSize; i++) {
        if (presentBits.get(i)) {
            presentCount++;
        }
    }
    // Allocate the packed bit vector (rounded up to whole bytes).
    const target = new BitVector(new Uint8Array(Math.ceil(presentCount / 8)), presentCount);
    // Second pass: copy only the bits whose mask entry is set, in order.
    let written = 0;
    for (let i = 0; i < dataSize; i++) {
        if (presentBits.get(i)) {
            target.set(written++, source.get(i));
        }
    }
    return target.getBuffer();
}
//# sourceMappingURL=packNullableUtils.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"packNullableUtils.js","sourceRoot":"","sources":["../../src/encoding/packNullableUtils.ts"],"names":[],"mappings":"AACA,OAAO,SAAS,MAAM,0BAA0B,CAAC;AAEjD,MAAM,UAAU,YAAY,CAA+B,IAAO,EAAE,WAA6B;IAC7F,0EAA0E;IAC1E,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,IAAI,CAAC;IAChB,CAAC;IAED,MAAM,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC;IAEzB,8DAA8D;IAC9D,mEAAmE;IACnE,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5B,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,WAAW,EAAE,CAAC;QAClB,CAAC;IACL,CAAC;IAED,+DAA+D;IAC/D,MAAM,WAAW,GAAG,IAAI,CAAC,WAAoC,CAAC;IAC9D,MAAM,MAAM,GAAG,IAAI,WAAW,CAAC,WAAW,CAAM,CAAC;IAEjD,0DAA0D;IAC1D,IAAI,OAAO,GAAG,CAAC,CAAC;IAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5B,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,MAAM,CAAC,OAAO,EAAE,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;QAChC,CAAC;IACL,CAAC;IAED,OAAO,MAAM,CAAC;AAClB,CAAC;AAED,MAAM,UAAU,mBAAmB,CAAC,IAAgB,EAAE,QAAgB,EAAE,WAA6B;IACjG,0EAA0E;IAC1E,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,IAAI,CAAC;IAChB,CAAC;IAED,MAAM,cAAc,GAAG,IAAI,SAAS,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;IAErD,8DAA8D;IAC9D,uDAAuD;IACvD,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,EAAE,CAAC,EAAE,EAAE,CAAC;QAChC,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,WAAW,EAAE,CAAC;QAClB,CAAC;IACL,CAAC;IAED,sEAAsE;IACtE,MAAM,YAAY,GAAG,IAAI,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,GAAG,CAAC,CAAC,CAAC,CAAC;IAChE,MAAM,eAAe,GAAG,IAAI,SAAS,CAAC,YAAY,EAAE,WAAW,CAAC,CAAC;IAEjE,4DAA4D;IAC5D,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,EAAE,CAAC,EAAE,EAAE,CAAC;QAChC,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,MAAM,KAAK,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;YACpC,eAAe,CAAC,GAAG,CAAC,WAAW,EAAE,EAAE,KAAK,CAAC,CAAC;QAC9C,CAAC;IACL,CAAC;IAED,OAAO,eAAe,CAAC,SAAS,EAAE,CAAC;AACvC,CAAC","sourcesContent":["import type { TypedArrayConstructor, TypedArrayInstance } from \"../decoding/unpackNullableUtils\";\nimport 
BitVector from \"../vector/flat/bitVector\";\n\nexport function packNullable<T extends TypedArrayInstance>(data: T, presentBits: BitVector | null): T {\n // Non-nullable case: if no mask is provided, the data is already \"packed\"\n if (!presentBits) {\n return data;\n }\n\n const size = data.length;\n\n // 1. First pass: Count how many elements are actually present\n // This is required to allocate the correct size for the TypedArray\n let packedCount = 0;\n for (let i = 0; i < size; i++) {\n if (presentBits.get(i)) {\n packedCount++;\n }\n }\n\n // 2. Create a new array of the same type with the reduced size\n const constructor = data.constructor as TypedArrayConstructor;\n const result = new constructor(packedCount) as T;\n\n // 3. Second pass: Fill the result array with valid values\n let counter = 0;\n for (let i = 0; i < size; i++) {\n if (presentBits.get(i)) {\n result[counter++] = data[i];\n }\n }\n\n return result;\n}\n\nexport function packNullableBoolean(data: Uint8Array, dataSize: number, presentBits: BitVector | null): Uint8Array {\n // Non-nullable case: if no mask is provided, the data is already \"packed\"\n if (!presentBits) {\n return data;\n }\n\n const inputBitVector = new BitVector(data, dataSize);\n\n // 1. Calculate how many bits are actually marked as 'present'\n // This determines the size of the final packed buffer.\n let packedCount = 0;\n for (let i = 0; i < dataSize; i++) {\n if (presentBits.get(i)) {\n packedCount++;\n }\n }\n\n // 2. Initialize the result BitVector with the correct compressed size\n const resultBuffer = new Uint8Array(Math.ceil(packedCount / 8));\n const resultBitVector = new BitVector(resultBuffer, packedCount);\n\n // 3. Fill the result: only copy bits where the mask is true\n let targetIndex = 0;\n for (let i = 0; i < dataSize; i++) {\n if (presentBits.get(i)) {\n const value = inputBitVector.get(i);\n resultBitVector.set(targetIndex++, value);\n }\n }\n\n return resultBitVector.getBuffer();\n}\n"]}

View File

@@ -0,0 +1,78 @@
/**
* Encodes INT_32 values with NONE encoding (no delta, no RLE)
*/
export declare function encodeInt32NoneColumn(values: Int32Array): Uint8Array;
/**
* Encodes INT_32 values with DELTA encoding
*/
export declare function encodeInt32DeltaColumn(values: Int32Array): Uint8Array;
/**
* Encodes INT_32 values with RLE encoding
* @param runs - Array of [runLength, value] pairs
*/
export declare function encodeInt32RleColumn(runs: Array<[number, number]>): Uint8Array;
/**
* Encodes INT_32 values with DELTA+RLE encoding
* @param runs - Array of [runLength, deltaValue] pairs, where first value is the base
*/
export declare function encodeInt32DeltaRleColumn(runs: Array<[number, number]>): Uint8Array;
/**
* Encodes nullable INT_32 values
*/
export declare function encodeInt32NullableColumn(values: (number | null)[]): Uint8Array;
/**
* Encodes UINT_32 values (no zigzag encoding)
*/
export declare function encodeUint32Column(values: Uint32Array): Uint8Array;
/**
* Encodes INT_64 values with NONE encoding
*/
export declare function encodeInt64NoneColumn(values: BigInt64Array): Uint8Array;
/**
* Encodes INT_64 values with DELTA encoding
*/
export declare function encodeInt64DeltaColumn(values: BigInt64Array): Uint8Array;
/**
* Encodes INT_64 values with RLE encoding
*/
export declare function encodeInt64RleColumn(runs: Array<[number, bigint]>): Uint8Array;
/**
* Encodes INT_64 values with DELTA+RLE encoding
*/
export declare function encodeInt64DeltaRleColumn(runs: Array<[number, bigint]>): Uint8Array;
/**
* Encodes nullable INT_64 values
*/
export declare function encodeInt64NullableColumn(values: (bigint | null)[]): Uint8Array;
/**
* Encodes UINT_64 values (no zigzag encoding)
*/
export declare function encodeUint64Column(values: BigUint64Array): Uint8Array;
/**
* Encodes nullable UINT_64 values
*/
export declare function encodeUint64NullableColumn(values: (bigint | null)[]): Uint8Array;
/**
* Encodes FLOAT values
*/
export declare function encodeFloatColumn(values: Float32Array): Uint8Array;
/**
* Encodes nullable FLOAT values
*/
export declare function encodeFloatNullableColumn(values: (number | null)[]): Uint8Array;
/**
* Encodes DOUBLE values
*/
export declare function encodeDoubleColumn(values: Float64Array): Uint8Array;
/**
* Encodes nullable DOUBLE values
*/
export declare function encodeDoubleNullableColumn(values: (number | null)[]): Uint8Array;
/**
* Encodes BOOLEAN values
*/
export declare function encodeBooleanColumn(values: boolean[]): Uint8Array;
/**
* Encodes nullable BOOLEAN values
*/
export declare function encodeBooleanNullableColumn(values: (boolean | null)[]): Uint8Array;

View File

@@ -0,0 +1,328 @@
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
import { DictionaryType } from "../metadata/tile/dictionaryType";
import IntWrapper from "../decoding/intWrapper";
import { encodeBooleanRle, encodeFloatsLE, encodeDoubleLE } from "./encodingUtils";
import { encodeVarintInt32Value, encodeVarintInt32, encodeVarintInt64, encodeZigZagInt32Value, encodeZigZagInt64Value, encodeZigZagInt32, } from "./integerEncodingUtils";
/**
 * Encodes INT_32 values with NONE encoding (no delta, no RLE):
 * zigzag + varint, framed with the generated stream metadata.
 */
export function encodeInt32NoneColumn(values) {
    const encodedData = encodeVarintInt32(encodeZigZagInt32(values));
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodedData);
}
/**
 * Encodes INT_32 values with DELTA encoding: first value verbatim, then
 * consecutive differences, followed by zigzag + varint.
 */
export function encodeInt32DeltaColumn(values) {
    const deltas = new Int32Array(values.length);
    if (values.length > 0) {
        deltas[0] = values[0];
    }
    for (let i = 1; i < values.length; i++) {
        deltas[i] = values[i] - values[i - 1];
    }
    const encodedData = encodeVarintInt32(encodeZigZagInt32(deltas));
    const metadata = createStreamMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodedData);
}
/**
 * Encodes INT_32 values with RLE encoding.
 * Stream payload layout before varint: [run lengths | zigzagged run values].
 *
 * @param runs - Array of [runLength, value] pairs.
 */
export function encodeInt32RleColumn(runs) {
    const numRuns = runs.length;
    const rle = new Uint32Array(numRuns * 2);
    let totalValues = 0;
    for (let i = 0; i < numRuns; i++) {
        const [runLength, value] = runs[i];
        rle[i] = runLength;
        rle[i + numRuns] = encodeZigZagInt32Value(value);
        totalValues += runLength;
    }
    const encodedData = encodeVarintInt32(rle);
    const metadata = createRleMetadata(LogicalLevelTechnique.RLE, LogicalLevelTechnique.NONE, numRuns, totalValues);
    return buildEncodedStream(metadata, encodedData);
}
/**
 * Encodes INT_32 values with DELTA+RLE encoding.
 * Stream payload layout before varint: [run lengths | zigzagged run deltas].
 *
 * @param runs - Array of [runLength, deltaValue] pairs, where the first value
 *               is the base.
 */
export function encodeInt32DeltaRleColumn(runs) {
    const numRuns = runs.length;
    const rle = new Uint32Array(numRuns * 2);
    let totalValues = 0;
    for (let i = 0; i < numRuns; i++) {
        const [runLength, delta] = runs[i];
        rle[i] = runLength;
        rle[i + numRuns] = encodeZigZagInt32Value(delta);
        totalValues += runLength;
    }
    const encodedData = encodeVarintInt32(rle);
    const metadata = createRleMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.RLE, numRuns, totalValues);
    return buildEncodedStream(metadata, encodedData);
}
/**
 * Encodes nullable INT_32 values as two streams: a boolean-RLE nullability
 * bitmap followed by a zigzag+varint data stream over the non-null values.
 * The nullability stream precedes the data stream in the output.
 */
export function encodeInt32NullableColumn(values) {
    // Data stream: zigzag + varint over the non-null values only.
    const present = values.filter((v) => v !== null);
    const zigzagged = new Uint32Array(present.length);
    for (let i = 0; i < present.length; i++) {
        zigzagged[i] = encodeZigZagInt32Value(present[i]);
    }
    const dataMetadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length);
    const dataStream = buildEncodedStream(dataMetadata, encodeVarintInt32(zigzagged));
    // Nullability stream: boolean-RLE over per-element presence flags.
    const presenceFlags = values.map((v) => v !== null);
    const nullabilityMetadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presenceFlags.length);
    const nullabilityStream = buildEncodedStream(nullabilityMetadata, encodeBooleanRle(presenceFlags));
    return concatenateBuffers(nullabilityStream, dataStream);
}
/**
 * Encodes UINT_32 values (varint only, no zigzag encoding).
 */
export function encodeUint32Column(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt32(values));
}
/**
 * Encodes INT_64 values with NONE encoding (zigzag + varint only).
 */
export function encodeInt64NoneColumn(values) {
    const zigzag = new BigUint64Array(values.length);
    let i = 0;
    for (const value of values) {
        zigzag[i++] = encodeZigZagInt64Value(value);
    }
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt64(zigzag));
}
/**
 * Encodes INT_64 values with DELTA encoding.
 *
 * The first element is stored as-is; every subsequent element is stored as
 * the difference to its predecessor. Deltas are then zigzag + varint encoded.
 *
 * Fix: an empty input previously threw a TypeError, because `values[0]` is
 * `undefined` and assigning it into a BigInt64Array performs
 * ToBigInt(undefined) before the (discarded) out-of-bounds write.
 * An empty input now produces an empty data stream instead.
 */
export function encodeInt64DeltaColumn(values) {
    const deltaEncoded = new BigInt64Array(values.length);
    if (values.length > 0) {
        deltaEncoded[0] = values[0];
        for (let i = 1; i < values.length; i++) {
            deltaEncoded[i] = values[i] - values[i - 1];
        }
    }
    const zigzagEncoded = new BigUint64Array(deltaEncoded.length);
    for (let i = 0; i < deltaEncoded.length; i++) {
        zigzagEncoded[i] = encodeZigZagInt64Value(deltaEncoded[i]);
    }
    const encodedData = encodeVarintInt64(zigzagEncoded);
    const streamMetadata = createStreamMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(streamMetadata, encodedData);
}
/**
 * Encodes INT_64 values with RLE encoding.
 * Payload layout: run lengths (as BigInt) first, then zigzag-encoded run values.
 */
export function encodeInt64RleColumn(runs) {
    let totalValues = 0;
    const counts = [];
    const encodedValues = [];
    for (const [count, value] of runs) {
        counts.push(BigInt(count));
        encodedValues.push(encodeZigZagInt64Value(value));
        totalValues += count;
    }
    const payload = encodeVarintInt64(new BigUint64Array(counts.concat(encodedValues)));
    const metadata = createRleMetadata(LogicalLevelTechnique.RLE, LogicalLevelTechnique.NONE, runs.length, totalValues);
    return buildEncodedStream(metadata, payload);
}
/**
 * Encodes INT_64 values with DELTA+RLE encoding.
 * Payload layout: run lengths (as BigInt) first, then zigzag-encoded deltas.
 */
export function encodeInt64DeltaRleColumn(runs) {
    let totalValues = 0;
    const counts = [];
    const encodedDeltas = [];
    for (const [count, delta] of runs) {
        counts.push(BigInt(count));
        encodedDeltas.push(encodeZigZagInt64Value(delta));
        totalValues += count;
    }
    const payload = encodeVarintInt64(new BigUint64Array(counts.concat(encodedDeltas)));
    const metadata = createRleMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.RLE, runs.length, totalValues);
    return buildEncodedStream(metadata, payload);
}
/**
 * Encodes nullable INT_64 values: a boolean-RLE nullability stream followed
 * by a zigzag+varint data stream of the non-null values.
 */
export function encodeInt64NullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const nonNull = values.filter((v) => v !== null);
    const zigzag = new BigUint64Array(nonNull.length);
    nonNull.forEach((value, i) => {
        zigzag[i] = encodeZigZagInt64Value(value);
    });
    const dataStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, nonNull.length), encodeVarintInt64(zigzag));
    const presentStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length), encodeBooleanRle(presentFlags));
    return concatenateBuffers(presentStream, dataStream);
}
/**
 * Encodes UINT_64 values (varint only, no zigzag encoding).
 */
export function encodeUint64Column(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt64(values));
}
/**
 * Encodes nullable UINT_64 values: a nullability stream followed by the
 * varint-encoded non-null values (no zigzag).
 */
export function encodeUint64NullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const nonNull = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, nonNull.length), encodeVarintInt64(new BigUint64Array(nonNull)));
    const presentStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length), encodeBooleanRle(presentFlags));
    return concatenateBuffers(presentStream, dataStream);
}
/**
 * Encodes FLOAT values via encodeFloatsLE.
 */
export function encodeFloatColumn(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeFloatsLE(values));
}
/**
 * Encodes nullable FLOAT values: a nullability stream followed by the
 * non-null values encoded via encodeFloatsLE.
 */
export function encodeFloatNullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const nonNull = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, nonNull.length), encodeFloatsLE(new Float32Array(nonNull)));
    const presentStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length), encodeBooleanRle(presentFlags));
    return concatenateBuffers(presentStream, dataStream);
}
/**
 * Encodes DOUBLE values via encodeDoubleLE.
 */
export function encodeDoubleColumn(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeDoubleLE(values));
}
/**
 * Encodes nullable DOUBLE values: a nullability stream followed by the
 * non-null values encoded via encodeDoubleLE.
 */
export function encodeDoubleNullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const nonNull = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, nonNull.length), encodeDoubleLE(new Float64Array(nonNull)));
    const presentStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length), encodeBooleanRle(presentFlags));
    return concatenateBuffers(presentStream, dataStream);
}
/**
 * Encodes BOOLEAN values with boolean RLE.
 */
export function encodeBooleanColumn(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeBooleanRle(values));
}
/**
 * Encodes nullable BOOLEAN values: a boolean-RLE nullability stream followed
 * by a boolean-RLE data stream of the non-null values.
 */
export function encodeBooleanNullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const nonNull = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, nonNull.length), encodeBooleanRle(nonNull));
    const presentStream = buildEncodedStream(createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length), encodeBooleanRle(presentFlags));
    return concatenateBuffers(presentStream, dataStream);
}
/**
 * Builds DATA-stream metadata for a VARINT-encoded column.
 * `byteLength` is a placeholder; buildEncodedStream replaces it with the
 * actual payload size before serialization.
 */
function createStreamMetadata(logicalTechnique1, logicalTechnique2 = LogicalLevelTechnique.NONE, numValues = 3) {
    const fixedFields = {
        physicalStreamType: PhysicalStreamType.DATA,
        logicalStreamType: { dictionaryType: DictionaryType.NONE },
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
    };
    return {
        ...fixedFields,
        logicalLevelTechnique1: logicalTechnique1,
        logicalLevelTechnique2: logicalTechnique2,
        numValues,
        byteLength: 10,
        decompressedCount: numValues,
    };
}
/**
 * Builds metadata for an RLE-encoded DATA stream. `numValues` counts both
 * halves of the payload (run lengths + run values), hence runs * 2, while
 * `decompressedCount` / `numRleValues` reflect the logical value count.
 */
function createRleMetadata(logicalTechnique1, logicalTechnique2, runs, numRleValues) {
    const base = createStreamMetadata(logicalTechnique1, logicalTechnique2, runs * 2);
    return {
        ...base,
        decompressedCount: numRleValues,
        runs,
        numRleValues,
    };
}
/**
 * Serializes the stream metadata (with the real payload byte length patched
 * in) and prepends it to the encoded payload.
 */
function buildEncodedStream(streamMetadata, encodedData) {
    const header = encodeStreamMetadata({ ...streamMetadata, byteLength: encodedData.length });
    const stream = new Uint8Array(header.length + encodedData.length);
    stream.set(header);
    stream.set(encodedData, header.length);
    return stream;
}
/**
 * Serializes stream metadata into its wire format:
 *   byte 1: physical stream type index (upper nibble) | logical subtype (lower nibble)
 *   byte 2: logical technique 1 (<<5) | logical technique 2 (<<2) | physical technique
 *   followed by varint numValues, varint byteLength and, for RLE metadata,
 *   varint runs and numRleValues.
 * Enum members are written as their positional index in the enum declaration.
 */
function encodeStreamMetadata(metadata) {
    // 100 bytes is ample: 2 fixed bytes plus at most four 5-byte varints.
    const buffer = new Uint8Array(100);
    let writeOffset = 0;
    // Byte 1: Stream type
    const physicalTypeIndex = Object.values(PhysicalStreamType).indexOf(metadata.physicalStreamType);
    const lowerNibble = 0; // For DATA stream with NONE dictionary type
    buffer[writeOffset++] = (physicalTypeIndex << 4) | lowerNibble;
    // Byte 2: Encoding techniques
    const llt1Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique1);
    const llt2Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique2);
    const pltIndex = Object.values(PhysicalLevelTechnique).indexOf(metadata.physicalLevelTechnique);
    buffer[writeOffset++] = (llt1Index << 5) | (llt2Index << 2) | pltIndex;
    // Variable-length fields
    const offset = new IntWrapper(writeOffset);
    encodeVarintInt32Value(metadata.numValues, buffer, offset);
    encodeVarintInt32Value(metadata.byteLength, buffer, offset);
    // RLE-specific fields
    if (isRleMetadata(metadata)) {
        encodeVarintInt32Value(metadata.runs, buffer, offset);
        encodeVarintInt32Value(metadata.numRleValues, buffer, offset);
    }
    // Trim the scratch buffer down to the bytes actually written.
    return buffer.slice(0, offset.get());
}
/**
 * Type guard: true when the metadata carries the RLE-only fields
 * (`runs` and `numRleValues`).
 */
function isRleMetadata(metadata) {
    const hasRuns = "runs" in metadata;
    const hasRleCount = "numRleValues" in metadata;
    return hasRuns && hasRleCount;
}
/**
 * Concatenates any number of Uint8Arrays into a single new Uint8Array.
 */
function concatenateBuffers(...buffers) {
    let total = 0;
    for (const buffer of buffers) {
        total += buffer.length;
    }
    const combined = new Uint8Array(total);
    buffers.reduce((position, buffer) => {
        combined.set(buffer, position);
        return position + buffer.length;
    }, 0);
    return combined;
}
//# sourceMappingURL=propertyEncoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,12 @@
/**
 * Encodes plain strings into a complete stream with PRESENT (if needed), LENGTH, and DATA streams.
 * The PRESENT stream is emitted only when the input contains at least one null.
 * @param strings - Array of strings (can include null values)
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export declare function encodePlainStrings(strings: (string | null)[]): Uint8Array;
/**
 * Encodes dictionary-compressed strings into a complete stream with PRESENT (if needed),
 * OFFSET, LENGTH, and DATA streams; each distinct string is stored once in the dictionary.
 * @param strings - Array of strings (can include null values)
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export declare function encodeDictionaryStrings(strings: (string | null)[]): Uint8Array;

View File

@@ -0,0 +1,149 @@
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
import { DictionaryType } from "../metadata/tile/dictionaryType";
import { LengthType } from "../metadata/tile/lengthType";
import { OffsetType } from "../metadata/tile/offsetType";
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
import IntWrapper from "../decoding/intWrapper";
import { encodeBooleanRle, encodeStrings, createStringLengths, concatenateBuffers } from "./encodingUtils";
import { encodeVarintInt32Value, encodeVarintInt32 } from "./integerEncodingUtils";
/**
 * Encodes plain strings into a complete stream with PRESENT (if needed), LENGTH, and DATA streams.
 * @param strings - Array of strings (can include null values)
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export function encodePlainStrings(strings) {
    const presentFlags = strings.map((s) => s !== null);
    const nonNull = strings.filter((s) => s !== null);
    const streams = [];
    // PRESENT stream is only emitted when at least one value is null.
    if (presentFlags.includes(false)) {
        streams.push(createStream(PhysicalStreamType.PRESENT, encodeBooleanRle(presentFlags), {
            technique: PhysicalLevelTechnique.VARINT,
            count: presentFlags.length,
        }));
    }
    // LENGTH stream: one length per non-null string (via createStringLengths).
    const lengths = createStringLengths(nonNull);
    streams.push(createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(lengths), {
        logical: { lengthType: LengthType.VAR_BINARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: lengths.length,
    }));
    // DATA stream: the concatenated bytes of all non-null strings.
    streams.push(createStream(PhysicalStreamType.DATA, encodeStrings(nonNull), {
        logical: { dictionaryType: DictionaryType.NONE },
    }));
    return concatenateBuffers(...streams);
}
/**
 * Encodes dictionary-compressed strings into a complete stream.
 * @param strings - Array of strings (can include null values)
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export function encodeDictionaryStrings(strings) {
    const presentFlags = strings.map((s) => s !== null);
    const nonNull = strings.filter((s) => s !== null);
    // Dictionary holds each distinct string once, in first-occurrence order.
    const indexByString = new Map();
    for (const s of nonNull) {
        if (!indexByString.has(s)) {
            indexByString.set(s, indexByString.size);
        }
    }
    const dictionary = [...indexByString.keys()];
    const offsets = nonNull.map((s) => {
        const index = indexByString.get(s);
        if (index === undefined) {
            throw new Error(`String not found in dictionary: ${s}`);
        }
        return index;
    });
    const streams = [];
    // PRESENT stream is only emitted when at least one value is null.
    if (presentFlags.includes(false)) {
        streams.push(createStream(PhysicalStreamType.PRESENT, encodeBooleanRle(presentFlags), {
            technique: PhysicalLevelTechnique.VARINT,
            count: presentFlags.length,
        }));
    }
    // OFFSET stream: dictionary index for every non-null value.
    streams.push(createStream(PhysicalStreamType.OFFSET, encodeVarintInt32(new Uint32Array(offsets)), {
        logical: { offsetType: OffsetType.STRING },
        technique: PhysicalLevelTechnique.VARINT,
        count: offsets.length,
    }));
    // LENGTH stream: lengths of the dictionary entries (via createStringLengths).
    const dictLengths = createStringLengths(dictionary);
    streams.push(createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(dictLengths), {
        logical: { lengthType: LengthType.DICTIONARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: dictLengths.length,
    }));
    // DATA stream: the dictionary itself.
    streams.push(createStream(PhysicalStreamType.DATA, encodeStrings(dictionary), {
        logical: { dictionaryType: DictionaryType.SINGLE },
    }));
    return concatenateBuffers(...streams);
}
/**
 * Builds and serializes a single stream (metadata header + payload).
 * `options.logical` supplies the logical subtype; `options.technique`
 * defaults to NONE and `options.count` to 0.
 */
function createStream(physicalType, data, options = {}) {
    const numValues = options.count ?? 0;
    const metadata = {
        physicalStreamType: physicalType,
        logicalStreamType: options.logical ?? {},
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: options.technique ?? PhysicalLevelTechnique.NONE,
        numValues,
        byteLength: data.length,
        decompressedCount: numValues,
    };
    return buildEncodedStream(metadata, data);
}
/**
 * Serializes the stream metadata (byteLength patched to the real payload
 * size) and prepends it to the encoded payload bytes.
 */
function buildEncodedStream(streamMetadata, encodedData) {
    const header = encodeStreamMetadata({ ...streamMetadata, byteLength: encodedData.length });
    const combined = new Uint8Array(header.length + encodedData.length);
    combined.set(header);
    combined.set(encodedData, header.length);
    return combined;
}
/**
 * Serializes stream metadata into its wire format:
 *   byte 1: physical stream type index (upper nibble) | logical subtype (lower nibble)
 *   byte 2: logical technique 1 (<<5) | logical technique 2 (<<2) | physical technique
 *   followed by varint numValues and varint byteLength.
 * Enum members are written as their positional index in the enum declaration.
 */
function encodeStreamMetadata(metadata) {
    // 100 bytes is ample: 2 fixed bytes plus two 5-byte varints at most.
    const buffer = new Uint8Array(100);
    let writeOffset = 0;
    // Byte 1: Stream type
    const physicalTypeIndex = Object.values(PhysicalStreamType).indexOf(metadata.physicalStreamType);
    const lowerNibble = getLogicalSubtypeValue(metadata);
    buffer[writeOffset++] = (physicalTypeIndex << 4) | lowerNibble;
    // Byte 2: Encoding techniques
    const llt1Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique1);
    const llt2Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique2);
    const pltIndex = Object.values(PhysicalLevelTechnique).indexOf(metadata.physicalLevelTechnique);
    buffer[writeOffset++] = (llt1Index << 5) | (llt2Index << 2) | pltIndex;
    // Variable-length fields
    const offset = new IntWrapper(writeOffset);
    encodeVarintInt32Value(metadata.numValues, buffer, offset);
    encodeVarintInt32Value(metadata.byteLength, buffer, offset);
    // Trim the scratch buffer down to the bytes actually written.
    return buffer.slice(0, offset.get());
}
/**
 * Maps a stream's logical subtype to its nibble value: the positional index
 * of the matching enum member (DATA -> DictionaryType, OFFSET -> OffsetType,
 * LENGTH -> LengthType), or 0 when no subtype applies.
 */
function getLogicalSubtypeValue(metadata) {
    const { physicalStreamType, logicalStreamType } = metadata;
    if (physicalStreamType === PhysicalStreamType.DATA && logicalStreamType.dictionaryType !== undefined) {
        return Object.values(DictionaryType).indexOf(logicalStreamType.dictionaryType);
    }
    if (physicalStreamType === PhysicalStreamType.OFFSET && logicalStreamType.offsetType !== undefined) {
        return Object.values(OffsetType).indexOf(logicalStreamType.offsetType);
    }
    if (physicalStreamType === PhysicalStreamType.LENGTH && logicalStreamType.lengthType !== undefined) {
        return Object.values(LengthType).indexOf(logicalStreamType.lengthType);
    }
    return 0;
}
//# sourceMappingURL=stringEncoder.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
export declare function encodeZOrderCurve(x: number, y: number, numBits: number, coordinateShift: number): number;

View File

@@ -0,0 +1,10 @@
/**
 * Interleaves the bits of two coordinates into a Z-order (Morton) code.
 * Bit i of the shifted x lands at position 2i, bit i of the shifted y at
 * position 2i + 1. `coordinateShift` is added to both coordinates first.
 */
export function encodeZOrderCurve(x, y, numBits, coordinateShift) {
    const shiftedX = x + coordinateShift;
    const shiftedY = y + coordinateShift;
    let interleaved = 0;
    for (let bit = numBits - 1; bit >= 0; bit--) {
        const mask = 1 << bit;
        interleaved |= ((shiftedX & mask) << bit) | ((shiftedY & mask) << (bit + 1));
    }
    return interleaved;
}
//# sourceMappingURL=zOrderCurveEncoder.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"zOrderCurveEncoder.js","sourceRoot":"","sources":["../../src/encoding/zOrderCurveEncoder.ts"],"names":[],"mappings":"AAAA,MAAM,UAAU,iBAAiB,CAAC,CAAS,EAAE,CAAS,EAAE,OAAe,EAAE,eAAuB;IAC5F,MAAM,QAAQ,GAAG,CAAC,GAAG,eAAe,CAAC;IACrC,MAAM,QAAQ,GAAG,CAAC,GAAG,eAAe,CAAC;IACrC,IAAI,IAAI,GAAG,CAAC,CAAC;IACb,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,OAAO,EAAE,CAAC,EAAE,EAAE,CAAC;QAC/B,IAAI,IAAI,CAAC,CAAC,QAAQ,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;IAC9E,CAAC;IACD,OAAO,IAAI,CAAC;AAChB,CAAC","sourcesContent":["export function encodeZOrderCurve(x: number, y: number, numBits: number, coordinateShift: number): number {\n const shiftedX = x + coordinateShift;\n const shiftedY = y + coordinateShift;\n let code = 0;\n for (let i = 0; i < numBits; i++) {\n code |= ((shiftedX & (1 << i)) << i) | ((shiftedY & (1 << i)) << (i + 1));\n }\n return code;\n}\n"]}

9
node_modules/@maplibre/mlt/dist/index.d.ts generated vendored Normal file
View File

@@ -0,0 +1,9 @@
/**
 * Public API surface of the package: runtime entry points plus
 * type-only exports for consumers.
 */
export { default as decodeTile } from "./mltDecoder";
export { default as FeatureTable } from "./vector/featureTable";
export { GeometryVector } from "./vector/geometry/geometryVector";
export { GpuVector } from "./vector/geometry/gpuVector";
export { default as GeometryScaling } from "./decoding/geometryScaling";
export { GEOMETRY_TYPE } from "./vector/geometry/geometryType";
export type { TileSetMetadata } from "./metadata/tileset/tilesetMetadata";
export type { Geometry } from "./vector/geometry/geometryVector";
export type { Feature } from "./vector/featureTable";

6
node_modules/@maplibre/mlt/dist/index.js generated vendored Normal file
View File

@@ -0,0 +1,6 @@
// Runtime entry point of the package; type-only exports live in index.d.ts.
// NOTE(review): index.d.ts also declares `GeometryScaling`, which is absent
// from this runtime module — confirm whether it was intentionally dropped.
export { default as decodeTile } from "./mltDecoder";
export { default as FeatureTable } from "./vector/featureTable";
export { GeometryVector } from "./vector/geometry/geometryVector";
export { GpuVector } from "./vector/geometry/gpuVector";
export { GEOMETRY_TYPE } from "./vector/geometry/geometryType";
//# sourceMappingURL=index.js.map

1
node_modules/@maplibre/mlt/dist/index.js.map generated vendored Normal file
View File

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,IAAI,UAAU,EAAE,MAAM,cAAc,CAAC;AACrD,OAAO,EAAE,OAAO,IAAI,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAChE,OAAO,EAAE,cAAc,EAAE,MAAM,kCAAkC,CAAC;AAClE,OAAO,EAAE,SAAS,EAAE,MAAM,6BAA6B,CAAC;AAExD,OAAO,EAAE,aAAa,EAAE,MAAM,gCAAgC,CAAC","sourcesContent":["export { default as decodeTile } from \"./mltDecoder\";\nexport { default as FeatureTable } from \"./vector/featureTable\";\nexport { GeometryVector } from \"./vector/geometry/geometryVector\";\nexport { GpuVector } from \"./vector/geometry/gpuVector\";\nexport { default as GeometryScaling } from \"./decoding/geometryScaling\";\nexport { GEOMETRY_TYPE } from \"./vector/geometry/geometryType\";\nexport type { TileSetMetadata } from \"./metadata/tileset/tilesetMetadata\";\nexport type { Geometry } from \"./vector/geometry/geometryVector\";\nexport type { Feature } from \"./vector/featureTable\";\n"]}

View File

@@ -0,0 +1,8 @@
/**
 * Dictionary layout of a DATA stream; NONE means the values are stored
 * plain, without dictionary compression.
 */
export declare enum DictionaryType {
    NONE = "NONE",
    SINGLE = "SINGLE",
    SHARED = "SHARED",
    VERTEX = "VERTEX",
    MORTON = "MORTON",
    FSST = "FSST"
}

View File

@@ -0,0 +1,10 @@
// Compiled output of the TypeScript string enum `DictionaryType`.
// Declaration order matters: stream metadata encodes members by their
// positional index (Object.values(...).indexOf).
export var DictionaryType;
(function (DictionaryType) {
    DictionaryType["NONE"] = "NONE";
    DictionaryType["SINGLE"] = "SINGLE";
    DictionaryType["SHARED"] = "SHARED";
    DictionaryType["VERTEX"] = "VERTEX";
    DictionaryType["MORTON"] = "MORTON";
    DictionaryType["FSST"] = "FSST";
})(DictionaryType || (DictionaryType = {}));
//# sourceMappingURL=dictionaryType.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"dictionaryType.js","sourceRoot":"","sources":["../../../src/metadata/tile/dictionaryType.ts"],"names":[],"mappings":"AAAA,MAAM,CAAN,IAAY,cAOX;AAPD,WAAY,cAAc;IACtB,+BAAa,CAAA;IACb,mCAAiB,CAAA;IACjB,mCAAiB,CAAA;IACjB,mCAAiB,CAAA;IACjB,mCAAiB,CAAA;IACjB,+BAAa,CAAA;AACjB,CAAC,EAPW,cAAc,KAAd,cAAc,QAOzB","sourcesContent":["export enum DictionaryType {\n NONE = \"NONE\",\n SINGLE = \"SINGLE\",\n SHARED = \"SHARED\",\n VERTEX = \"VERTEX\",\n MORTON = \"MORTON\",\n FSST = \"FSST\",\n}\n"]}

View File

@@ -0,0 +1,9 @@
/**
 * Logical subtype of a LENGTH stream, i.e. what kind of lengths it carries
 * (e.g. VAR_BINARY for plain string bytes, DICTIONARY for dictionary entries).
 */
export declare enum LengthType {
    VAR_BINARY = "VAR_BINARY",
    GEOMETRIES = "GEOMETRIES",
    PARTS = "PARTS",
    RINGS = "RINGS",
    TRIANGLES = "TRIANGLES",
    SYMBOL = "SYMBOL",
    DICTIONARY = "DICTIONARY"
}

View File

@@ -0,0 +1,11 @@
// Compiled output of the TypeScript string enum `LengthType`.
// Declaration order matters: stream metadata encodes members by their
// positional index (Object.values(...).indexOf).
export var LengthType;
(function (LengthType) {
    LengthType["VAR_BINARY"] = "VAR_BINARY";
    LengthType["GEOMETRIES"] = "GEOMETRIES";
    LengthType["PARTS"] = "PARTS";
    LengthType["RINGS"] = "RINGS";
    LengthType["TRIANGLES"] = "TRIANGLES";
    LengthType["SYMBOL"] = "SYMBOL";
    LengthType["DICTIONARY"] = "DICTIONARY";
})(LengthType || (LengthType = {}));
//# sourceMappingURL=lengthType.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"lengthType.js","sourceRoot":"","sources":["../../../src/metadata/tile/lengthType.ts"],"names":[],"mappings":"AAAA,MAAM,CAAN,IAAY,UAQX;AARD,WAAY,UAAU;IAClB,uCAAyB,CAAA;IACzB,uCAAyB,CAAA;IACzB,6BAAe,CAAA;IACf,6BAAe,CAAA;IACf,qCAAuB,CAAA;IACvB,+BAAiB,CAAA;IACjB,uCAAyB,CAAA;AAC7B,CAAC,EARW,UAAU,KAAV,UAAU,QAQrB","sourcesContent":["export enum LengthType {\n VAR_BINARY = \"VAR_BINARY\",\n GEOMETRIES = \"GEOMETRIES\",\n PARTS = \"PARTS\",\n RINGS = \"RINGS\",\n TRIANGLES = \"TRIANGLES\",\n SYMBOL = \"SYMBOL\",\n DICTIONARY = \"DICTIONARY\",\n}\n"]}

View File

@@ -0,0 +1,8 @@
/**
 * Logical (value-level) encoding applied to a stream before the physical
 * (bit-level) technique; two techniques can be chained (e.g. DELTA then RLE).
 */
export declare enum LogicalLevelTechnique {
    NONE = "NONE",
    DELTA = "DELTA",
    COMPONENTWISE_DELTA = "COMPONENTWISE_DELTA",
    RLE = "RLE",
    MORTON = "MORTON",
    PDE = "PDE"
}

View File

@@ -0,0 +1,12 @@
// Compiled output of the TypeScript string enum `LogicalLevelTechnique`.
// Declaration order matters: stream metadata encodes members by their
// positional index (Object.values(...).indexOf).
export var LogicalLevelTechnique;
(function (LogicalLevelTechnique) {
    LogicalLevelTechnique["NONE"] = "NONE";
    LogicalLevelTechnique["DELTA"] = "DELTA";
    LogicalLevelTechnique["COMPONENTWISE_DELTA"] = "COMPONENTWISE_DELTA";
    LogicalLevelTechnique["RLE"] = "RLE";
    LogicalLevelTechnique["MORTON"] = "MORTON";
    // Pseudodecimal Encoding of floats -> only for the exponent integer part an additional logical level technique is used.
    // Both exponent and significant parts are encoded with the same physical level technique
    LogicalLevelTechnique["PDE"] = "PDE";
})(LogicalLevelTechnique || (LogicalLevelTechnique = {}));
//# sourceMappingURL=logicalLevelTechnique.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"logicalLevelTechnique.js","sourceRoot":"","sources":["../../../src/metadata/tile/logicalLevelTechnique.ts"],"names":[],"mappings":"AAAA,MAAM,CAAN,IAAY,qBASX;AATD,WAAY,qBAAqB;IAC7B,sCAAa,CAAA;IACb,wCAAe,CAAA;IACf,oEAA2C,CAAA;IAC3C,oCAAW,CAAA;IACX,0CAAiB,CAAA;IACjB,wHAAwH;IACxH,yFAAyF;IACzF,oCAAW,CAAA;AACf,CAAC,EATW,qBAAqB,KAArB,qBAAqB,QAShC","sourcesContent":["export enum LogicalLevelTechnique {\n NONE = \"NONE\",\n DELTA = \"DELTA\",\n COMPONENTWISE_DELTA = \"COMPONENTWISE_DELTA\",\n RLE = \"RLE\",\n MORTON = \"MORTON\",\n // Pseudodecimal Encoding of floats -> only for the exponent integer part an additional logical level technique is used.\n // Both exponent and significant parts are encoded with the same physical level technique\n PDE = \"PDE\",\n}\n"]}

View File

@@ -0,0 +1,8 @@
import type { DictionaryType } from "./dictionaryType";
import type { OffsetType } from "./offsetType";
import type { LengthType } from "./lengthType";
/**
 * Logical subtype of a physical stream. The field that applies matches the
 * stream's physical type (DATA -> dictionaryType, OFFSET -> offsetType,
 * LENGTH -> lengthType); the others stay unset.
 */
export type LogicalStreamType = {
    readonly dictionaryType?: DictionaryType;
    readonly offsetType?: OffsetType;
    readonly lengthType?: LengthType;
};

View File

@@ -0,0 +1,2 @@
// Type-only module: LogicalStreamType has no runtime representation, so the
// compiled output is just an empty export marking this file as a module.
export {};
//# sourceMappingURL=logicalStreamType.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"logicalStreamType.js","sourceRoot":"","sources":["../../../src/metadata/tile/logicalStreamType.ts"],"names":[],"mappings":"","sourcesContent":["import type { DictionaryType } from \"./dictionaryType\";\nimport type { OffsetType } from \"./offsetType\";\nimport type { LengthType } from \"./lengthType\";\n\nexport type LogicalStreamType = {\n readonly dictionaryType?: DictionaryType;\n readonly offsetType?: OffsetType;\n readonly lengthType?: LengthType;\n};\n"]}

View File

@@ -0,0 +1,6 @@
/**
 * Logical subtype of an OFFSET stream, i.e. what the offsets index into
 * (e.g. STRING for dictionary string offsets).
 */
export declare enum OffsetType {
    VERTEX = "VERTEX",
    INDEX = "INDEX",
    STRING = "STRING",
    KEY = "KEY"
}

View File

@@ -0,0 +1,8 @@
// Compiled output of the TypeScript string enum `OffsetType`.
// Declaration order matters: stream metadata encodes members by their
// positional index (Object.values(...).indexOf).
export var OffsetType;
(function (OffsetType) {
    OffsetType["VERTEX"] = "VERTEX";
    OffsetType["INDEX"] = "INDEX";
    OffsetType["STRING"] = "STRING";
    OffsetType["KEY"] = "KEY";
})(OffsetType || (OffsetType = {}));
//# sourceMappingURL=offsetType.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"offsetType.js","sourceRoot":"","sources":["../../../src/metadata/tile/offsetType.ts"],"names":[],"mappings":"AAAA,MAAM,CAAN,IAAY,UAKX;AALD,WAAY,UAAU;IAClB,+BAAiB,CAAA;IACjB,6BAAe,CAAA;IACf,+BAAiB,CAAA;IACjB,yBAAW,CAAA;AACf,CAAC,EALW,UAAU,KAAV,UAAU,QAKrB","sourcesContent":["export enum OffsetType {\n VERTEX = \"VERTEX\",\n INDEX = \"INDEX\",\n STRING = \"STRING\",\n KEY = \"KEY\",\n}\n"]}

View File

@@ -0,0 +1,17 @@
/**
 * Physical (bit-level) encoding applied to a stream's values after the
 * logical level techniques have been applied.
 */
export declare enum PhysicalLevelTechnique {
    NONE = "NONE",
    /**
     * Preferred option, tends to produce the best compression ratio and decoding performance.
     * But currently only limited to 32 bit integer.
     */
    FAST_PFOR = "FAST_PFOR",
    /**
     * Can produce better results in combination with a heavyweight compression scheme like Gzip.
     * Simple compression scheme where the decoder are easier to implement compared to FastPfor.
     */
    VARINT = "VARINT",
    /**
     * Adaptive Lossless floating-Point Compression
     */
    ALP = "ALP"
}

Some files were not shown because too many files have changed in this diff Show More