Initial commit
This commit is contained in:
7
node_modules/@maplibre/mlt/dist/encoding/bigEndianEncode.d.ts
generated
vendored
Normal file
7
node_modules/@maplibre/mlt/dist/encoding/bigEndianEncode.d.ts
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
/**
|
||||
* Serializes an `Int32Array` to a big-endian byte stream.
|
||||
*
|
||||
* @param values - Int32 words to serialize.
|
||||
* @returns Big-endian byte stream (`values.length * 4` bytes).
|
||||
*/
|
||||
export declare function encodeBigEndianInt32s(values: Uint32Array): Uint8Array;
|
||||
16
node_modules/@maplibre/mlt/dist/encoding/bigEndianEncode.js
generated
vendored
Normal file
16
node_modules/@maplibre/mlt/dist/encoding/bigEndianEncode.js
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
import { bswap32 } from "../decoding/fastPforShared";
|
||||
/**
|
||||
* Serializes an `Int32Array` to a big-endian byte stream.
|
||||
*
|
||||
* @param values - Int32 words to serialize.
|
||||
* @returns Big-endian byte stream (`values.length * 4` bytes).
|
||||
*/
|
||||
export function encodeBigEndianInt32s(values) {
|
||||
const bytes = new Uint8Array(values.length * 4);
|
||||
const u32 = new Uint32Array(bytes.buffer, bytes.byteOffset, values.length);
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
u32[i] = bswap32(values[i]);
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
//# sourceMappingURL=bigEndianEncode.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/bigEndianEncode.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/bigEndianEncode.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"bigEndianEncode.js","sourceRoot":"","sources":["../../src/encoding/bigEndianEncode.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,4BAA4B,CAAC;AAErD;;;;;GAKG;AACH,MAAM,UAAU,qBAAqB,CAAC,MAAmB;IACrD,MAAM,KAAK,GAAG,IAAI,UAAU,CAAC,MAAM,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;IAChD,MAAM,GAAG,GAAG,IAAI,WAAW,CAAC,KAAK,CAAC,MAAM,EAAE,KAAK,CAAC,UAAU,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC;IAE3E,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QACrC,GAAG,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,CAAC;IAChC,CAAC;IACD,OAAO,KAAK,CAAC;AACjB,CAAC","sourcesContent":["import { bswap32 } from \"../decoding/fastPforShared\";\n\n/**\n * Serializes an `Int32Array` to a big-endian byte stream.\n *\n * @param values - Int32 words to serialize.\n * @returns Big-endian byte stream (`values.length * 4` bytes).\n */\nexport function encodeBigEndianInt32s(values: Uint32Array): Uint8Array {\n const bytes = new Uint8Array(values.length * 4);\n const u32 = new Uint32Array(bytes.buffer, bytes.byteOffset, values.length);\n\n for (let i = 0; i < values.length; i++) {\n u32[i] = bswap32(values[i]);\n }\n return bytes;\n}\n"]}
|
||||
19
node_modules/@maplibre/mlt/dist/encoding/constGeometryVectorEncoder.d.ts
generated
vendored
Normal file
19
node_modules/@maplibre/mlt/dist/encoding/constGeometryVectorEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
import type { GeometryVector, MortonSettings } from "../vector/geometry/geometryVector";
|
||||
export declare const DEFAULT_MORTON_SETTINGS: MortonSettings;
|
||||
export declare function encode(x: number, y: number): number;
|
||||
export declare function encodePointGeometryVector(x: number, y: number): GeometryVector;
|
||||
export declare function encodePointGeometryVectorWithOffset(x: number, y: number): GeometryVector;
|
||||
export declare function encodePointGeometryVectorWithMortonEncoding(x: number, y: number): GeometryVector;
|
||||
export declare function encodePointsGeometryVector(points: number[]): GeometryVector;
|
||||
export declare function encodeMultiPointGeometryVector(points: number[][]): GeometryVector;
|
||||
export declare function encodeLineStringGeometryVector(lines: [number, number][]): GeometryVector;
|
||||
export declare function encodeLineStringGeometryVectorWithMortonEncoding(line: [number, number][]): GeometryVector;
|
||||
export declare function encodePolygonGeometryVector(polygon: [number, number][][]): GeometryVector;
|
||||
export declare function encodePolygonGeometryVectorWithOffsets(polygon: [number, number][][]): GeometryVector;
|
||||
export declare function encodePolygonGeometryVectorWithMortonOffsets(polygon: [number, number][][]): GeometryVector;
|
||||
export declare function encodeMultiLineStringGeometryVector(lines: [number, number][][]): GeometryVector;
|
||||
export declare function encodeMultiLineStringGeometryVectorWithOffsets(lines: [number, number][][]): GeometryVector;
|
||||
export declare function encodeMultiLineStringGeometryVectorWithMortonOffsets(lines: [number, number][][]): GeometryVector;
|
||||
export declare function encodeMultiPolygonGeometryVector(polygons: [number, number][][][]): GeometryVector;
|
||||
export declare function encodeMultiPolygonGeometryVectorWithOffsets(polygons: [number, number][][][]): GeometryVector;
|
||||
export declare function encodeMultiPolygonGeometryVectorWithMortonOffsets(polygons: [number, number][][][]): GeometryVector;
|
||||
248
node_modules/@maplibre/mlt/dist/encoding/constGeometryVectorEncoder.js
generated
vendored
Normal file
248
node_modules/@maplibre/mlt/dist/encoding/constGeometryVectorEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
import { ConstGeometryVector } from "../vector/geometry/constGeometryVector";
|
||||
import { GEOMETRY_TYPE } from "../vector/geometry/geometryType";
|
||||
import { VertexBufferType } from "../vector/geometry/vertexBufferType";
|
||||
import { encodeZOrderCurve } from "./zOrderCurveEncoder";
|
||||
export const DEFAULT_MORTON_SETTINGS = { numBits: 16, coordinateShift: 0 };
|
||||
export function encode(x, y) {
|
||||
return encodeZOrderCurve(x, y, DEFAULT_MORTON_SETTINGS.numBits, DEFAULT_MORTON_SETTINGS.coordinateShift);
|
||||
}
|
||||
export function encodePointGeometryVector(x, y) {
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.POINT, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0]),
|
||||
partOffsets: new Uint32Array([0]),
|
||||
ringOffsets: new Uint32Array([0]),
|
||||
}, undefined, new Int32Array([x, y]));
|
||||
}
|
||||
export function encodePointGeometryVectorWithOffset(x, y) {
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.POINT, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0]),
|
||||
partOffsets: new Uint32Array([0]),
|
||||
ringOffsets: new Uint32Array([0]),
|
||||
}, new Uint32Array([1]), new Int32Array([99, 99, x, y]));
|
||||
}
|
||||
export function encodePointGeometryVectorWithMortonEncoding(x, y) {
|
||||
const mortonEncoded = encode(x, y);
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.POINT, VertexBufferType.MORTON, {
|
||||
geometryOffsets: new Uint32Array([0]),
|
||||
partOffsets: new Uint32Array([0]),
|
||||
ringOffsets: new Uint32Array([0]),
|
||||
}, new Uint32Array([0]), new Int32Array([mortonEncoded]), DEFAULT_MORTON_SETTINGS);
|
||||
}
|
||||
export function encodePointsGeometryVector(points) {
|
||||
return new ConstGeometryVector(points.length / 2, GEOMETRY_TYPE.POINT, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0]),
|
||||
partOffsets: new Uint32Array([0]),
|
||||
ringOffsets: new Uint32Array([0]),
|
||||
}, undefined, new Int32Array(points));
|
||||
}
|
||||
export function encodeMultiPointGeometryVector(points) {
|
||||
const vertexBuffer = new Int32Array(points.flatMap((point) => [point[0], point[1]]));
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOINT, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0, points.length]),
|
||||
partOffsets: undefined,
|
||||
ringOffsets: undefined,
|
||||
}, undefined, vertexBuffer);
|
||||
}
|
||||
export function encodeLineStringGeometryVector(lines) {
|
||||
const vertexBuffer = new Int32Array(lines.flatMap((line) => [line[0], line[1]]));
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.LINESTRING, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: undefined,
|
||||
partOffsets: new Uint32Array([0, vertexBuffer.length / 2]),
|
||||
ringOffsets: undefined,
|
||||
}, undefined, vertexBuffer);
|
||||
}
|
||||
export function encodeLineStringGeometryVectorWithMortonEncoding(line) {
|
||||
const numVertices = line.length;
|
||||
const vertexBuffer = new Int32Array(numVertices);
|
||||
const offsetBuffer = new Uint32Array(numVertices);
|
||||
for (let i = 0; i < numVertices; i++) {
|
||||
vertexBuffer[i] = encode(line[i][0], line[i][1]);
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.LINESTRING, VertexBufferType.MORTON, {
|
||||
geometryOffsets: undefined,
|
||||
partOffsets: new Uint32Array([0, numVertices]),
|
||||
ringOffsets: undefined,
|
||||
}, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
|
||||
}
|
||||
export function encodePolygonGeometryVector(polygon) {
|
||||
const vertexBuffer = new Int32Array(polygon.flatMap((ring) => ring.flatMap((point) => [point[0], point[1]])));
|
||||
const ringOffsets = new Uint32Array(polygon.length + 1);
|
||||
ringOffsets[0] = 0;
|
||||
let ringIndex = 1;
|
||||
for (const ring of polygon) {
|
||||
ringOffsets[ringIndex] = ringOffsets[ringIndex - 1] + ring.length;
|
||||
ringIndex++;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.POLYGON, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: undefined,
|
||||
partOffsets: new Uint32Array([0, polygon.length]),
|
||||
ringOffsets,
|
||||
}, undefined, vertexBuffer);
|
||||
}
|
||||
export function encodePolygonGeometryVectorWithOffsets(polygon) {
|
||||
const vertexBuffer = new Int32Array(polygon.flatMap((ring) => ring.flatMap((point) => [point[0], point[1]])));
|
||||
const ringOffsets = new Uint32Array(polygon.length + 1);
|
||||
ringOffsets[0] = 0;
|
||||
let ringIndex = 1;
|
||||
for (const ring of polygon) {
|
||||
ringOffsets[ringIndex] = ringOffsets[ringIndex - 1] + ring.length;
|
||||
ringIndex++;
|
||||
}
|
||||
const offsetBuffer = new Uint32Array(vertexBuffer.length / 2);
|
||||
for (let i = 0; i < offsetBuffer.length; i++) {
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.POLYGON, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: undefined,
|
||||
partOffsets: new Uint32Array([0, polygon.length]),
|
||||
ringOffsets,
|
||||
}, offsetBuffer, vertexBuffer);
|
||||
}
|
||||
export function encodePolygonGeometryVectorWithMortonOffsets(polygon) {
|
||||
const vertexBuffer = new Int32Array(polygon.flatMap((ring) => ring.flatMap((point) => encode(point[0], point[1]))));
|
||||
const ringOffsets = new Uint32Array(polygon.length + 1);
|
||||
ringOffsets[0] = 0;
|
||||
let ringIndex = 1;
|
||||
for (const ring of polygon) {
|
||||
ringOffsets[ringIndex] = ringOffsets[ringIndex - 1] + ring.length;
|
||||
ringIndex++;
|
||||
}
|
||||
const offsetBuffer = new Uint32Array(vertexBuffer.length);
|
||||
for (let i = 0; i < offsetBuffer.length; i++) {
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.POLYGON, VertexBufferType.MORTON, {
|
||||
geometryOffsets: undefined,
|
||||
partOffsets: new Uint32Array([0, polygon.length]),
|
||||
ringOffsets,
|
||||
}, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
|
||||
}
|
||||
export function encodeMultiLineStringGeometryVector(lines) {
|
||||
const vertexBuffer = new Int32Array(lines.flatMap((line) => line.flatMap((point) => [point[0], point[1]])));
|
||||
const partOffsets = new Uint32Array(lines.length + 1);
|
||||
partOffsets[0] = 0;
|
||||
let partIndex = 1;
|
||||
for (const line of lines) {
|
||||
partOffsets[partIndex] = partOffsets[partIndex - 1] + line.length;
|
||||
partIndex++;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTILINESTRING, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0, lines.length]),
|
||||
partOffsets,
|
||||
ringOffsets: undefined,
|
||||
}, undefined, vertexBuffer);
|
||||
}
|
||||
export function encodeMultiLineStringGeometryVectorWithOffsets(lines) {
|
||||
const vertexBuffer = new Int32Array(lines.flatMap((line) => line.flatMap((point) => [point[0], point[1]])));
|
||||
const partOffsets = new Uint32Array(lines.length + 1);
|
||||
partOffsets[0] = 0;
|
||||
let partIndex = 1;
|
||||
for (const line of lines) {
|
||||
partOffsets[partIndex] = partOffsets[partIndex - 1] + line.length;
|
||||
partIndex++;
|
||||
}
|
||||
const offsetBuffer = new Uint32Array(vertexBuffer.length / 2);
|
||||
for (let i = 0; i < offsetBuffer.length; i++) {
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTILINESTRING, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0, lines.length]),
|
||||
partOffsets,
|
||||
ringOffsets: undefined,
|
||||
}, offsetBuffer, vertexBuffer);
|
||||
}
|
||||
export function encodeMultiLineStringGeometryVectorWithMortonOffsets(lines) {
|
||||
const vertexBuffer = new Int32Array(lines.flatMap((line) => line.flatMap((point) => encode(point[0], point[1]))));
|
||||
const partOffsets = new Uint32Array(lines.length + 1);
|
||||
partOffsets[0] = 0;
|
||||
let partIndex = 1;
|
||||
for (const line of lines) {
|
||||
partOffsets[partIndex] = partOffsets[partIndex - 1] + line.length;
|
||||
partIndex++;
|
||||
}
|
||||
const offsetBuffer = new Uint32Array(vertexBuffer.length);
|
||||
for (let i = 0; i < offsetBuffer.length; i++) {
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTILINESTRING, VertexBufferType.MORTON, {
|
||||
geometryOffsets: new Uint32Array([0, lines.length]),
|
||||
partOffsets,
|
||||
ringOffsets: undefined,
|
||||
}, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
|
||||
}
|
||||
export function encodeMultiPolygonGeometryVector(polygons) {
|
||||
const vertexBuffer = new Int32Array(polygons.flatMap((polygon) => polygon.flatMap((ring) => ring.flatMap((point) => [point[0], point[1]]))));
|
||||
const ringOffsets = new Uint32Array(polygons.reduce((sum, polygon) => sum + polygon.length, 0) + 1);
|
||||
const partOffsets = new Uint32Array(polygons.length + 1);
|
||||
ringOffsets[0] = 0;
|
||||
partOffsets[0] = 0;
|
||||
let ringIndex = 1;
|
||||
let partIndex = 1;
|
||||
for (const polygon of polygons) {
|
||||
for (const ring of polygon) {
|
||||
ringOffsets[ringIndex] = ringOffsets[ringIndex - 1] + ring.length;
|
||||
ringIndex++;
|
||||
}
|
||||
partOffsets[partIndex] = partOffsets[partIndex - 1] + polygon.length;
|
||||
partIndex++;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOLYGON, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0, polygons.length]),
|
||||
partOffsets,
|
||||
ringOffsets,
|
||||
}, undefined, vertexBuffer);
|
||||
}
|
||||
export function encodeMultiPolygonGeometryVectorWithOffsets(polygons) {
|
||||
const vertexBuffer = new Int32Array(polygons.flatMap((polygon) => polygon.flatMap((ring) => ring.flatMap((point) => [point[0], point[1]]))));
|
||||
const ringOffsets = new Uint32Array(polygons.reduce((sum, polygon) => sum + polygon.length, 0) + 1);
|
||||
const partOffsets = new Uint32Array(polygons.length + 1);
|
||||
ringOffsets[0] = 0;
|
||||
partOffsets[0] = 0;
|
||||
let ringIndex = 1;
|
||||
let partIndex = 1;
|
||||
for (const polygon of polygons) {
|
||||
for (const ring of polygon) {
|
||||
ringOffsets[ringIndex] = ringOffsets[ringIndex - 1] + ring.length;
|
||||
ringIndex++;
|
||||
}
|
||||
partOffsets[partIndex] = partOffsets[partIndex - 1] + polygon.length;
|
||||
partIndex++;
|
||||
}
|
||||
const offsetBuffer = new Uint32Array(vertexBuffer.length / 2);
|
||||
for (let i = 0; i < offsetBuffer.length; i++) {
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOLYGON, VertexBufferType.VEC_2, {
|
||||
geometryOffsets: new Uint32Array([0, polygons.length]),
|
||||
partOffsets,
|
||||
ringOffsets,
|
||||
}, offsetBuffer, vertexBuffer);
|
||||
}
|
||||
export function encodeMultiPolygonGeometryVectorWithMortonOffsets(polygons) {
|
||||
const vertexBuffer = new Int32Array(polygons.flatMap((polygon) => polygon.flatMap((ring) => ring.flatMap((point) => encode(point[0], point[1])))));
|
||||
const ringOffsets = new Uint32Array(polygons.reduce((sum, polygon) => sum + polygon.length, 0) + 1);
|
||||
const partOffsets = new Uint32Array(polygons.length + 1);
|
||||
ringOffsets[0] = 0;
|
||||
partOffsets[0] = 0;
|
||||
let ringIndex = 1;
|
||||
let partIndex = 1;
|
||||
for (const polygon of polygons) {
|
||||
for (const ring of polygon) {
|
||||
ringOffsets[ringIndex] = ringOffsets[ringIndex - 1] + ring.length;
|
||||
ringIndex++;
|
||||
}
|
||||
partOffsets[partIndex] = partOffsets[partIndex - 1] + polygon.length;
|
||||
partIndex++;
|
||||
}
|
||||
const offsetBuffer = new Uint32Array(vertexBuffer.length);
|
||||
for (let i = 0; i < offsetBuffer.length; i++) {
|
||||
offsetBuffer[i] = i;
|
||||
}
|
||||
return new ConstGeometryVector(1, GEOMETRY_TYPE.MULTIPOLYGON, VertexBufferType.MORTON, {
|
||||
geometryOffsets: new Uint32Array([0, polygons.length]),
|
||||
partOffsets,
|
||||
ringOffsets,
|
||||
}, offsetBuffer, vertexBuffer, DEFAULT_MORTON_SETTINGS);
|
||||
}
|
||||
//# sourceMappingURL=constGeometryVectorEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/constGeometryVectorEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/constGeometryVectorEncoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
16
node_modules/@maplibre/mlt/dist/encoding/embeddedTilesetMetadataEncoder.d.ts
generated
vendored
Normal file
16
node_modules/@maplibre/mlt/dist/encoding/embeddedTilesetMetadataEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
/**
|
||||
* Encodes a single typeCode as a varint.
|
||||
*/
|
||||
export declare function encodeTypeCode(typeCode: number): Uint8Array;
|
||||
/**
|
||||
* Encodes a field name as a length-prefixed UTF-8 string.
|
||||
*/
|
||||
export declare function encodeFieldName(name: string): Uint8Array;
|
||||
/**
|
||||
* Encodes a child count as a varint.
|
||||
*/
|
||||
export declare function encodeChildCount(count: number): Uint8Array;
|
||||
/**
|
||||
* Computes typeCode for a scalar field.
|
||||
*/
|
||||
export declare function scalarTypeCode(scalarType: number, nullable: boolean): number;
|
||||
40
node_modules/@maplibre/mlt/dist/encoding/embeddedTilesetMetadataEncoder.js
generated
vendored
Normal file
40
node_modules/@maplibre/mlt/dist/encoding/embeddedTilesetMetadataEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
import IntWrapper from "../decoding/intWrapper";
|
||||
import { encodeVarintInt32Value } from "./integerEncodingUtils";
|
||||
import { concatenateBuffers } from "../decoding/decodingTestUtils";
|
||||
/**
|
||||
* Encodes a single typeCode as a varint.
|
||||
*/
|
||||
export function encodeTypeCode(typeCode) {
|
||||
const buffer = new Uint8Array(5);
|
||||
const offset = new IntWrapper(0);
|
||||
encodeVarintInt32Value(typeCode, buffer, offset);
|
||||
return buffer.slice(0, offset.get());
|
||||
}
|
||||
/**
|
||||
* Encodes a field name as a length-prefixed UTF-8 string.
|
||||
*/
|
||||
export function encodeFieldName(name) {
|
||||
const textEncoder = new TextEncoder();
|
||||
const nameBytes = textEncoder.encode(name);
|
||||
const lengthBuf = new Uint8Array(5);
|
||||
const offset = new IntWrapper(0);
|
||||
encodeVarintInt32Value(nameBytes.length, lengthBuf, offset);
|
||||
const lengthSlice = lengthBuf.slice(0, offset.get());
|
||||
return concatenateBuffers(lengthSlice, nameBytes);
|
||||
}
|
||||
/**
|
||||
* Encodes a child count as a varint.
|
||||
*/
|
||||
export function encodeChildCount(count) {
|
||||
const buffer = new Uint8Array(5);
|
||||
const offset = new IntWrapper(0);
|
||||
encodeVarintInt32Value(count, buffer, offset);
|
||||
return buffer.slice(0, offset.get());
|
||||
}
|
||||
/**
|
||||
* Computes typeCode for a scalar field.
|
||||
*/
|
||||
export function scalarTypeCode(scalarType, nullable) {
|
||||
return 10 + scalarType * 2 + (nullable ? 1 : 0);
|
||||
}
|
||||
//# sourceMappingURL=embeddedTilesetMetadataEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/embeddedTilesetMetadataEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/embeddedTilesetMetadataEncoder.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"embeddedTilesetMetadataEncoder.js","sourceRoot":"","sources":["../../src/encoding/embeddedTilesetMetadataEncoder.ts"],"names":[],"mappings":"AAAA,OAAO,UAAU,MAAM,wBAAwB,CAAC;AAChD,OAAO,EAAE,sBAAsB,EAAE,MAAM,wBAAwB,CAAC;AAChE,OAAO,EAAE,kBAAkB,EAAE,MAAM,+BAA+B,CAAC;AAEnE;;GAEG;AACH,MAAM,UAAU,cAAc,CAAC,QAAgB;IAC3C,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,sBAAsB,CAAC,QAAQ,EAAE,MAAM,EAAE,MAAM,CAAC,CAAC;IACjD,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC;AACzC,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,eAAe,CAAC,IAAY;IACxC,MAAM,WAAW,GAAG,IAAI,WAAW,EAAE,CAAC;IACtC,MAAM,SAAS,GAAG,WAAW,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC;IAC3C,MAAM,SAAS,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACpC,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,sBAAsB,CAAC,SAAS,CAAC,MAAM,EAAE,SAAS,EAAE,MAAM,CAAC,CAAC;IAC5D,MAAM,WAAW,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC;IACrD,OAAO,kBAAkB,CAAC,WAAW,EAAE,SAAS,CAAC,CAAC;AACtD,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,gBAAgB,CAAC,KAAa;IAC1C,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,CAAC,CAAC,CAAC;IACjC,sBAAsB,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9C,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,EAAE,MAAM,CAAC,GAAG,EAAE,CAAC,CAAC;AACzC,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,cAAc,CAAC,UAAkB,EAAE,QAAiB;IAChE,OAAO,EAAE,GAAG,UAAU,GAAG,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACpD,CAAC","sourcesContent":["import IntWrapper from \"../decoding/intWrapper\";\nimport { encodeVarintInt32Value } from \"./integerEncodingUtils\";\nimport { concatenateBuffers } from \"../decoding/decodingTestUtils\";\n\n/**\n * Encodes a single typeCode as a varint.\n */\nexport function encodeTypeCode(typeCode: number): Uint8Array {\n const buffer = new Uint8Array(5);\n const offset = new IntWrapper(0);\n encodeVarintInt32Value(typeCode, buffer, offset);\n return buffer.slice(0, offset.get());\n}\n\n/**\n * Encodes a field name as a 
length-prefixed UTF-8 string.\n */\nexport function encodeFieldName(name: string): Uint8Array {\n const textEncoder = new TextEncoder();\n const nameBytes = textEncoder.encode(name);\n const lengthBuf = new Uint8Array(5);\n const offset = new IntWrapper(0);\n encodeVarintInt32Value(nameBytes.length, lengthBuf, offset);\n const lengthSlice = lengthBuf.slice(0, offset.get());\n return concatenateBuffers(lengthSlice, nameBytes);\n}\n\n/**\n * Encodes a child count as a varint.\n */\nexport function encodeChildCount(count: number): Uint8Array {\n const buffer = new Uint8Array(5);\n const offset = new IntWrapper(0);\n encodeVarintInt32Value(count, buffer, offset);\n return buffer.slice(0, offset.get());\n}\n\n/**\n * Computes typeCode for a scalar field.\n */\nexport function scalarTypeCode(scalarType: number, nullable: boolean): number {\n return 10 + scalarType * 2 + (nullable ? 1 : 0);\n}\n"]}
|
||||
7
node_modules/@maplibre/mlt/dist/encoding/encodingUtils.d.ts
generated
vendored
Normal file
7
node_modules/@maplibre/mlt/dist/encoding/encodingUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
export declare function encodeFloatsLE(values: Float32Array): Uint8Array;
|
||||
export declare function encodeDoubleLE(values: Float64Array): Uint8Array;
|
||||
export declare function encodeBooleanRle(values: boolean[]): Uint8Array;
|
||||
export declare function encodeByteRle(values: Uint8Array): Uint8Array;
|
||||
export declare function encodeStrings(strings: string[]): Uint8Array;
|
||||
export declare function createStringLengths(strings: string[]): Uint32Array;
|
||||
export declare function concatenateBuffers(...buffers: Uint8Array[]): Uint8Array;
|
||||
107
node_modules/@maplibre/mlt/dist/encoding/encodingUtils.js
generated
vendored
Normal file
107
node_modules/@maplibre/mlt/dist/encoding/encodingUtils.js
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
export function encodeFloatsLE(values) {
|
||||
const buffer = new Uint8Array(values.length * 4);
|
||||
const view = new DataView(buffer.buffer);
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
view.setFloat32(i * 4, values[i], true);
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
export function encodeDoubleLE(values) {
|
||||
const buffer = new Uint8Array(values.length * Float64Array.BYTES_PER_ELEMENT);
|
||||
const view = new DataView(buffer.buffer);
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
view.setFloat64(i * Float64Array.BYTES_PER_ELEMENT, values[i], true);
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
export function encodeBooleanRle(values) {
|
||||
// Pack booleans into bytes (8 booleans per byte)
|
||||
const numBytes = Math.ceil(values.length / 8);
|
||||
const packed = new Uint8Array(numBytes);
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
if (values[i]) {
|
||||
const byteIndex = Math.floor(i / 8);
|
||||
const bitIndex = i % 8;
|
||||
packed[byteIndex] |= 1 << bitIndex;
|
||||
}
|
||||
}
|
||||
const result = new Uint8Array(1 + numBytes);
|
||||
result[0] = 256 - numBytes;
|
||||
result.set(packed, 1);
|
||||
return result;
|
||||
}
|
||||
export function encodeByteRle(values) {
|
||||
const result = [];
|
||||
let i = 0;
|
||||
while (i < values.length) {
|
||||
const currentByte = values[i];
|
||||
let runLength = 1;
|
||||
while (i + runLength < values.length && values[i + runLength] === currentByte && runLength < 131) {
|
||||
runLength++;
|
||||
}
|
||||
if (runLength >= 3) {
|
||||
const header = runLength - 3;
|
||||
result.push(Math.min(header, 0x7f));
|
||||
result.push(currentByte);
|
||||
i += runLength;
|
||||
}
|
||||
else {
|
||||
const literalStart = i;
|
||||
while (i < values.length) {
|
||||
let nextRunLength = 1;
|
||||
if (i + 1 < values.length) {
|
||||
while (i + nextRunLength < values.length &&
|
||||
values[i + nextRunLength] === values[i] &&
|
||||
nextRunLength < 3) {
|
||||
nextRunLength++;
|
||||
}
|
||||
}
|
||||
if (nextRunLength >= 3) {
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
if (i - literalStart >= 128) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
const numLiterals = i - literalStart;
|
||||
const header = 256 - numLiterals;
|
||||
result.push(header);
|
||||
for (let j = literalStart; j < i; j++) {
|
||||
result.push(values[j]);
|
||||
}
|
||||
}
|
||||
}
|
||||
return new Uint8Array(result);
|
||||
}
|
||||
export function encodeStrings(strings) {
|
||||
const encoder = new TextEncoder();
|
||||
const encoded = strings.map((s) => encoder.encode(s));
|
||||
const totalLength = encoded.reduce((sum, arr) => sum + arr.length, 0);
|
||||
const result = new Uint8Array(totalLength);
|
||||
let offset = 0;
|
||||
for (const arr of encoded) {
|
||||
result.set(arr, offset);
|
||||
offset += arr.length;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
export function createStringLengths(strings) {
|
||||
const lengths = new Uint32Array(strings.length);
|
||||
const encoder = new TextEncoder();
|
||||
for (let i = 0; i < strings.length; i++) {
|
||||
lengths[i] = encoder.encode(strings[i]).length;
|
||||
}
|
||||
return lengths;
|
||||
}
|
||||
export function concatenateBuffers(...buffers) {
|
||||
const totalLength = buffers.reduce((sum, buf) => sum + buf.length, 0);
|
||||
const result = new Uint8Array(totalLength);
|
||||
let offset = 0;
|
||||
for (const buffer of buffers) {
|
||||
result.set(buffer, offset);
|
||||
offset += buffer.length;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
//# sourceMappingURL=encodingUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/encodingUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/encodingUtils.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
18
node_modules/@maplibre/mlt/dist/encoding/fastPforEncoder.d.ts
generated
vendored
Normal file
18
node_modules/@maplibre/mlt/dist/encoding/fastPforEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
/**
|
||||
* Internal workspace for the FastPFOR encoder.
|
||||
* Exposed so callers can avoid allocations.
|
||||
* Use one workspace per concurrent encode call.
|
||||
*/
|
||||
export type FastPforEncoderWorkspace = {
|
||||
dataToBePacked: Array<Uint32Array | undefined>;
|
||||
dataPointers: Int32Array;
|
||||
byteContainer: Uint8Array;
|
||||
bitWidthFrequencies: Int32Array;
|
||||
bestBitWidthPlan: Int32Array;
|
||||
};
|
||||
export declare function fastPack32(inValues: Uint32Array, inPos: number, out: Uint32Array, outPos: number, bitWidth: number): void;
|
||||
export declare function createFastPforEncoderWorkspace(): FastPforEncoderWorkspace;
|
||||
/**
|
||||
* Encodes an int32 stream using the FastPFOR wire format (pages + VByte tail).
|
||||
*/
|
||||
export declare function encodeFastPforInt32WithWorkspace(values: Uint32Array, workspace: FastPforEncoderWorkspace): Uint32Array;
|
||||
310
node_modules/@maplibre/mlt/dist/encoding/fastPforEncoder.js
generated
vendored
Normal file
310
node_modules/@maplibre/mlt/dist/encoding/fastPforEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
import { MASKS, DEFAULT_PAGE_SIZE, BLOCK_SIZE, greatestMultiple, roundUpToMultipleOf32, normalizePageSize, } from "../decoding/fastPforShared";
|
||||
const EXCEPTION_OVERHEAD_BITS = 8;
|
||||
const MAX_BIT_WIDTH = 32;
|
||||
const BIT_WIDTH_SLOTS = MAX_BIT_WIDTH + 1;
|
||||
const PAGE_SIZE = normalizePageSize(DEFAULT_PAGE_SIZE);
|
||||
const INITIAL_PACKED_BUFFER_SIZE_WORDS = (PAGE_SIZE / 32) * 4;
|
||||
const BYTE_CONTAINER_SIZE = ((3 * PAGE_SIZE) / BLOCK_SIZE + PAGE_SIZE) | 0;
|
||||
function requiredBits(value) {
|
||||
return 32 - Math.clz32(value >>> 0);
|
||||
}
|
||||
function ensureInt32Capacity(buffer, requiredLength) {
|
||||
if (requiredLength <= buffer.length)
|
||||
return buffer;
|
||||
let newLength = buffer.length === 0 ? 1 : buffer.length;
|
||||
while (newLength < requiredLength) {
|
||||
newLength *= 2;
|
||||
}
|
||||
const next = new Uint32Array(newLength);
|
||||
next.set(buffer);
|
||||
return next;
|
||||
}
|
||||
function ensureUint8Capacity(buffer, requiredLength) {
|
||||
if (requiredLength <= buffer.length)
|
||||
return buffer;
|
||||
let newLength = buffer.length === 0 ? 1 : buffer.length;
|
||||
while (newLength < requiredLength) {
|
||||
newLength *= 2;
|
||||
}
|
||||
const next = new Uint8Array(newLength);
|
||||
next.set(buffer);
|
||||
return next;
|
||||
}
|
||||
export function fastPack32(inValues, inPos, out, outPos, bitWidth) {
|
||||
if (bitWidth === 0)
|
||||
return;
|
||||
if (bitWidth === 32) {
|
||||
out.set(inValues.subarray(inPos, inPos + 32), outPos);
|
||||
return;
|
||||
}
|
||||
const mask = MASKS[bitWidth] >>> 0;
|
||||
let outputWordIndex = outPos;
|
||||
let bitOffset = 0;
|
||||
let currentWord = 0;
|
||||
for (let i = 0; i < 32; i++) {
|
||||
const value = (inValues[inPos + i] >>> 0) & mask;
|
||||
if (bitOffset + bitWidth <= 32) {
|
||||
currentWord |= value << bitOffset;
|
||||
bitOffset += bitWidth;
|
||||
if (bitOffset === 32) {
|
||||
out[outputWordIndex++] = currentWord | 0;
|
||||
bitOffset = 0;
|
||||
currentWord = 0;
|
||||
}
|
||||
}
|
||||
else {
|
||||
const lowBits = 32 - bitOffset;
|
||||
const lowMask = MASKS[lowBits] >>> 0;
|
||||
currentWord |= (value & lowMask) << bitOffset;
|
||||
out[outputWordIndex++] = currentWord | 0;
|
||||
currentWord = value >>> lowBits;
|
||||
bitOffset = bitWidth - lowBits;
|
||||
}
|
||||
}
|
||||
}
|
/**
 * Allocates the reusable scratch buffers shared by FastPFOR encode calls.
 * Slot 0 of `dataToBePacked` is intentionally left empty; slots 1..N hold
 * exception values grouped by their bit width.
 */
export function createFastPforEncoderWorkspace() {
    const packedByWidth = new Array(BIT_WIDTH_SLOTS);
    for (let width = BIT_WIDTH_SLOTS - 1; width >= 1; width--) {
        packedByWidth[width] = new Uint32Array(INITIAL_PACKED_BUFFER_SIZE_WORDS);
    }
    const workspace = {
        dataToBePacked: packedByWidth,
        dataPointers: new Int32Array(BIT_WIDTH_SLOTS),
        byteContainer: new Uint8Array(BYTE_CONTAINER_SIZE),
        bitWidthFrequencies: new Int32Array(BIT_WIDTH_SLOTS),
        bestBitWidthPlan: new Int32Array(3),
    };
    return workspace;
}
/**
 * Chooses the regular bit width for one BLOCK_SIZE block, trading off packing
 * every value at the maximum width against storing outliers as exceptions.
 * Writes the result into workspace.bestBitWidthPlan as
 * [bestBitWidth, exceptionCount, maxBitWidth].
 */
function computeBestBitWidthPlan(inValues, pos, workspace) {
    const bitWidthFrequencies = workspace.bitWidthFrequencies;
    const bestBitWidthPlan = workspace.bestBitWidthPlan;
    bitWidthFrequencies.fill(0);
    // Histogram: how many values in the block need each bit width.
    for (let k = pos, kEnd = pos + BLOCK_SIZE; k < kEnd; k++) {
        bitWidthFrequencies[requiredBits(inValues[k])]++;
    }
    // Largest bit width actually present in the block.
    let maxBitWidth = MAX_BIT_WIDTH;
    while (bitWidthFrequencies[maxBitWidth] === 0)
        maxBitWidth--;
    // Baseline: pack everything at maxBitWidth with no exceptions.
    let bestBitWidth = maxBitWidth;
    let bestCost = maxBitWidth * BLOCK_SIZE;
    let exceptionCount = 0;
    let bestExceptionCount = exceptionCount;
    // Try each smaller width; values wider than the candidate become exceptions.
    for (let candidateBitWidth = maxBitWidth - 1; candidateBitWidth >= 0; candidateBitWidth--) {
        exceptionCount += bitWidthFrequencies[candidateBitWidth + 1];
        if (exceptionCount === BLOCK_SIZE)
            break;
        // Cost in bits: per-exception overhead + exception payload bits
        // + the packed block itself + 8 bits of extra header.
        let candidateCost = exceptionCount * EXCEPTION_OVERHEAD_BITS +
            exceptionCount * (maxBitWidth - candidateBitWidth) +
            candidateBitWidth * BLOCK_SIZE +
            8;
        // A 1-bit exception width needs no payload bits (the position alone
        // identifies the set bit), so remove that component.
        if (maxBitWidth - candidateBitWidth === 1)
            candidateCost -= exceptionCount;
        if (candidateCost < bestCost) {
            bestCost = candidateCost;
            bestBitWidth = candidateBitWidth;
            bestExceptionCount = exceptionCount;
        }
    }
    bestBitWidthPlan[0] = bestBitWidth;
    bestBitWidthPlan[1] = bestExceptionCount;
    bestBitWidthPlan[2] = maxBitWidth;
}
/**
 * Appends one byte to the workspace scratch byte buffer, growing it on
 * demand, and returns the next write position.
 */
function writeByte(workspace, byteContainerPos, byteValue) {
    if (byteContainerPos >= workspace.byteContainer.length) {
        workspace.byteContainer = ensureUint8Capacity(workspace.byteContainer, byteContainerPos + 1);
    }
    workspace.byteContainer[byteContainerPos] = byteValue & 0xff;
    return byteContainerPos + 1;
}
/**
 * Guarantees the per-bit-width exception buffer can absorb `exceptionCount`
 * more values. Bit width 1 stores no values (positions alone suffice), so it
 * needs no buffer at all.
 */
function ensureExceptionValuesCapacity(dataToBePacked, dataPointers, exceptionBitWidth, exceptionCount) {
    if (exceptionBitWidth === 1) {
        return;
    }
    const required = dataPointers[exceptionBitWidth] + exceptionCount;
    const existing = dataToBePacked[exceptionBitWidth];
    if (existing && required < existing.length) {
        return;
    }
    // Double the requirement, then round up so fastPack32 can always read
    // whole groups of 32.
    const grown = new Uint32Array(roundUpToMultipleOf32(2 * required));
    if (existing) {
        grown.set(existing);
    }
    dataToBePacked[exceptionBitWidth] = grown;
}
/**
 * Emits the per-block header bytes: bit width, exception count, and — only
 * when exceptions exist — the block's maximum bit width.
 */
function writeBlockHeader(workspace, byteContainerPos, bitWidth, exceptionCount, maxBitWidth) {
    let pos = writeByte(workspace, byteContainerPos, bitWidth);
    pos = writeByte(workspace, pos, exceptionCount);
    return exceptionCount > 0 ? writeByte(workspace, pos, maxBitWidth) : pos;
}
/**
 * Writes the in-block position byte of every value that overflows `bitWidth`
 * into the scratch byte buffer and, for exception widths other than 1,
 * stashes the overflow high bits into the per-width exception buffer for
 * later packing. Returns the updated scratch-buffer position.
 */
function recordBlockExceptions(workspace, inValues, blockPos, bitWidth, exceptionCount, exceptionBitWidth, byteContainerPos) {
    if (exceptionCount === 0)
        return byteContainerPos;
    const dataToBePacked = workspace.dataToBePacked;
    const dataPointers = workspace.dataPointers;
    ensureExceptionValuesCapacity(dataToBePacked, dataPointers, exceptionBitWidth, exceptionCount);
    for (let k = 0; k < BLOCK_SIZE; k++) {
        const value = inValues[blockPos + k] >>> 0;
        // A value is an exception when it has any bits above `bitWidth`.
        if (value >>> bitWidth !== 0) {
            // Record where in the block the exception sits.
            byteContainerPos = writeByte(workspace, byteContainerPos, k);
            // Width-1 exceptions carry no payload: the position already
            // identifies the single extra bit.
            if (exceptionBitWidth !== 1) {
                const exceptionValues = dataToBePacked[exceptionBitWidth];
                exceptionValues[dataPointers[exceptionBitWidth]++] = (value >>> bitWidth) | 0;
            }
        }
    }
    return byteContainerPos;
}
/**
 * Bit-packs one BLOCK_SIZE run of values at `bitWidth` bits each into the
 * output stream, 32 values (= `bitWidth` words) per fastPack32 call.
 */
function packBlock(inValues, blockPos, bitWidth, state) {
    for (let offset = 0; offset < BLOCK_SIZE; offset += 32) {
        state.out = ensureInt32Capacity(state.out, state.outPos + bitWidth);
        fastPack32(inValues, blockPos + offset, state.out, state.outPos, bitWidth);
        state.outPos += bitWidth;
    }
}
/**
 * Zero-pads the scratch byte buffer up to the next 4-byte boundary and
 * returns the padded position.
 */
function padByteContainerToInt32(workspace, byteContainerPos) {
    let pos = byteContainerPos;
    while (pos % 4 !== 0) {
        pos = writeByte(workspace, pos, 0);
    }
    return pos;
}
/**
 * Copies the (already 4-byte padded) scratch byte buffer into the int32
 * output stream, little-endian within each word.
 */
function writeByteContainerInts(workspace, state, byteContainerPos) {
    const wordCount = byteContainerPos / 4;
    state.out = ensureInt32Capacity(state.out, state.outPos + wordCount);
    const bytes = workspace.byteContainer;
    for (let word = 0; word < wordCount; word++) {
        const base = word * 4;
        state.out[state.outPos + word] =
            bytes[base] |
            (bytes[base + 1] << 8) |
            (bytes[base + 2] << 16) |
            (bytes[base + 3] << 24) |
            0;
    }
    state.outPos += wordCount;
}
/**
 * Builds the 32-bit bitmap advertising which exception bit widths (2..32)
 * carry data: width k maps to bit k-1, with the maximum width using the
 * sign bit.
 */
function computeExceptionBitmap(dataPointers) {
    let bitmap = 0;
    for (let width = 2; width <= MAX_BIT_WIDTH; width++) {
        if (dataPointers[width] === 0) {
            continue;
        }
        bitmap |= width === MAX_BIT_WIDTH ? 0x80000000 : 1 << (width - 1);
    }
    return bitmap;
}
/**
 * Emits the page's exception payload: a bitmap of which exception bit widths
 * are present, then for each present width its value count followed by the
 * values packed at that width (in groups of 32, with the rounded-up tail
 * words trimmed back off).
 */
function writeExceptionStreams(workspace, state) {
    const dataPointers = workspace.dataPointers;
    const dataToBePacked = workspace.dataToBePacked;
    const bitmap = computeExceptionBitmap(dataPointers);
    state.out = ensureInt32Capacity(state.out, state.outPos + 1);
    state.out[state.outPos++] = bitmap;
    for (let k = 2; k <= MAX_BIT_WIDTH; k++) {
        const size = dataPointers[k];
        if (size !== 0) {
            state.out = ensureInt32Capacity(state.out, state.outPos + 1);
            state.out[state.outPos++] = size | 0;
            let j = 0;
            // Pack in full groups of 32. The buffer capacity is rounded up to
            // a multiple of 32 (see ensureExceptionValuesCapacity), so reading
            // past `size` stays in bounds.
            // NOTE(review): slots past `size` may hold stale values from an
            // earlier page; their packed bits end up in trimmed or partial
            // tail words — confirm the decoder ignores bits beyond `size`.
            for (; j < size; j += 32) {
                const exceptionValues = dataToBePacked[k];
                state.out = ensureInt32Capacity(state.out, state.outPos + k);
                fastPack32(exceptionValues, j, state.out, state.outPos, k);
                state.outPos += k;
            }
            // Remove the whole output words that contain only padding values.
            const overflow = j - size;
            state.outPos -= (overflow * k) >>> 5;
        }
    }
}
/**
 * Encodes one page (up to PAGE_SIZE values): a reserved page-offset word,
 * the per-block packed data, then the block headers / exception positions as
 * a length-prefixed byte stream, and finally the packed exception values.
 */
function encodePage(inValues, thisSize, state, workspace) {
    // Reserve one word for the page header; patched below once the size of
    // the packed block data is known.
    const headerPos = state.outPos;
    state.out = ensureInt32Capacity(state.out, headerPos + 1);
    state.outPos = (state.outPos + 1) | 0;
    const dataPointers = workspace.dataPointers;
    dataPointers.fill(0);
    let byteContainerPos = 0;
    let tmpInPos = state.inPos;
    const finalInPos = tmpInPos + thisSize - BLOCK_SIZE;
    for (; tmpInPos <= finalInPos; tmpInPos += BLOCK_SIZE) {
        // Per block: pick the cheapest bit width, then emit its header,
        // exception positions, and packed values.
        computeBestBitWidthPlan(inValues, tmpInPos, workspace);
        const bestBitWidthPlan = workspace.bestBitWidthPlan;
        const bitWidth = bestBitWidthPlan[0];
        const exceptionCount = bestBitWidthPlan[1];
        const maxBitWidth = bestBitWidthPlan[2];
        const exceptionBitWidth = exceptionCount > 0 ? maxBitWidth - bitWidth : 0;
        byteContainerPos = writeBlockHeader(workspace, byteContainerPos, bitWidth, exceptionCount, maxBitWidth);
        byteContainerPos = recordBlockExceptions(workspace, inValues, tmpInPos, bitWidth, exceptionCount, exceptionBitWidth, byteContainerPos);
        packBlock(inValues, tmpInPos, bitWidth, state);
    }
    const pageEndOutPos = state.outPos;
    state.inPos = tmpInPos;
    // Patch the reserved header: word distance from the header to the end of
    // the packed block data.
    state.out[headerPos] = (pageEndOutPos - headerPos) | 0;
    // Append the byte stream: its unpadded byte length first, then the bytes
    // padded out to whole int32 words.
    const byteSize = byteContainerPos;
    byteContainerPos = padByteContainerToInt32(workspace, byteContainerPos);
    state.out = ensureInt32Capacity(state.out, state.outPos + 1);
    state.out[state.outPos++] = byteSize | 0;
    writeByteContainerInts(workspace, state, byteContainerPos);
    writeExceptionStreams(workspace, state);
}
/**
 * Encodes the block-aligned prefix of the input as successive pages of at
 * most PAGE_SIZE values each.
 */
function encodeAlignedPages(inValues, inLength, state, workspace) {
    const endPos = state.inPos + greatestMultiple(inLength, BLOCK_SIZE);
    while (state.inPos !== endPos) {
        const pageSize = Math.min(PAGE_SIZE, endPos - state.inPos);
        encodePage(inValues, pageSize, state, workspace);
    }
}
/**
 * Writes the block-aligned value count followed by the encoded pages; any
 * values past the last full block are left for the VByte tail.
 */
function encode(inValues, inLength, state, workspace) {
    const alignedLength = greatestMultiple(inLength, BLOCK_SIZE);
    state.out = ensureInt32Capacity(state.out, state.outPos + 1);
    state.out[state.outPos++] = alignedLength;
    if (alignedLength !== 0) {
        encodeAlignedPages(inValues, alignedLength, state, workspace);
    }
}
/**
 * VByte encoding for FastPFOR tail values (MSB=1 terminator).
 * Note: Inverts standard Protobuf Varint (MSB=0 terminator), so we cannot reuse generic methods.
 */
function encodeVByte(inValues, inLength, state, workspace) {
    if (inLength === 0) {
        return;
    }
    // Worst case: 5 bytes per 32-bit value plus up to 3 bytes of padding.
    workspace.byteContainer = ensureUint8Capacity(workspace.byteContainer, inLength * 5 + 3);
    const container = workspace.byteContainer;
    const start = state.inPos;
    let bytePos = 0;
    for (let idx = 0; idx < inLength; idx++) {
        let remaining = inValues[start + idx] >>> 0;
        // Low 7-bit groups carry a clear MSB; the final group sets it.
        while (remaining >= 0x80) {
            container[bytePos++] = remaining & 0x7f;
            remaining >>>= 7;
        }
        container[bytePos++] = (remaining | 0x80) & 0xff;
    }
    // Zero-pad up to a 4-byte boundary before packing into int32 words.
    while (bytePos % 4 !== 0) {
        container[bytePos++] = 0;
    }
    const wordCount = bytePos / 4;
    state.out = ensureInt32Capacity(state.out, state.outPos + wordCount);
    for (let base = 0; base < bytePos; base += 4) {
        state.out[state.outPos++] =
            container[base] |
            (container[base + 1] << 8) |
            (container[base + 2] << 16) |
            (container[base + 3] << 24) |
            0;
    }
    state.inPos = (state.inPos + inLength) | 0;
}
/**
 * Encodes an int32 stream using the FastPFOR wire format (pages + VByte tail).
 */
export function encodeFastPforInt32WithWorkspace(values, workspace) {
    const state = {
        inPos: 0,
        outPos: 0,
        out: new Uint32Array(values.length + 1024),
    };
    encode(values, values.length, state, workspace);
    encodeVByte(values, values.length - state.inPos, state, workspace);
    return state.out.subarray(0, state.outPos);
}
||||
//# sourceMappingURL=fastPforEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/fastPforEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/fastPforEncoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
21
node_modules/@maplibre/mlt/dist/encoding/fsstEncoder.d.ts
generated
vendored
Normal file
21
node_modules/@maplibre/mlt/dist/encoding/fsstEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
/**
 * Create symbol table from string array
 *
 * @param symbolStrings Array of symbol strings
 * @returns Symbol table buffer and lengths (symbols are concatenated UTF-8 bytes)
 */
export declare function createSymbolTable(symbolStrings: string[]): {
    symbols: Uint8Array;
    symbolLengths: Uint32Array;
};
/**
 * Encode data using FSST compression with pre-defined symbol table
 * Encoder requires pre-defined symbol table. Real FSST learns optimal symbols from data. This
 * implementation is for testing decoder only.
 *
 * Bytes with no matching symbol are emitted as an escape sequence: 255
 * followed by the literal byte.
 *
 * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
 * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
 * @param uncompressedData Data to compress
 * @returns FSST compressed data, where each entry is an index to the symbols array
 */
export declare function encodeFsst(symbols: Uint8Array, symbolLengths: Uint32Array, uncompressedData: Uint8Array): Uint8Array;
||||
78
node_modules/@maplibre/mlt/dist/encoding/fsstEncoder.js
generated
vendored
Normal file
78
node_modules/@maplibre/mlt/dist/encoding/fsstEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
/**
 * Create symbol table from string array
 *
 * @param symbolStrings Array of symbol strings
 * @returns Symbol table buffer and lengths
 */
export function createSymbolTable(symbolStrings) {
    const encoder = new TextEncoder();
    const encoded = symbolStrings.map((text) => encoder.encode(text));
    let totalLength = 0;
    for (const bytes of encoded) {
        totalLength += bytes.length;
    }
    const symbols = new Uint8Array(totalLength);
    const symbolLengths = new Uint32Array(encoded.length);
    let writePos = 0;
    for (let i = 0; i < encoded.length; i++) {
        symbols.set(encoded[i], writePos);
        writePos += encoded[i].length;
        symbolLengths[i] = encoded[i].length;
    }
    return { symbols, symbolLengths };
}
/**
 * Encode data using FSST compression with pre-defined symbol table
 * Encoder requires pre-defined symbol table. Real FSST learns optimal symbols from data. This
 * implementation is for testing decoder only.
 *
 * @param symbols Array of symbols, where each symbol can be between 1 and 8 bytes
 * @param symbolLengths Array of symbol lengths, length of each symbol in symbols array
 * @param uncompressedData Data to compress
 * @returns FSST compressed data, where each entry is an index to the symbols array
 */
export function encodeFsst(symbols, symbolLengths, uncompressedData) {
    if (uncompressedData.length === 0) {
        return new Uint8Array(0);
    }
    // Prefix-sum the lengths to get each symbol's start offset in `symbols`.
    const symbolOffsets = new Array(symbolLengths.length).fill(0);
    for (let i = 1; i < symbolLengths.length; i++) {
        symbolOffsets[i] = symbolOffsets[i - 1] + symbolLengths[i - 1];
    }
    const output = [];
    let pos = 0;
    while (pos < uncompressedData.length) {
        // Greedy longest-match scan over all symbols at the current position.
        let bestIndex = -1;
        let bestLength = 0;
        for (let s = 0; s < symbolLengths.length; s++) {
            const length = symbolLengths[s];
            // Only consider symbols that fit and would beat the current best.
            if (length <= bestLength || pos + length > uncompressedData.length) {
                continue;
            }
            const offset = symbolOffsets[s];
            let matched = true;
            for (let i = 0; i < length; i++) {
                if (symbols[offset + i] !== uncompressedData[pos + i]) {
                    matched = false;
                    break;
                }
            }
            if (matched) {
                bestIndex = s;
                bestLength = length;
            }
        }
        if (bestIndex === -1) {
            // No symbol matches: escape sequence, 255 then the literal byte.
            output.push(255, uncompressedData[pos]);
            pos += 1;
        }
        else {
            output.push(bestIndex);
            pos += bestLength;
        }
    }
    return new Uint8Array(output);
}
||||
//# sourceMappingURL=fsstEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/fsstEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/fsstEncoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
68
node_modules/@maplibre/mlt/dist/encoding/integerEncodingUtils.d.ts
generated
vendored
Normal file
68
node_modules/@maplibre/mlt/dist/encoding/integerEncodingUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
import IntWrapper from "../decoding/intWrapper";
// Varint encoders (Protobuf-style: continuation bytes have the MSB set).
export declare function encodeVarintInt32Value(value: number, dst: Uint8Array, offset: IntWrapper): void;
export declare function encodeVarintInt32(values: Uint32Array): Uint8Array;
export declare function encodeVarintInt64(values: BigUint64Array): Uint8Array;
export declare function encodeVarintFloat64(values: Float64Array): Uint8Array;
// FastPFOR encoding of the stream, serialized as big-endian int32 words.
export declare function encodeFastPfor(values: Uint32Array): Uint8Array;
// ZigZag: maps signed to unsigned so small magnitudes stay small
// (0, -1, 1, -2, ... -> 0, 1, 2, 3, ...).
export declare function encodeZigZagInt32Value(value: number): number;
export declare function encodeZigZagInt64Value(value: bigint): bigint;
export declare function encodeZigZagFloat64Value(n: number): number;
export declare function encodeZigZagInt32(data: Int32Array): Uint32Array;
export declare function encodeZigZagInt64(data: BigInt64Array): BigUint64Array;
// In-place variant: mutates `data`.
export declare function encodeZigZagFloat64(data: Float64Array): void;
// RLE encoders: `data` holds all run lengths first, then all run values.
export declare function encodeUnsignedRleInt32(input: Uint32Array): {
    data: Uint32Array;
    runs: number;
};
export declare function encodeUnsignedRleInt64(input: BigInt64Array): {
    data: BigUint64Array;
    runs: number;
};
export declare function encodeUnsignedRleFloat64(input: Float64Array): {
    data: Float64Array;
    runs: number;
};
// Delta + ZigZag: first element zigzagged as-is, then zigzagged differences.
export declare function encodeZigZagDeltaInt32(data: Int32Array): Uint32Array;
export declare function encodeZigZagDeltaInt64(data: BigInt64Array): BigUint64Array;
// In-place variant: mutates `data`.
export declare function encodeZigZagDeltaFloat64(data: Float64Array): void;
// ZigZag then RLE; layout of `data` as in the RLE encoders above.
export declare function encodeZigZagRleInt32(input: Int32Array): {
    data: Uint32Array;
    runs: number;
    numTotalValues: number;
};
export declare function encodeZigZagRleInt64(input: BigInt64Array): {
    data: BigUint64Array;
    runs: number;
    numTotalValues: number;
};
export declare function encodeZigZagRleFloat64(input: Float64Array): {
    data: Float64Array;
    runs: number;
    numTotalValues: number;
};
/**
 * NOTE: this is not really an encode, but more of a decode method —
 * see the implementation for the actual direction of the transform.
 */
export declare function encodeDeltaInt32(data: Int32Array | Uint32Array): void;
export declare function encodeComponentwiseDeltaVec2(data: Int32Array): Uint32Array;
export declare function encodeComponentwiseDeltaVec2Scaled(data: Int32Array, scale: number): Uint32Array;
export declare function encodeZigZagRleDeltaInt32(values: Int32Array | number[]): {
    data: Uint32Array;
    runs: number;
    numTotalValues: number;
};
export declare function encodeRleDeltaInt32(values: Uint32Array | number[]): {
    data: Uint32Array;
    runs: number;
    numTotalValues: number;
};
export declare function encodeDeltaRleInt32(input: Int32Array): {
    data: Uint32Array;
    runs: number;
    numValues: number;
};
export declare function encodeDeltaRleInt64(input: BigInt64Array): {
    data: BigUint64Array;
    runs: number;
    numValues: number;
};
||||
671
node_modules/@maplibre/mlt/dist/encoding/integerEncodingUtils.js
generated
vendored
Normal file
671
node_modules/@maplibre/mlt/dist/encoding/integerEncodingUtils.js
generated
vendored
Normal file
@@ -0,0 +1,671 @@
|
||||
import IntWrapper from "../decoding/intWrapper";
|
||||
import { createFastPforEncoderWorkspace, encodeFastPforInt32WithWorkspace } from "./fastPforEncoder";
|
||||
import { encodeBigEndianInt32s } from "./bigEndianEncode";
|
/**
 * Varint-encodes one 32-bit value into `dst` at `offset`, advancing the
 * offset past the written bytes (1-5). Continuation bytes have the MSB set.
 *
 * @param value - Value to encode; interpreted as unsigned 32-bit.
 * @param dst - Destination buffer with enough remaining space (up to 5 bytes).
 * @param offset - Mutable write cursor, advanced by the number of bytes written.
 */
export function encodeVarintInt32Value(value, dst, offset) {
    // Normalize to an unsigned 32-bit view: without this, a negative int32
    // fails the (signed) `v > 0x7f` check and mis-encodes as a single byte.
    let v = value >>> 0;
    while (v > 0x7f) {
        dst[offset.get()] = (v & 0x7f) | 0x80;
        offset.increment();
        v >>>= 7;
    }
    dst[offset.get()] = v & 0x7f;
    offset.increment();
}
/**
 * Varint-encodes a stream of uint32 values; the result is trimmed to the
 * bytes actually written (worst case 5 bytes per value).
 */
export function encodeVarintInt32(values) {
    const scratch = new Uint8Array(values.length * 5);
    const cursor = new IntWrapper(0);
    for (let i = 0; i < values.length; i++) {
        encodeVarintInt32Value(values[i], scratch, cursor);
    }
    return scratch.slice(0, cursor.get());
}
/**
 * Varint-encodes a stream of uint64 values; the result is trimmed to the
 * bytes actually written (worst case 10 bytes per value).
 */
export function encodeVarintInt64(values) {
    const scratch = new Uint8Array(values.length * 10);
    const cursor = new IntWrapper(0);
    for (let i = 0; i < values.length; i++) {
        encodeVarintInt64Value(values[i], scratch, cursor);
    }
    return scratch.slice(0, cursor.get());
}
/**
 * Varint-encodes one bigint value into `dst` at `offset`, advancing the
 * offset past the written bytes. Continuation bytes have the MSB set.
 */
function encodeVarintInt64Value(value, dst, offset) {
    let remaining = value;
    while (remaining > 0x7fn) {
        dst[offset.get()] = Number(remaining & 0x7fn) | 0x80;
        offset.increment();
        remaining >>= 7n;
    }
    dst[offset.get()] = Number(remaining & 0x7fn);
    offset.increment();
}
/**
 * Varint-encodes a stream of (non-negative integer) numbers into a tightly
 * sized buffer. Negative inputs are sized as 0 for the first pass.
 */
export function encodeVarintFloat64(values) {
    // First pass: compute the exact encoded size so the buffer fits exactly.
    let totalBytes = 0;
    for (let i = 0; i < values.length; i++) {
        let val = values[i];
        val = val < 0 ? 0 : Math.floor(val);
        if (val === 0) {
            // Zero always takes exactly one byte.
            totalBytes++;
            continue;
        }
        // One byte per base-128 digit.
        while (val > 0) {
            totalBytes++;
            val = Math.floor(val / 128);
        }
    }
    // Second pass: actually encode.
    const dst = new Uint8Array(totalBytes);
    const cursor = new IntWrapper(0);
    for (let i = 0; i < values.length; i++) {
        encodeVarintFloat64Value(values[i], dst, cursor);
    }
    return dst;
}
/**
 * Encodes a single number into the buffer at the given offset using Varint encoding.
 * Handles numbers up to 2^53 (MAX_SAFE_INTEGER) correctly.
 */
function encodeVarintFloat64Value(val, buf, offset) {
    let remaining = Math.floor(val);
    if (remaining === 0) {
        buf[offset.get()] = 0;
        offset.increment();
        return;
    }
    // Emit 7 bits at a time, low to high, using float math (% and /) so
    // values above 2^32 stay exact where 32-bit bitwise ops would truncate.
    while (remaining >= 128) {
        buf[offset.get()] = (remaining % 128) | 0x80;
        offset.increment();
        remaining = Math.floor(remaining / 128);
    }
    // Final byte carries no continuation bit.
    buf[offset.get()] = remaining;
    offset.increment();
}
/**
 * FastPFOR-encodes a uint32 stream and serializes the resulting words as a
 * big-endian byte stream.
 */
export function encodeFastPfor(values) {
    const workspace = createFastPforEncoderWorkspace();
    return encodeBigEndianInt32s(encodeFastPforInt32WithWorkspace(values, workspace));
}
/**
 * ZigZag-encodes a signed 32-bit value: 0, -1, 1, -2, ... -> 0, 1, 2, 3, ...
 */
export function encodeZigZagInt32Value(value) {
    return (value >> 31) ^ (value << 1);
}
/**
 * ZigZag-encodes a signed 64-bit bigint: 0n, -1n, 1n, -2n, ... -> 0n, 1n, 2n, 3n, ...
 */
export function encodeZigZagInt64Value(value) {
    return (value >> 63n) ^ (value << 1n);
}
/**
 * ZigZag-encodes a number arithmetically (no bit ops, so it works for values
 * beyond 32 bits): non-negative n -> 2n, negative n -> -2n - 1.
 */
export function encodeZigZagFloat64Value(n) {
    if (n >= 0) {
        return n * 2;
    }
    return -2 * n - 1;
}
/**
 * ZigZag-encodes every element of an Int32Array into a new Uint32Array.
 */
export function encodeZigZagInt32(data) {
    const encoded = new Uint32Array(data.length);
    data.forEach((value, i) => {
        encoded[i] = encodeZigZagInt32Value(value);
    });
    return encoded;
}
/**
 * ZigZag-encodes every element of a BigInt64Array into a new BigUint64Array.
 */
export function encodeZigZagInt64(data) {
    const encoded = new BigUint64Array(data.length);
    data.forEach((value, i) => {
        encoded[i] = encodeZigZagInt64Value(value);
    });
    return encoded;
}
/**
 * ZigZag-encodes a Float64Array in place (each element is independent, so
 * iteration order does not matter).
 */
export function encodeZigZagFloat64(data) {
    for (let i = data.length - 1; i >= 0; i--) {
        data[i] = encodeZigZagFloat64Value(data[i]);
    }
}
/**
 * Run-length encodes a Uint32Array. The output layout is all run lengths
 * first, then all run values, in one Uint32Array of 2 * runs entries.
 */
export function encodeUnsignedRleInt32(input) {
    if (input.length === 0) {
        return { data: new Uint32Array(0), runs: 0 };
    }
    // Collect (length, value) pairs for each maximal run of equal values.
    const runLengths = [];
    const runValues = [];
    let runValue = input[0];
    let runLength = 1;
    for (let i = 1; i < input.length; i++) {
        if (input[i] === runValue) {
            runLength++;
        }
        else {
            runLengths.push(runLength);
            runValues.push(runValue);
            runValue = input[i];
            runLength = 1;
        }
    }
    // Flush the trailing run.
    runLengths.push(runLength);
    runValues.push(runValue);
    // Layout: lengths in the first half, values in the second.
    const runs = runLengths.length;
    const data = new Uint32Array(runs * 2);
    data.set(runLengths, 0);
    data.set(runValues, runs);
    return { data, runs };
}
/**
 * Run-length encodes a 64-bit integer stream. The output layout is all run
 * lengths first, then all run values, in one BigUint64Array of 2 * runs entries.
 */
export function encodeUnsignedRleInt64(input) {
    if (input.length === 0) {
        return { data: new BigUint64Array(0), runs: 0 };
    }
    // Collect (length, value) pairs for each maximal run of equal values.
    const runLengths = [];
    const runValues = [];
    let runValue = input[0];
    let runLength = 1;
    for (let i = 1; i < input.length; i++) {
        if (input[i] === runValue) {
            runLength++;
        }
        else {
            runLengths.push(runLength);
            runValues.push(runValue);
            runValue = input[i];
            runLength = 1;
        }
    }
    // Flush the trailing run.
    runLengths.push(runLength);
    runValues.push(runValue);
    const runs = runLengths.length;
    const data = new BigUint64Array(runs * 2);
    // Run lengths are plain numbers; convert to bigint for 64-bit storage.
    for (let i = 0; i < runs; i++) {
        data[i] = BigInt(runLengths[i]);
    }
    data.set(runValues, runs);
    return { data, runs };
}
/**
 * Run-length encodes a Float64Array. The output layout is all run lengths
 * first, then all run values, in one Float64Array of 2 * runs entries.
 */
export function encodeUnsignedRleFloat64(input) {
    if (input.length === 0) {
        return { data: new Float64Array(0), runs: 0 };
    }
    // Collect (length, value) pairs for each maximal run of equal values.
    const runLengths = [];
    const runValues = [];
    let runValue = input[0];
    let runLength = 1;
    for (let i = 1; i < input.length; i++) {
        if (input[i] === runValue) {
            runLength++;
        }
        else {
            runLengths.push(runLength);
            runValues.push(runValue);
            runValue = input[i];
            runLength = 1;
        }
    }
    // Flush the trailing run.
    runLengths.push(runLength);
    runValues.push(runValue);
    // Layout: lengths in the first half, values in the second.
    const runs = runLengths.length;
    const data = new Float64Array(runs * 2);
    data.set(runLengths, 0);
    data.set(runValues, runs);
    return { data, runs };
}
/**
 * Delta-then-ZigZag encodes an Int32Array: the first output is the zigzag of
 * the first value; each later output is the zigzagged difference between
 * consecutive values.
 */
export function encodeZigZagDeltaInt32(data) {
    if (data.length === 0) {
        return new Uint32Array(0);
    }
    const encoded = new Uint32Array(data.length);
    encoded[0] = encodeZigZagInt32Value(data[0]);
    for (let i = 1; i < data.length; i++) {
        encoded[i] = encodeZigZagInt32Value(data[i] - data[i - 1]);
    }
    return encoded;
}
/**
 * Delta-then-ZigZag encodes a BigInt64Array: the first output is the zigzag
 * of the first value; each later output is the zigzagged difference between
 * consecutive values.
 */
export function encodeZigZagDeltaInt64(data) {
    if (data.length === 0) {
        return new BigUint64Array(0);
    }
    const encoded = new BigUint64Array(data.length);
    encoded[0] = encodeZigZagInt64Value(data[0]);
    for (let i = 1; i < data.length; i++) {
        encoded[i] = encodeZigZagInt64Value(data[i] - data[i - 1]);
    }
    return encoded;
}
/**
 * Delta-then-ZigZag encodes a Float64Array in place. Because slots are
 * overwritten as we go, each original previous value is kept in a local
 * before its slot is replaced by the encoded delta.
 */
export function encodeZigZagDeltaFloat64(data) {
    if (data.length === 0) {
        return;
    }
    let previous = data[0];
    data[0] = encodeZigZagFloat64Value(previous);
    for (let i = 1; i < data.length; i++) {
        const current = data[i];
        data[i] = encodeZigZagFloat64Value(current - previous);
        previous = current;
    }
}
/**
 * ZigZag-encodes every value, then run-length encodes the resulting stream.
 * The output layout is all run lengths first, then all zigzagged run values.
 */
export function encodeZigZagRleInt32(input) {
    if (input.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numTotalValues: 0 };
    }
    // Fused zigzag + RLE pass: equal inputs zigzag to equal outputs, so run
    // boundaries are identical to a two-pass implementation.
    const runLengths = [];
    const runValues = [];
    let runValue = encodeZigZagInt32Value(input[0]);
    let runLength = 1;
    for (let i = 1; i < input.length; i++) {
        const zigzagged = encodeZigZagInt32Value(input[i]);
        if (zigzagged === runValue) {
            runLength++;
        }
        else {
            runLengths.push(runLength);
            runValues.push(runValue);
            runValue = zigzagged;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    runLengths.push(runLength);
    runValues.push(runValue);
    // Layout: lengths in the first half, zigzagged values in the second.
    const runs = runLengths.length;
    const data = new Uint32Array(runs * 2);
    data.set(runLengths, 0);
    data.set(runValues, runs);
    return {
        data,
        runs,
        numTotalValues: input.length, // Total original values count
    };
}
||||
/**
 * 64-bit variant of zigzag + RLE encoding: zigzag-encodes every bigint
 * value, then run-length encodes the stream. Output layout:
 * [run lengths | zigzag values] packed into one BigUint64Array.
 */
export function encodeZigZagRleInt64(input) {
    if (input.length === 0) {
        return { data: new BigUint64Array(0), runs: 0, numTotalValues: 0 };
    }
    // Step 1: zigzag transform the whole input stream.
    const zigzagged = Array.from(input, (v) => encodeZigZagInt64Value(v));
    // Step 2: collapse consecutive equal values into (length, value) runs.
    const lengths = [];
    const runValues = [];
    let runValue = zigzagged[0];
    let runLength = 0;
    for (const v of zigzagged) {
        if (v === runValue) {
            runLength++;
        }
        else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Step 3: pack [lengths | values] into a single BigUint64Array.
    const runs = lengths.length;
    const packed = new BigUint64Array(runs * 2);
    // Run lengths were accumulated as plain numbers; widen to BigInt.
    for (let i = 0; i < runs; i++) {
        packed[i] = BigInt(lengths[i]);
    }
    packed.set(runValues, runs);
    return { data: packed, runs, numTotalValues: input.length };
}
|
||||
/**
 * Float64 variant of zigzag + RLE encoding: applies the float-based
 * zigzag transform to every value, then run-length encodes the stream.
 * Output layout: [run lengths | zigzag values] in one Float64Array.
 */
export function encodeZigZagRleFloat64(input) {
    if (input.length === 0) {
        return { data: new Float64Array(0), runs: 0, numTotalValues: 0 };
    }
    // Step 1: float-based zigzag transform of the whole input stream.
    const zigzagged = Array.from(input, (v) => encodeZigZagFloat64Value(v));
    // Step 2: collapse consecutive equal values into (length, value) runs.
    const lengths = [];
    const runValues = [];
    let runValue = zigzagged[0];
    let runLength = 0;
    for (const v of zigzagged) {
        if (v === runValue) {
            runLength++;
        }
        else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Step 3: pack [lengths | values] into a single Float64Array.
    const runs = lengths.length;
    const packed = new Float64Array(runs * 2);
    packed.set(lengths, 0);
    packed.set(runValues, runs);
    return { data: packed, runs, numTotalValues: input.length };
}
|
||||
/**
 * In-place forward delta transform: replaces each element (except the
 * first, which stays as the base value) with its difference from the
 * preceding element. Iterates from the tail so each predecessor is still
 * the original value when read.
 *
 * (Original author's note: despite the name this reads like the inverse
 * of the delta decoder rather than a classic encode.)
 */
export function encodeDeltaInt32(data) {
    if (data.length === 0) {
        return;
    }
    for (let i = data.length - 1; i > 0; i--) {
        data[i] -= data[i - 1];
    }
}
|
||||
/**
 * Componentwise delta + zigzag encoding for interleaved vec2 data
 * [x0, y0, x1, y1, ...]. The first vertex is zigzag-encoded as-is; every
 * following vertex stores the zigzagged delta to its predecessor.
 */
export function encodeComponentwiseDeltaVec2(data) {
    if (data.length < 2)
        return new Uint32Array(data);
    const out = new Uint32Array(data.length);
    // The first vertex carries its absolute (zigzagged) coordinates; the
    // delta loop below never touches indices 0 and 1, so writing them
    // first is safe.
    out[0] = encodeZigZagInt32Value(data[0]);
    out[1] = encodeZigZagInt32Value(data[1]);
    // All deltas read from `data` and write to `out`, so no value is
    // clobbered before it is needed (bounds kept identical to the
    // original reverse iteration).
    for (let i = data.length - 2; i >= 2; i -= 2) {
        out[i] = encodeZigZagInt32Value(data[i] - data[i - 2]);
        out[i + 1] = encodeZigZagInt32Value(data[i + 1] - data[i - 1]);
    }
    return out;
}
|
||||
/**
 * Componentwise delta + zigzag encoding for interleaved vec2 data
 * [x0, y0, x1, y1, ...], with an inverse-scale step applied first.
 *
 * NOTE(review): the rounded `data[i] / scale` results are stored into a
 * Uint32Array, so negative coordinates wrap modulo 2^32 before the delta
 * step — presumably the decode path relies on matching 32-bit
 * arithmetic; confirm against the corresponding decoder.
 */
export function encodeComponentwiseDeltaVec2Scaled(data, scale) {
    if (data.length < 2)
        return new Uint32Array(data);
    const encoded = new Uint32Array(data.length);
    // First, inverse scale all values (tile space -> original space)
    for (let i = 0; i < data.length; i++) {
        encoded[i] = Math.round(data[i] / scale);
    }
    // Then apply componentwise delta encoding (same as non-scaled version)
    // Reverse iterate to avoid overwriting data needed for delta computation:
    // the deltas read and write the same `encoded` array, so encoded[i - 2]
    // and encoded[i - 1] must still hold the scaled (not yet zigzagged)
    // values when they are read here.
    for (let i = encoded.length - 2; i >= 2; i -= 2) {
        const deltaX = encoded[i] - encoded[i - 2];
        const deltaY = encoded[i + 1] - encoded[i - 1];
        encoded[i] = encodeZigZagInt32Value(deltaX);
        encoded[i + 1] = encodeZigZagInt32Value(deltaY);
    }
    // Encode first vertex last (after computing all deltas)
    encoded[0] = encodeZigZagInt32Value(encoded[0]);
    encoded[1] = encodeZigZagInt32Value(encoded[1]);
    return encoded;
}
|
||||
// TODO: zigZagDeltaOfDeltaDecoding
/**
 * Delta-encodes `values` (starting from an implicit previous value of 0,
 * mirroring the decoder which initializes previousValue = 0), zigzag
 * encodes each delta, and run-length encodes the resulting stream.
 * Output layout: [run lengths | zigzagged deltas] in one Uint32Array.
 */
export function encodeZigZagRleDeltaInt32(values) {
    if (values.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numTotalValues: 0 };
    }
    const lengths = [];
    const zigzagDeltas = [];
    // The decoder starts from previousValue = 0, so the encoder must too.
    let previous = 0;
    let runDelta = null;
    let runLength = 0;
    for (const value of values) {
        const delta = value - previous;
        previous = value;
        if (runDelta === null) {
            // Very first delta opens the first run.
            runDelta = delta;
            runLength = 1;
        }
        else if (delta === runDelta) {
            runLength++;
        }
        else {
            // Run broke: flush the finished run and start a new one.
            lengths.push(runLength);
            zigzagDeltas.push(encodeZigZagInt32Value(runDelta));
            runDelta = delta;
            runLength = 1;
        }
    }
    // Flush the trailing run (guaranteed non-empty: values.length > 0).
    lengths.push(runLength);
    zigzagDeltas.push(encodeZigZagInt32Value(runDelta));
    const runs = lengths.length;
    // Decoder expects [RunLength 1..N | Value 1..N].
    const data = new Uint32Array(runs * 2);
    data.set(lengths, 0);
    data.set(zigzagDeltas, runs);
    return { data, runs, numTotalValues: values.length };
}
|
||||
/**
 * Delta-encodes `values` (implicit previous value of 0, matching the
 * decoder which initializes previousValue = 0) and run-length encodes
 * the raw deltas (no zigzag). Output layout: [run lengths | deltas] in
 * one Uint32Array, so negative deltas wrap modulo 2^32 on store.
 */
export function encodeRleDeltaInt32(values) {
    if (values.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numTotalValues: 0 };
    }
    const lengths = [];
    const runDeltas = [];
    // Sequence starts relative to 0, mirroring the decoder.
    let previous = 0;
    let runDelta = null;
    let runLength = 0;
    for (const value of values) {
        const delta = value - previous;
        previous = value;
        if (runDelta === null) {
            // Very first delta opens the first run.
            runDelta = delta;
            runLength = 1;
        }
        else if (delta === runDelta) {
            runLength++;
        }
        else {
            // Delta changed: flush the finished run and start a new one.
            lengths.push(runLength);
            runDeltas.push(runDelta);
            runDelta = delta;
            runLength = 1;
        }
    }
    // Flush the trailing run (guaranteed non-empty: values.length > 0).
    lengths.push(runLength);
    runDeltas.push(runDelta);
    const runs = lengths.length;
    // Pack as [RunLength 1..N | Delta 1..N].
    const data = new Uint32Array(runs * 2);
    data.set(lengths, 0);
    data.set(runDeltas, runs);
    return { data, runs, numTotalValues: values.length };
}
|
||||
/**
 * Delta-encodes `values` from an implicit previous value of 0, zigzag
 * encodes each delta, then run-length encodes the zigzagged stream.
 * Output layout: [run lengths | zigzagged deltas] in one Uint32Array.
 * Note: the result object uses the key `numValues` (not numTotalValues).
 */
export function encodeDeltaRleInt32(input) {
    if (input.length === 0) {
        return { data: new Uint32Array(0), runs: 0, numValues: 0 };
    }
    // Steps 1+2: delta against the running previous value, then zigzag.
    let previous = 0;
    const zigzagDeltas = Array.from(input, (value) => {
        const encoded = encodeZigZagInt32Value(value - previous);
        previous = value;
        return encoded;
    });
    // Step 3: RLE over the zigzagged delta stream.
    const lengths = [];
    const runValues = [];
    let runValue = zigzagDeltas[0];
    let runLength = 0;
    for (const v of zigzagDeltas) {
        if (v === runValue) {
            runLength++;
        }
        else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Step 4: pack [lengths | values] into a single Uint32Array.
    const runs = lengths.length;
    const packed = new Uint32Array(runs * 2);
    packed.set(lengths, 0);
    packed.set(runValues, runs);
    return { data: packed, runs, numValues: input.length };
}
|
||||
/**
 * 64-bit variant of delta + zigzag + RLE encoding: deltas are computed
 * from an implicit previous value of 0n, zigzag-encoded, then the
 * stream is run-length encoded. Output layout:
 * [run lengths | zigzagged deltas] in one BigUint64Array.
 * Note: the result object uses the key `numValues` (not numTotalValues).
 */
export function encodeDeltaRleInt64(input) {
    if (input.length === 0) {
        return { data: new BigUint64Array(0), runs: 0, numValues: 0 };
    }
    // Steps 1+2: delta against the running previous value, then zigzag.
    let previous = 0n;
    const zigzagDeltas = Array.from(input, (value) => {
        const encoded = encodeZigZagInt64Value(value - previous);
        previous = value;
        return encoded;
    });
    // Step 3: RLE over the zigzagged delta stream.
    const lengths = [];
    const runValues = [];
    let runValue = zigzagDeltas[0];
    let runLength = 0;
    for (const v of zigzagDeltas) {
        if (v === runValue) {
            runLength++;
        }
        else {
            lengths.push(runLength);
            runValues.push(runValue);
            runValue = v;
            runLength = 1;
        }
    }
    // Flush the trailing run.
    lengths.push(runLength);
    runValues.push(runValue);
    // Step 4: pack [lengths | values] into a single BigUint64Array.
    const runs = lengths.length;
    const packed = new BigUint64Array(runs * 2);
    // Run lengths were accumulated as plain numbers; widen to BigInt.
    for (let i = 0; i < runs; i++) {
        packed[i] = BigInt(lengths[i]);
    }
    packed.set(runValues, runs);
    return { data: packed, runs, numValues: input.length };
}
|
||||
//# sourceMappingURL=integerEncodingUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/integerEncodingUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/integerEncodingUtils.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
28
node_modules/@maplibre/mlt/dist/encoding/integerStreamEncoder.d.ts
generated
vendored
Normal file
28
node_modules/@maplibre/mlt/dist/encoding/integerStreamEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
import type { StreamMetadata } from "../metadata/tile/streamMetadataDecoder";
|
||||
import type BitVector from "../vector/flat/bitVector";
|
||||
import type GeometryScaling from "../decoding/geometryScaling";
|
||||
export declare function encodeSignedInt32Stream(values: Int32Array, metadata: StreamMetadata, bitVector?: BitVector, scalingData?: GeometryScaling): Uint8Array;
|
||||
export declare function encodeUnsignedInt32Stream(values: Uint32Array, metadata: StreamMetadata, bitVector?: BitVector, scalingData?: GeometryScaling): Uint8Array;
|
||||
export declare function encodeFloat64(values: Float64Array, streamMetadata: StreamMetadata, isSigned: boolean): Float64Array;
|
||||
/**
|
||||
* Encodes BigInt64 values with zigzag encoding and varint compression
|
||||
*/
|
||||
export declare function encodeInt64SignedNone(values: BigInt64Array): Uint8Array;
|
||||
/**
|
||||
* Encodes BigInt64 values with delta encoding, zigzag, and varint
|
||||
*/
|
||||
export declare function encodeInt64SignedDelta(values: BigInt64Array): Uint8Array;
|
||||
/**
|
||||
* Encodes BigInt64 values with RLE, zigzag, and varint
|
||||
* @param runs - Array of [runLength, value] pairs
|
||||
*/
|
||||
export declare function encodeInt64SignedRle(runs: Array<[number, bigint]>): Uint8Array;
|
||||
/**
|
||||
* Encodes BigInt64 values with delta+RLE, zigzag, and varint
|
||||
* @param runs - Array of [runLength, deltaValue] pairs representing RLE-encoded delta values
|
||||
*/
|
||||
export declare function encodeInt64SignedDeltaRle(runs: Array<[number, bigint]>): Uint8Array;
|
||||
/**
|
||||
* Encodes unsigned BigInt64 values with varint compression (no zigzag)
|
||||
*/
|
||||
export declare function encodeInt64UnsignedNone(values: BigInt64Array): Uint8Array;
|
||||
173
node_modules/@maplibre/mlt/dist/encoding/integerStreamEncoder.js
generated
vendored
Normal file
173
node_modules/@maplibre/mlt/dist/encoding/integerStreamEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,173 @@
|
||||
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
|
||||
import { encodeDeltaRleInt32, encodeZigZagInt32, encodeZigZagRleInt32, encodeUnsignedRleInt32, encodeDeltaInt32, encodeUnsignedRleFloat64, encodeZigZagDeltaFloat64, encodeZigZagFloat64, encodeZigZagRleFloat64, encodeVarintInt32, encodeVarintInt64, encodeZigZagInt64Value, encodeFastPfor, encodeComponentwiseDeltaVec2, encodeComponentwiseDeltaVec2Scaled, encodeZigZagDeltaInt32, } from "./integerEncodingUtils";
|
||||
import { packNullable } from "./packNullableUtils";
|
||||
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
|
||||
/**
 * Encodes a signed int32 stream: applies the logical-level technique(s)
 * from `metadata`, then the physical-level technique, yielding bytes.
 */
export function encodeSignedInt32Stream(values, metadata, bitVector, scalingData) {
    const encoded = encodeSignedInt32(values, metadata, bitVector, scalingData);
    return encodePhysicalLevelTechnique(encoded.data, metadata);
}
|
||||
/**
 * Encodes an unsigned int32 stream: applies the logical-level
 * technique(s) from `metadata`, then the physical-level technique.
 */
export function encodeUnsignedInt32Stream(values, metadata, bitVector, scalingData) {
    const encoded = encodeUnsignedInt32(values, metadata, bitVector, scalingData);
    return encodePhysicalLevelTechnique(encoded.data, metadata);
}
|
||||
/**
 * Applies the stream's physical-level compression to the logically
 * encoded words: FastPFOR, varint, or a plain byte copy for NONE.
 */
function encodePhysicalLevelTechnique(data, streamMetadata) {
    switch (streamMetadata.physicalLevelTechnique) {
        case PhysicalLevelTechnique.FAST_PFOR:
            return encodeFastPfor(data);
        case PhysicalLevelTechnique.VARINT:
            return encodeVarintInt32(data);
        case PhysicalLevelTechnique.NONE:
            // Copy so the returned bytes do not alias the caller's buffer.
            return new Uint8Array(data.subarray(0, streamMetadata.byteLength));
        default:
            throw new Error("Specified physicalLevelTechnique is not supported (yet).");
    }
}
|
||||
/**
 * Applies the stream's logical-level technique(s) to signed int32 data
 * and returns `{ data }` (plus `runs` for RLE-based techniques).
 * Nullable streams are first compacted via the present-bit mask.
 */
function encodeSignedInt32(values, streamMetadata, bitVector, scalingData) {
    // Compact out null slots when a present-bit mask is given; otherwise
    // copy so the caller's array is never mutated by in-place encoders.
    values = bitVector ? packNullable(values, bitVector) : new Int32Array(values);
    let data;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            // DELTA may be combined with a secondary RLE pass.
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const encoded = encodeDeltaRleInt32(values);
                return { data: encoded.data, runs: encoded.runs };
            }
            else {
                data = encodeZigZagDeltaInt32(values);
                return { data };
            }
        case LogicalLevelTechnique.RLE: {
            const encoded = encodeZigZagRleInt32(values);
            return { data: encoded.data, runs: encoded.runs };
        }
        case LogicalLevelTechnique.MORTON:
            // In-place delta transform, then reinterpret as unsigned words.
            encodeDeltaInt32(values);
            data = new Uint32Array(values);
            return { data };
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            // Scaled variant only applies to non-nullable geometry streams.
            if (scalingData && !bitVector) {
                const data = encodeComponentwiseDeltaVec2Scaled(values, scalingData.scale);
                return { data };
            }
            data = encodeComponentwiseDeltaVec2(values);
            return { data };
        case LogicalLevelTechnique.NONE:
            // Signed values still need zigzag before the physical stage.
            data = encodeZigZagInt32(values);
            return { data };
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
}
|
||||
/**
 * Applies the stream's logical-level technique(s) to unsigned int32
 * data and returns `{ data }` (plus `runs` for RLE-based techniques).
 * Nullable streams are first compacted via the present-bit mask.
 */
function encodeUnsignedInt32(values, streamMetadata, bitVector, scalingData) {
    // Compact out null slots when a present-bit mask is given; otherwise
    // copy so the caller's array is never mutated by in-place encoders.
    values = bitVector ? packNullable(values, bitVector) : new Uint32Array(values);
    let data;
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            // Delta encoders expect signed input; reinterpret the same
            // buffer as Int32Array (no copy — shares the underlying bytes).
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                const encoded = encodeDeltaRleInt32(new Int32Array(values.buffer, values.byteOffset, values.length));
                return { data: encoded.data, runs: encoded.runs };
            }
            data = encodeZigZagDeltaInt32(new Int32Array(values.buffer, values.byteOffset, values.length));
            return { data };
        case LogicalLevelTechnique.RLE: {
            // Unsigned RLE: no zigzag step needed.
            const encoded = encodeUnsignedRleInt32(values);
            return { data: encoded.data, runs: encoded.runs };
        }
        case LogicalLevelTechnique.MORTON:
            // In-place delta transform on the (already copied) array.
            encodeDeltaInt32(values);
            data = values;
            return { data };
        case LogicalLevelTechnique.COMPONENTWISE_DELTA:
            // Scaled variant only applies to non-nullable geometry streams.
            // Note: here a fresh Int32Array *copy* is made (element-wise),
            // unlike the buffer reinterpretation in the DELTA case.
            if (scalingData && !bitVector) {
                const data = encodeComponentwiseDeltaVec2Scaled(new Int32Array(values), scalingData.scale);
                return { data };
            }
            data = encodeComponentwiseDeltaVec2(new Int32Array(values));
            return { data };
        case LogicalLevelTechnique.NONE:
            // Unsigned passthrough: no zigzag required.
            data = values;
            return { data };
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
}
|
||||
/**
 * Applies the stream's logical-level technique to float64 data.
 * NOTE: DELTA and signed NONE mutate `values` in place (the in-place
 * encoders write back into the caller's array) and then return it;
 * RLE returns a freshly built array.
 */
export function encodeFloat64(values, streamMetadata, isSigned) {
    switch (streamMetadata.logicalLevelTechnique1) {
        case LogicalLevelTechnique.DELTA:
            // In-place zigzag-delta transform, optionally followed by RLE.
            encodeZigZagDeltaFloat64(values);
            if (streamMetadata.logicalLevelTechnique2 === LogicalLevelTechnique.RLE) {
                values = encodeUnsignedRleFloat64(values).data;
            }
            return values;
        case LogicalLevelTechnique.RLE:
            return encodeRleFloat64(values, isSigned);
        case LogicalLevelTechnique.NONE:
            // Signed streams get an in-place zigzag; unsigned pass through.
            if (isSigned) {
                encodeZigZagFloat64(values);
            }
            return values;
        default:
            throw new Error(`The specified Logical level technique is not supported: ${streamMetadata.logicalLevelTechnique1}`);
    }
}
|
||||
/**
 * RLE-encodes float64 data; signed streams are zigzag-transformed as
 * part of the RLE pass, unsigned streams are RLE'd directly.
 */
function encodeRleFloat64(data, isSigned) {
    if (isSigned) {
        return encodeZigZagRleFloat64(data).data;
    }
    return encodeUnsignedRleFloat64(data).data;
}
|
||||
/**
 * Encodes BigInt64 values with zigzag encoding and varint compression.
 */
export function encodeInt64SignedNone(values) {
    const zigzagged = new BigUint64Array(values.length);
    for (let i = 0; i < values.length; i++) {
        zigzagged[i] = encodeZigZagInt64Value(values[i]);
    }
    return encodeVarintInt64(zigzagged);
}
|
||||
/**
 * Encodes BigInt64 values with delta encoding, zigzag, and varint.
 * The first element is stored as-is; every subsequent element stores
 * its difference from the predecessor.
 */
export function encodeInt64SignedDelta(values) {
    // Guard: with an empty input, `deltaEncoded[0] = values[0]` would
    // pass `undefined` through the BigInt64Array setter's ToBigInt
    // conversion and throw a TypeError. Return the varint encoding of an
    // empty stream instead (consistent with encodeInt64SignedNone).
    if (values.length === 0) {
        return encodeVarintInt64(new BigUint64Array(0));
    }
    const deltaEncoded = new BigInt64Array(values.length);
    deltaEncoded[0] = values[0];
    for (let i = 1; i < values.length; i++) {
        deltaEncoded[i] = values[i] - values[i - 1];
    }
    // Zigzag the signed deltas into the unsigned domain for varint.
    const zigzagEncoded = new BigUint64Array(deltaEncoded.length);
    for (let i = 0; i < deltaEncoded.length; i++) {
        zigzagEncoded[i] = encodeZigZagInt64Value(deltaEncoded[i]);
    }
    return encodeVarintInt64(zigzagEncoded);
}
|
||||
/**
 * Encodes BigInt64 values with RLE, zigzag, and varint.
 * @param runs - Array of [runLength, value] pairs
 */
export function encodeInt64SignedRle(runs) {
    // Layout: all run lengths first, then all zigzagged values.
    const lengths = runs.map(([runLength]) => BigInt(runLength));
    const zigzagged = runs.map(([, value]) => encodeZigZagInt64Value(value));
    return encodeVarintInt64(new BigUint64Array([...lengths, ...zigzagged]));
}
|
||||
/**
 * Encodes BigInt64 values with delta+RLE, zigzag, and varint.
 * @param runs - Array of [runLength, deltaValue] pairs representing RLE-encoded delta values
 */
export function encodeInt64SignedDeltaRle(runs) {
    // Layout: all run lengths first, then all zigzagged delta values.
    const lengths = runs.map(([runLength]) => BigInt(runLength));
    const zigzagged = runs.map(([, delta]) => encodeZigZagInt64Value(delta));
    return encodeVarintInt64(new BigUint64Array([...lengths, ...zigzagged]));
}
|
||||
/**
 * Encodes unsigned BigInt64 values with varint compression (no zigzag).
 */
export function encodeInt64UnsignedNone(values) {
    // Copy into an unsigned view before varint-encoding.
    const unsigned = new BigUint64Array(values);
    return encodeVarintInt64(unsigned);
}
|
||||
//# sourceMappingURL=integerStreamEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/integerStreamEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/integerStreamEncoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
4
node_modules/@maplibre/mlt/dist/encoding/packNullableUtils.d.ts
generated
vendored
Normal file
4
node_modules/@maplibre/mlt/dist/encoding/packNullableUtils.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
import type { TypedArrayInstance } from "../decoding/unpackNullableUtils";
|
||||
import BitVector from "../vector/flat/bitVector";
|
||||
export declare function packNullable<T extends TypedArrayInstance>(data: T, presentBits: BitVector | null): T;
|
||||
export declare function packNullableBoolean(data: Uint8Array, dataSize: number, presentBits: BitVector | null): Uint8Array;
|
||||
55
node_modules/@maplibre/mlt/dist/encoding/packNullableUtils.js
generated
vendored
Normal file
55
node_modules/@maplibre/mlt/dist/encoding/packNullableUtils.js
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
import BitVector from "../vector/flat/bitVector";
|
||||
/**
 * Compacts `data` by dropping every element whose corresponding bit in
 * `presentBits` is unset, preserving order and element type. Returns
 * `data` itself (no copy) when no mask is given.
 */
export function packNullable(data, presentBits) {
    // Non-nullable stream: nothing to strip.
    if (!presentBits) {
        return data;
    }
    const total = data.length;
    // Pass 1: count present elements so the output is sized exactly.
    let presentCount = 0;
    for (let i = 0; i < total; i++) {
        if (presentBits.get(i)) {
            presentCount++;
        }
    }
    // Pass 2: copy present elements into a new array of the same type.
    const packed = new data.constructor(presentCount);
    let write = 0;
    for (let i = 0; i < total; i++) {
        if (presentBits.get(i)) {
            packed[write++] = data[i];
        }
    }
    return packed;
}
|
||||
/**
 * Compacts a bit-packed boolean stream: keeps only the bits at
 * positions marked present in `presentBits`, preserving order.
 * Returns the input buffer unchanged when no mask is given.
 */
export function packNullableBoolean(data, dataSize, presentBits) {
    // Non-nullable stream: nothing to strip.
    if (!presentBits) {
        return data;
    }
    const source = new BitVector(data, dataSize);
    // Count present positions to size the packed buffer exactly.
    let presentCount = 0;
    for (let i = 0; i < dataSize; i++) {
        if (presentBits.get(i)) {
            presentCount++;
        }
    }
    // Build the compacted bit vector (one byte per 8 kept bits).
    const packed = new BitVector(new Uint8Array(Math.ceil(presentCount / 8)), presentCount);
    let write = 0;
    for (let i = 0; i < dataSize; i++) {
        if (presentBits.get(i)) {
            packed.set(write++, source.get(i));
        }
    }
    return packed.getBuffer();
}
|
||||
//# sourceMappingURL=packNullableUtils.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/packNullableUtils.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/packNullableUtils.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"packNullableUtils.js","sourceRoot":"","sources":["../../src/encoding/packNullableUtils.ts"],"names":[],"mappings":"AACA,OAAO,SAAS,MAAM,0BAA0B,CAAC;AAEjD,MAAM,UAAU,YAAY,CAA+B,IAAO,EAAE,WAA6B;IAC7F,0EAA0E;IAC1E,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,IAAI,CAAC;IAChB,CAAC;IAED,MAAM,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC;IAEzB,8DAA8D;IAC9D,mEAAmE;IACnE,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5B,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,WAAW,EAAE,CAAC;QAClB,CAAC;IACL,CAAC;IAED,+DAA+D;IAC/D,MAAM,WAAW,GAAG,IAAI,CAAC,WAAoC,CAAC;IAC9D,MAAM,MAAM,GAAG,IAAI,WAAW,CAAC,WAAW,CAAM,CAAC;IAEjD,0DAA0D;IAC1D,IAAI,OAAO,GAAG,CAAC,CAAC;IAChB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,IAAI,EAAE,CAAC,EAAE,EAAE,CAAC;QAC5B,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,MAAM,CAAC,OAAO,EAAE,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;QAChC,CAAC;IACL,CAAC;IAED,OAAO,MAAM,CAAC;AAClB,CAAC;AAED,MAAM,UAAU,mBAAmB,CAAC,IAAgB,EAAE,QAAgB,EAAE,WAA6B;IACjG,0EAA0E;IAC1E,IAAI,CAAC,WAAW,EAAE,CAAC;QACf,OAAO,IAAI,CAAC;IAChB,CAAC;IAED,MAAM,cAAc,GAAG,IAAI,SAAS,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;IAErD,8DAA8D;IAC9D,uDAAuD;IACvD,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,EAAE,CAAC,EAAE,EAAE,CAAC;QAChC,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,WAAW,EAAE,CAAC;QAClB,CAAC;IACL,CAAC;IAED,sEAAsE;IACtE,MAAM,YAAY,GAAG,IAAI,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,WAAW,GAAG,CAAC,CAAC,CAAC,CAAC;IAChE,MAAM,eAAe,GAAG,IAAI,SAAS,CAAC,YAAY,EAAE,WAAW,CAAC,CAAC;IAEjE,4DAA4D;IAC5D,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,QAAQ,EAAE,CAAC,EAAE,EAAE,CAAC;QAChC,IAAI,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC;YACrB,MAAM,KAAK,GAAG,cAAc,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;YACpC,eAAe,CAAC,GAAG,CAAC,WAAW,EAAE,EAAE,KAAK,CAAC,CAAC;QAC9C,CAAC;IACL,CAAC;IAED,OAAO,eAAe,CAAC,SAAS,EAAE,CAAC;AACvC,CAAC","sourcesContent":["import type { TypedArrayConstructor, TypedArrayInstance } from \"../decoding/unpackNullableUtils\";\nimport 
BitVector from \"../vector/flat/bitVector\";\n\nexport function packNullable<T extends TypedArrayInstance>(data: T, presentBits: BitVector | null): T {\n // Non-nullable case: if no mask is provided, the data is already \"packed\"\n if (!presentBits) {\n return data;\n }\n\n const size = data.length;\n\n // 1. First pass: Count how many elements are actually present\n // This is required to allocate the correct size for the TypedArray\n let packedCount = 0;\n for (let i = 0; i < size; i++) {\n if (presentBits.get(i)) {\n packedCount++;\n }\n }\n\n // 2. Create a new array of the same type with the reduced size\n const constructor = data.constructor as TypedArrayConstructor;\n const result = new constructor(packedCount) as T;\n\n // 3. Second pass: Fill the result array with valid values\n let counter = 0;\n for (let i = 0; i < size; i++) {\n if (presentBits.get(i)) {\n result[counter++] = data[i];\n }\n }\n\n return result;\n}\n\nexport function packNullableBoolean(data: Uint8Array, dataSize: number, presentBits: BitVector | null): Uint8Array {\n // Non-nullable case: if no mask is provided, the data is already \"packed\"\n if (!presentBits) {\n return data;\n }\n\n const inputBitVector = new BitVector(data, dataSize);\n\n // 1. Calculate how many bits are actually marked as 'present'\n // This determines the size of the final packed buffer.\n let packedCount = 0;\n for (let i = 0; i < dataSize; i++) {\n if (presentBits.get(i)) {\n packedCount++;\n }\n }\n\n // 2. Initialize the result BitVector with the correct compressed size\n const resultBuffer = new Uint8Array(Math.ceil(packedCount / 8));\n const resultBitVector = new BitVector(resultBuffer, packedCount);\n\n // 3. Fill the result: only copy bits where the mask is true\n let targetIndex = 0;\n for (let i = 0; i < dataSize; i++) {\n if (presentBits.get(i)) {\n const value = inputBitVector.get(i);\n resultBitVector.set(targetIndex++, value);\n }\n }\n\n return resultBitVector.getBuffer();\n}\n"]}
|
||||
78
node_modules/@maplibre/mlt/dist/encoding/propertyEncoder.d.ts
generated
vendored
Normal file
78
node_modules/@maplibre/mlt/dist/encoding/propertyEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
/**
|
||||
* Encodes INT_32 values with NONE encoding (no delta, no RLE)
|
||||
*/
|
||||
export declare function encodeInt32NoneColumn(values: Int32Array): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_32 values with DELTA encoding
|
||||
*/
|
||||
export declare function encodeInt32DeltaColumn(values: Int32Array): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_32 values with RLE encoding
|
||||
* @param runs - Array of [runLength, value] pairs
|
||||
*/
|
||||
export declare function encodeInt32RleColumn(runs: Array<[number, number]>): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_32 values with DELTA+RLE encoding
|
||||
* @param runs - Array of [runLength, deltaValue] pairs, where first value is the base
|
||||
*/
|
||||
export declare function encodeInt32DeltaRleColumn(runs: Array<[number, number]>): Uint8Array;
|
||||
/**
|
||||
* Encodes nullable INT_32 values
|
||||
*/
|
||||
export declare function encodeInt32NullableColumn(values: (number | null)[]): Uint8Array;
|
||||
/**
|
||||
* Encodes UINT_32 values (no zigzag encoding)
|
||||
*/
|
||||
export declare function encodeUint32Column(values: Uint32Array): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_64 values with NONE encoding
|
||||
*/
|
||||
export declare function encodeInt64NoneColumn(values: BigInt64Array): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_64 values with DELTA encoding
|
||||
*/
|
||||
export declare function encodeInt64DeltaColumn(values: BigInt64Array): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_64 values with RLE encoding
|
||||
*/
|
||||
export declare function encodeInt64RleColumn(runs: Array<[number, bigint]>): Uint8Array;
|
||||
/**
|
||||
* Encodes INT_64 values with DELTA+RLE encoding
|
||||
*/
|
||||
export declare function encodeInt64DeltaRleColumn(runs: Array<[number, bigint]>): Uint8Array;
|
||||
/**
|
||||
* Encodes nullable INT_64 values
|
||||
*/
|
||||
export declare function encodeInt64NullableColumn(values: (bigint | null)[]): Uint8Array;
|
||||
/**
|
||||
* Encodes UINT_64 values (no zigzag encoding)
|
||||
*/
|
||||
export declare function encodeUint64Column(values: BigUint64Array): Uint8Array;
|
||||
/**
|
||||
* Encodes nullable UINT_64 values
|
||||
*/
|
||||
export declare function encodeUint64NullableColumn(values: (bigint | null)[]): Uint8Array;
|
||||
/**
|
||||
* Encodes FLOAT values
|
||||
*/
|
||||
export declare function encodeFloatColumn(values: Float32Array): Uint8Array;
|
||||
/**
|
||||
* Encodes nullable FLOAT values
|
||||
*/
|
||||
export declare function encodeFloatNullableColumn(values: (number | null)[]): Uint8Array;
|
||||
/**
|
||||
* Encodes DOUBLE values
|
||||
*/
|
||||
export declare function encodeDoubleColumn(values: Float64Array): Uint8Array;
|
||||
/**
|
||||
* Encodes nullable DOUBLE values
|
||||
*/
|
||||
export declare function encodeDoubleNullableColumn(values: (number | null)[]): Uint8Array;
|
||||
/**
|
||||
* Encodes BOOLEAN values
|
||||
*/
|
||||
export declare function encodeBooleanColumn(values: boolean[]): Uint8Array;
|
||||
/**
|
||||
* Encodes nullable BOOLEAN values
|
||||
*/
|
||||
export declare function encodeBooleanNullableColumn(values: (boolean | null)[]): Uint8Array;
|
||||
328
node_modules/@maplibre/mlt/dist/encoding/propertyEncoder.js
generated
vendored
Normal file
328
node_modules/@maplibre/mlt/dist/encoding/propertyEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,328 @@
|
||||
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
|
||||
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
|
||||
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
|
||||
import { DictionaryType } from "../metadata/tile/dictionaryType";
|
||||
import IntWrapper from "../decoding/intWrapper";
|
||||
import { encodeBooleanRle, encodeFloatsLE, encodeDoubleLE } from "./encodingUtils";
|
||||
import { encodeVarintInt32Value, encodeVarintInt32, encodeVarintInt64, encodeZigZagInt32Value, encodeZigZagInt64Value, encodeZigZagInt32, } from "./integerEncodingUtils";
|
||||
/**
 * Encodes an INT_32 column with plain (NONE) encoding: zigzag each value,
 * varint-encode the result, and prepend the stream metadata header.
 * No delta or run-length transform is applied.
 */
export function encodeInt32NoneColumn(values) {
    const varintBytes = encodeVarintInt32(encodeZigZagInt32(values));
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, varintBytes);
}
|
||||
/**
 * Encodes an INT_32 column with DELTA encoding: the first element is stored
 * as-is and every later slot stores its difference to the predecessor
 * (wrapped to int32 by the backing Int32Array), followed by zigzag + varint.
 */
export function encodeInt32DeltaColumn(values) {
    const deltas = new Int32Array(values.length);
    let previous = 0;
    for (let i = 0; i < values.length; i++) {
        // For i === 0 this stores values[0] unchanged (previous is 0).
        deltas[i] = values[i] - previous;
        previous = values[i];
    }
    const varintBytes = encodeVarintInt32(encodeZigZagInt32(deltas));
    const metadata = createStreamMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, varintBytes);
}
|
||||
/**
 * Encodes an INT_32 column with RLE encoding.
 *
 * Wire layout: all run lengths first, then all run values (zigzag encoded),
 * with the combined sequence varint encoded.
 *
 * @param runs - Array of [runLength, value] pairs
 */
export function encodeInt32RleColumn(runs) {
    const lengths = [];
    const zigzaggedValues = [];
    let logicalCount = 0;
    for (const [length, value] of runs) {
        lengths.push(length);
        zigzaggedValues.push(encodeZigZagInt32Value(value));
        logicalCount += length;
    }
    const varintBytes = encodeVarintInt32(new Uint32Array(lengths.concat(zigzaggedValues)));
    const metadata = createRleMetadata(LogicalLevelTechnique.RLE, LogicalLevelTechnique.NONE, runs.length, logicalCount);
    return buildEncodedStream(metadata, varintBytes);
}
|
||||
/**
 * Encodes an INT_32 column with DELTA+RLE encoding.
 *
 * Same wire layout as plain RLE (run lengths, then zigzag-encoded values),
 * but the metadata flags tell the decoder to apply a delta-reconstruction
 * pass afterwards.
 *
 * @param runs - Array of [runLength, deltaValue] pairs, where first value is the base
 */
export function encodeInt32DeltaRleColumn(runs) {
    const lengths = [];
    const zigzaggedDeltas = [];
    let logicalCount = 0;
    for (const [length, delta] of runs) {
        lengths.push(length);
        zigzaggedDeltas.push(encodeZigZagInt32Value(delta));
        logicalCount += length;
    }
    const varintBytes = encodeVarintInt32(new Uint32Array(lengths.concat(zigzaggedDeltas)));
    const metadata = createRleMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.RLE, runs.length, logicalCount);
    return buildEncodedStream(metadata, varintBytes);
}
|
||||
/**
 * Encodes a nullable INT_32 column as a PRESENT stream (boolean-RLE bitmap
 * of non-null slots) followed by a DATA stream holding only the non-null
 * values, zigzag + varint encoded.
 */
export function encodeInt32NullableColumn(values) {
    // PRESENT stream: one flag per input slot, true where a value exists.
    const presentFlags = values.map((v) => v !== null);
    const presentStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length),
        encodeBooleanRle(presentFlags)
    );
    // DATA stream: dense non-null values only.
    const present = values.filter((v) => v !== null);
    const zigzagged = new Uint32Array(present.length);
    for (let i = 0; i < present.length; i++) {
        zigzagged[i] = encodeZigZagInt32Value(present[i]);
    }
    const dataStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length),
        encodeVarintInt32(zigzagged)
    );
    return concatenateBuffers(presentStream, dataStream);
}
|
||||
/**
 * Encodes a UINT_32 column with straight varint encoding. No zigzag step is
 * needed because the values are already unsigned.
 */
export function encodeUint32Column(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt32(values));
}
|
||||
/**
 * Encodes an INT_64 column with plain (NONE) encoding: zigzag each signed
 * 64-bit value into an unsigned word, then varint encode.
 */
export function encodeInt64NoneColumn(values) {
    const zigzagged = new BigUint64Array(values.length);
    for (let i = 0; i < values.length; i++) {
        zigzagged[i] = encodeZigZagInt64Value(values[i]);
    }
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt64(zigzagged));
}
|
||||
/**
 * Encodes an INT_64 column with DELTA encoding: the first element is stored
 * as-is and later slots store differences to their predecessor. Deltas are
 * materialized in a BigInt64Array so any overflow wraps to int64 before the
 * zigzag + varint steps, matching the decoder's reconstruction.
 */
export function encodeInt64DeltaColumn(values) {
    const deltas = new BigInt64Array(values.length);
    let previous = 0n;
    for (let i = 0; i < values.length; i++) {
        // For i === 0 this stores values[0] unchanged (previous is 0n).
        deltas[i] = values[i] - previous;
        previous = values[i];
    }
    const zigzagged = new BigUint64Array(deltas.length);
    for (let i = 0; i < deltas.length; i++) {
        zigzagged[i] = encodeZigZagInt64Value(deltas[i]);
    }
    const metadata = createStreamMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt64(zigzagged));
}
|
||||
/**
 * Encodes an INT_64 column with RLE encoding: run lengths first, then the
 * zigzag-encoded run values, all varint encoded as 64-bit words.
 */
export function encodeInt64RleColumn(runs) {
    const lengths = [];
    const zigzaggedValues = [];
    let logicalCount = 0;
    for (const [length, value] of runs) {
        lengths.push(BigInt(length));
        zigzaggedValues.push(encodeZigZagInt64Value(value));
        logicalCount += length;
    }
    const varintBytes = encodeVarintInt64(new BigUint64Array(lengths.concat(zigzaggedValues)));
    const metadata = createRleMetadata(LogicalLevelTechnique.RLE, LogicalLevelTechnique.NONE, runs.length, logicalCount);
    return buildEncodedStream(metadata, varintBytes);
}
|
||||
/**
 * Encodes an INT_64 column with DELTA+RLE encoding. Same wire layout as
 * plain 64-bit RLE, but flagged so the decoder applies a delta pass.
 */
export function encodeInt64DeltaRleColumn(runs) {
    const lengths = [];
    const zigzaggedDeltas = [];
    let logicalCount = 0;
    for (const [length, delta] of runs) {
        lengths.push(BigInt(length));
        zigzaggedDeltas.push(encodeZigZagInt64Value(delta));
        logicalCount += length;
    }
    const varintBytes = encodeVarintInt64(new BigUint64Array(lengths.concat(zigzaggedDeltas)));
    const metadata = createRleMetadata(LogicalLevelTechnique.DELTA, LogicalLevelTechnique.RLE, runs.length, logicalCount);
    return buildEncodedStream(metadata, varintBytes);
}
|
||||
/**
 * Encodes a nullable INT_64 column as a PRESENT stream (boolean-RLE bitmap)
 * followed by a DATA stream of the non-null values, zigzag + varint encoded.
 */
export function encodeInt64NullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const presentStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length),
        encodeBooleanRle(presentFlags)
    );
    const present = values.filter((v) => v !== null);
    const zigzagged = new BigUint64Array(present.length);
    for (let i = 0; i < present.length; i++) {
        zigzagged[i] = encodeZigZagInt64Value(present[i]);
    }
    const dataStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length),
        encodeVarintInt64(zigzagged)
    );
    return concatenateBuffers(presentStream, dataStream);
}
|
||||
/**
 * Encodes a UINT_64 column with straight 64-bit varint encoding (values are
 * already unsigned, so no zigzag step).
 */
export function encodeUint64Column(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeVarintInt64(values));
}
|
||||
/**
 * Encodes a nullable UINT_64 column as a PRESENT stream (boolean-RLE bitmap)
 * followed by a DATA stream of the non-null values, varint encoded.
 */
export function encodeUint64NullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const presentStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length),
        encodeBooleanRle(presentFlags)
    );
    const present = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length),
        encodeVarintInt64(new BigUint64Array(present))
    );
    return concatenateBuffers(presentStream, dataStream);
}
|
||||
/**
 * Encodes a FLOAT column as raw little-endian IEEE-754 32-bit values.
 */
export function encodeFloatColumn(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeFloatsLE(values));
}
|
||||
/**
 * Encodes a nullable FLOAT column as a PRESENT stream (boolean-RLE bitmap)
 * followed by a DATA stream of the non-null values as little-endian floats.
 */
export function encodeFloatNullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const presentStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length),
        encodeBooleanRle(presentFlags)
    );
    const present = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length),
        encodeFloatsLE(new Float32Array(present))
    );
    return concatenateBuffers(presentStream, dataStream);
}
|
||||
/**
 * Encodes a DOUBLE column as raw little-endian IEEE-754 64-bit values.
 */
export function encodeDoubleColumn(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeDoubleLE(values));
}
|
||||
/**
 * Encodes a nullable DOUBLE column as a PRESENT stream (boolean-RLE bitmap)
 * followed by a DATA stream of the non-null values as little-endian doubles.
 */
export function encodeDoubleNullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const presentStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length),
        encodeBooleanRle(presentFlags)
    );
    const present = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length),
        encodeDoubleLE(new Float64Array(present))
    );
    return concatenateBuffers(presentStream, dataStream);
}
|
||||
/**
 * Encodes a BOOLEAN column with boolean run-length encoding.
 */
export function encodeBooleanColumn(values) {
    const metadata = createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, values.length);
    return buildEncodedStream(metadata, encodeBooleanRle(values));
}
|
||||
/**
 * Encodes a nullable BOOLEAN column as a PRESENT stream (boolean-RLE bitmap)
 * followed by a DATA stream of the non-null booleans, also boolean-RLE.
 */
export function encodeBooleanNullableColumn(values) {
    const presentFlags = values.map((v) => v !== null);
    const presentStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, presentFlags.length),
        encodeBooleanRle(presentFlags)
    );
    const present = values.filter((v) => v !== null);
    const dataStream = buildEncodedStream(
        createStreamMetadata(LogicalLevelTechnique.NONE, LogicalLevelTechnique.NONE, present.length),
        encodeBooleanRle(present)
    );
    return concatenateBuffers(presentStream, dataStream);
}
|
||||
/**
 * Builds metadata for a non-RLE, VARINT-encoded DATA stream.
 *
 * @param logicalTechnique1 - Primary logical-level technique (e.g. DELTA).
 * @param logicalTechnique2 - Secondary logical-level technique; defaults to NONE.
 * @param numValues - Number of logical values in the stream; defaults to 3.
 * @returns A stream-metadata object. `byteLength` is a placeholder (10) —
 *          buildEncodedStream overwrites it with the actual payload length
 *          before serialization.
 */
function createStreamMetadata(logicalTechnique1, logicalTechnique2 = LogicalLevelTechnique.NONE, numValues = 3) {
    return {
        physicalStreamType: PhysicalStreamType.DATA,
        logicalStreamType: { dictionaryType: DictionaryType.NONE },
        logicalLevelTechnique1: logicalTechnique1,
        logicalLevelTechnique2: logicalTechnique2,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
        numValues,
        byteLength: 10, // placeholder; replaced with the real encoded length later
        decompressedCount: numValues,
    };
}
|
||||
/**
 * Builds metadata for an RLE-encoded, VARINT-compressed DATA stream.
 *
 * @param logicalTechnique1 - Primary logical-level technique (RLE or DELTA).
 * @param logicalTechnique2 - Secondary logical-level technique (NONE or RLE).
 * @param runs - Number of [runLength, value] pairs in the stream.
 * @param numRleValues - Total logical value count after expanding the runs.
 * @returns A stream-metadata object with RLE-specific fields. `numValues` is
 *          runs * 2 because run lengths and run values are stored as separate
 *          physical sequences. `byteLength` is a placeholder (10) —
 *          buildEncodedStream overwrites it with the actual payload length.
 */
function createRleMetadata(logicalTechnique1, logicalTechnique2, runs, numRleValues) {
    return {
        physicalStreamType: PhysicalStreamType.DATA,
        logicalStreamType: { dictionaryType: DictionaryType.NONE },
        logicalLevelTechnique1: logicalTechnique1,
        logicalLevelTechnique2: logicalTechnique2,
        physicalLevelTechnique: PhysicalLevelTechnique.VARINT,
        numValues: runs * 2, // run lengths + run values stored back to back
        byteLength: 10, // placeholder; replaced with the real encoded length later
        decompressedCount: numRleValues,
        runs,
        numRleValues,
    };
}
|
||||
/**
 * Serializes a stream: the metadata header (with byteLength corrected to the
 * actual payload size) followed immediately by the encoded payload bytes.
 */
function buildEncodedStream(streamMetadata, encodedData) {
    const header = encodeStreamMetadata({ ...streamMetadata, byteLength: encodedData.length });
    const stream = new Uint8Array(header.length + encodedData.length);
    stream.set(header, 0);
    stream.set(encodedData, header.length);
    return stream;
}
|
||||
/**
 * Serializes stream metadata into its binary header form.
 *
 * Layout: one byte packing the physical stream type (high nibble) and the
 * logical subtype (low nibble, always 0 here); one byte packing logical
 * technique 1 (bits 5-7), logical technique 2 (bits 2-4) and the physical
 * technique (bits 0-1); then varint-encoded numValues and byteLength, plus
 * varint runs/numRleValues when the metadata carries RLE fields.
 *
 * NOTE(review): the wire codes come from Object.values() ordering of the
 * enum modules — confirm against the decoder before reordering any enum.
 */
function encodeStreamMetadata(metadata) {
    // Scratch buffer: 2 fixed bytes + up to four 5-byte varints fit easily in 100.
    const buffer = new Uint8Array(100);
    let writeOffset = 0;
    // Byte 1: Stream type (high nibble) | logical subtype (low nibble)
    const physicalTypeIndex = Object.values(PhysicalStreamType).indexOf(metadata.physicalStreamType);
    const lowerNibble = 0; // For DATA stream with NONE dictionary type
    buffer[writeOffset++] = (physicalTypeIndex << 4) | lowerNibble;
    // Byte 2: Encoding techniques packed as llt1 | llt2 | plt
    const llt1Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique1);
    const llt2Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique2);
    const pltIndex = Object.values(PhysicalLevelTechnique).indexOf(metadata.physicalLevelTechnique);
    buffer[writeOffset++] = (llt1Index << 5) | (llt2Index << 2) | pltIndex;
    // Variable-length fields: logical value count, then payload byte length
    const offset = new IntWrapper(writeOffset);
    encodeVarintInt32Value(metadata.numValues, buffer, offset);
    encodeVarintInt32Value(metadata.byteLength, buffer, offset);
    // RLE-specific fields: run count, then expanded logical value count
    if (isRleMetadata(metadata)) {
        encodeVarintInt32Value(metadata.runs, buffer, offset);
        encodeVarintInt32Value(metadata.numRleValues, buffer, offset);
    }
    // Trim the scratch buffer to the bytes actually written.
    return buffer.slice(0, offset.get());
}
|
||||
/**
 * Guard distinguishing RLE stream metadata: true when the object carries
 * both RLE-only fields (`runs` and `numRleValues`).
 */
function isRleMetadata(metadata) {
    return "numRleValues" in metadata && "runs" in metadata;
}
|
||||
/**
 * Joins any number of byte buffers into a single contiguous Uint8Array,
 * preserving argument order.
 */
function concatenateBuffers(...buffers) {
    let total = 0;
    for (const buf of buffers) {
        total += buf.length;
    }
    const joined = new Uint8Array(total);
    let cursor = 0;
    for (const buf of buffers) {
        joined.set(buf, cursor);
        cursor += buf.length;
    }
    return joined;
}
|
||||
//# sourceMappingURL=propertyEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/propertyEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/propertyEncoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
12
node_modules/@maplibre/mlt/dist/encoding/stringEncoder.d.ts
generated
vendored
Normal file
12
node_modules/@maplibre/mlt/dist/encoding/stringEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
/**
|
||||
* Encodes plain strings into a complete stream with PRESENT (if needed), LENGTH, and DATA streams.
|
||||
* @param strings - Array of strings (can include null values)
|
||||
* @returns Encoded Uint8Array that can be passed to decodeString
|
||||
*/
|
||||
export declare function encodePlainStrings(strings: (string | null)[]): Uint8Array;
|
||||
/**
|
||||
* Encodes dictionary-compressed strings into a complete stream.
|
||||
* @param strings - Array of strings (can include null values)
|
||||
* @returns Encoded Uint8Array that can be passed to decodeString
|
||||
*/
|
||||
export declare function encodeDictionaryStrings(strings: (string | null)[]): Uint8Array;
|
||||
149
node_modules/@maplibre/mlt/dist/encoding/stringEncoder.js
generated
vendored
Normal file
149
node_modules/@maplibre/mlt/dist/encoding/stringEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
import { PhysicalStreamType } from "../metadata/tile/physicalStreamType";
|
||||
import { DictionaryType } from "../metadata/tile/dictionaryType";
|
||||
import { LengthType } from "../metadata/tile/lengthType";
|
||||
import { OffsetType } from "../metadata/tile/offsetType";
|
||||
import { PhysicalLevelTechnique } from "../metadata/tile/physicalLevelTechnique";
|
||||
import { LogicalLevelTechnique } from "../metadata/tile/logicalLevelTechnique";
|
||||
import IntWrapper from "../decoding/intWrapper";
|
||||
import { encodeBooleanRle, encodeStrings, createStringLengths, concatenateBuffers } from "./encodingUtils";
|
||||
import { encodeVarintInt32Value, encodeVarintInt32 } from "./integerEncodingUtils";
|
||||
/**
 * Encodes plain strings into a complete stream with PRESENT (if needed), LENGTH, and DATA streams.
 * @param strings - Array of strings (can include null values)
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export function encodePlainStrings(strings) {
    const present = strings.filter((s) => s !== null);
    const streams = [];
    // Emit a PRESENT stream only when the column actually contains nulls.
    if (present.length !== strings.length) {
        const flags = strings.map((s) => s !== null);
        streams.push(createStream(PhysicalStreamType.PRESENT, encodeBooleanRle(flags), {
            technique: PhysicalLevelTechnique.VARINT,
            count: flags.length,
        }));
    }
    // LENGTH stream: byte length of each non-null string.
    const lengths = createStringLengths(present);
    streams.push(createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(lengths), {
        logical: { lengthType: LengthType.VAR_BINARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: lengths.length,
    }));
    // DATA stream: the concatenated string bytes.
    streams.push(createStream(PhysicalStreamType.DATA, encodeStrings(present), {
        logical: { dictionaryType: DictionaryType.NONE },
    }));
    return concatenateBuffers(...streams);
}
|
||||
/**
 * Encodes dictionary-compressed strings into a complete stream.
 *
 * Builds a dictionary of unique strings in first-seen order, then emits
 * PRESENT (only when nulls exist), OFFSET (dictionary index per value),
 * LENGTH (per dictionary entry) and DATA (dictionary bytes) streams.
 *
 * @param strings - Array of strings (can include null values)
 * @returns Encoded Uint8Array that can be passed to decodeString
 */
export function encodeDictionaryStrings(strings) {
    const present = strings.filter((s) => s !== null);
    // Dictionary: unique strings in first-seen order, with reverse lookup.
    const dictionary = [];
    const indexByString = new Map();
    for (const s of present) {
        if (!indexByString.has(s)) {
            indexByString.set(s, dictionary.length);
            dictionary.push(s);
        }
    }
    const offsets = present.map((s) => indexByString.get(s));
    const streams = [];
    // Emit a PRESENT stream only when the column actually contains nulls.
    if (present.length !== strings.length) {
        const flags = strings.map((s) => s !== null);
        streams.push(createStream(PhysicalStreamType.PRESENT, encodeBooleanRle(flags), {
            technique: PhysicalLevelTechnique.VARINT,
            count: flags.length,
        }));
    }
    // OFFSET stream: one dictionary index per non-null value.
    streams.push(createStream(PhysicalStreamType.OFFSET, encodeVarintInt32(new Uint32Array(offsets)), {
        logical: { offsetType: OffsetType.STRING },
        technique: PhysicalLevelTechnique.VARINT,
        count: offsets.length,
    }));
    // LENGTH stream: byte length of each dictionary entry.
    const lengths = createStringLengths(dictionary);
    streams.push(createStream(PhysicalStreamType.LENGTH, encodeVarintInt32(lengths), {
        logical: { lengthType: LengthType.DICTIONARY },
        technique: PhysicalLevelTechnique.VARINT,
        count: lengths.length,
    }));
    // DATA stream: the concatenated dictionary bytes.
    streams.push(createStream(PhysicalStreamType.DATA, encodeStrings(dictionary), {
        logical: { dictionaryType: DictionaryType.SINGLE },
    }));
    return concatenateBuffers(...streams);
}
|
||||
/**
 * Assembles one encoded stream (metadata header + payload) of the given
 * physical type. Options may supply the logical subtype object, the
 * physical-level technique (defaults to NONE) and the logical value count
 * (defaults to 0, e.g. for raw DATA byte payloads).
 */
function createStream(physicalType, data, options = {}) {
    const numValues = options.count ?? 0;
    const metadata = {
        physicalStreamType: physicalType,
        logicalStreamType: options.logical ?? {},
        logicalLevelTechnique1: LogicalLevelTechnique.NONE,
        logicalLevelTechnique2: LogicalLevelTechnique.NONE,
        physicalLevelTechnique: options.technique ?? PhysicalLevelTechnique.NONE,
        numValues,
        byteLength: data.length,
        decompressedCount: numValues,
    };
    return buildEncodedStream(metadata, data);
}
|
||||
/**
 * Serializes one stream: metadata header (byteLength corrected to the real
 * payload size) followed by the payload bytes.
 */
function buildEncodedStream(streamMetadata, encodedData) {
    const headerBytes = encodeStreamMetadata({ ...streamMetadata, byteLength: encodedData.length });
    const out = new Uint8Array(headerBytes.length + encodedData.length);
    out.set(headerBytes);
    out.set(encodedData, headerBytes.length);
    return out;
}
|
||||
/**
 * Serializes stream metadata into its binary header form.
 *
 * Layout: one byte packing the physical stream type (high nibble) and the
 * logical subtype (low nibble, derived per stream type); one byte packing
 * logical technique 1 (bits 5-7), logical technique 2 (bits 2-4) and the
 * physical technique (bits 0-1); then varint-encoded numValues and
 * byteLength. Unlike the propertyEncoder variant, no RLE fields are written.
 *
 * NOTE(review): the wire codes come from Object.values() ordering of the
 * enum modules — confirm against the decoder before reordering any enum.
 */
function encodeStreamMetadata(metadata) {
    // Scratch buffer: 2 fixed bytes + two 5-byte varints fit easily in 100.
    const buffer = new Uint8Array(100);
    let writeOffset = 0;
    // Byte 1: Stream type (high nibble) | logical subtype (low nibble)
    const physicalTypeIndex = Object.values(PhysicalStreamType).indexOf(metadata.physicalStreamType);
    const lowerNibble = getLogicalSubtypeValue(metadata);
    buffer[writeOffset++] = (physicalTypeIndex << 4) | lowerNibble;
    // Byte 2: Encoding techniques packed as llt1 | llt2 | plt
    const llt1Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique1);
    const llt2Index = Object.values(LogicalLevelTechnique).indexOf(metadata.logicalLevelTechnique2);
    const pltIndex = Object.values(PhysicalLevelTechnique).indexOf(metadata.physicalLevelTechnique);
    buffer[writeOffset++] = (llt1Index << 5) | (llt2Index << 2) | pltIndex;
    // Variable-length fields: logical value count, then payload byte length
    const offset = new IntWrapper(writeOffset);
    encodeVarintInt32Value(metadata.numValues, buffer, offset);
    encodeVarintInt32Value(metadata.byteLength, buffer, offset);
    // Trim the scratch buffer to the bytes actually written.
    return buffer.slice(0, offset.get());
}
|
||||
/**
 * Maps a stream's logical subtype to the low-nibble wire value of the
 * stream-type byte: DATA streams use dictionaryType, OFFSET streams use
 * offsetType, LENGTH streams use lengthType; anything else (e.g. PRESENT)
 * encodes as 0, as does a missing subtype.
 *
 * NOTE(review): assumes Object.values() enum ordering matches the wire
 * format's numeric codes — confirm against the decoder.
 */
function getLogicalSubtypeValue(metadata) {
    const logical = metadata.logicalStreamType;
    switch (metadata.physicalStreamType) {
        case PhysicalStreamType.DATA:
            return logical.dictionaryType === undefined
                ? 0
                : Object.values(DictionaryType).indexOf(logical.dictionaryType);
        case PhysicalStreamType.OFFSET:
            return logical.offsetType === undefined
                ? 0
                : Object.values(OffsetType).indexOf(logical.offsetType);
        case PhysicalStreamType.LENGTH:
            return logical.lengthType === undefined
                ? 0
                : Object.values(LengthType).indexOf(logical.lengthType);
        default:
            return 0;
    }
}
|
||||
//# sourceMappingURL=stringEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/stringEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/stringEncoder.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
node_modules/@maplibre/mlt/dist/encoding/zOrderCurveEncoder.d.ts
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/zOrderCurveEncoder.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
export declare function encodeZOrderCurve(x: number, y: number, numBits: number, coordinateShift: number): number;
|
||||
10
node_modules/@maplibre/mlt/dist/encoding/zOrderCurveEncoder.js
generated
vendored
Normal file
10
node_modules/@maplibre/mlt/dist/encoding/zOrderCurveEncoder.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
/**
 * Interleaves the bits of two shifted coordinates into a Morton (Z-order)
 * code: bit i of the shifted x lands at position 2*i and bit i of the
 * shifted y at position 2*i + 1. Only the lowest numBits bits of each
 * shifted coordinate contribute; the result is built with 32-bit bitwise
 * arithmetic, so 2*numBits must stay within 32 bits.
 */
export function encodeZOrderCurve(x, y, numBits, coordinateShift) {
    const sx = x + coordinateShift;
    const sy = y + coordinateShift;
    let morton = 0;
    for (let bit = 0; bit < numBits; bit++) {
        const mask = 1 << bit;
        morton |= ((sx & mask) << bit) | ((sy & mask) << (bit + 1));
    }
    return morton;
}
|
||||
//# sourceMappingURL=zOrderCurveEncoder.js.map
|
||||
1
node_modules/@maplibre/mlt/dist/encoding/zOrderCurveEncoder.js.map
generated
vendored
Normal file
1
node_modules/@maplibre/mlt/dist/encoding/zOrderCurveEncoder.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"zOrderCurveEncoder.js","sourceRoot":"","sources":["../../src/encoding/zOrderCurveEncoder.ts"],"names":[],"mappings":"AAAA,MAAM,UAAU,iBAAiB,CAAC,CAAS,EAAE,CAAS,EAAE,OAAe,EAAE,eAAuB;IAC5F,MAAM,QAAQ,GAAG,CAAC,GAAG,eAAe,CAAC;IACrC,MAAM,QAAQ,GAAG,CAAC,GAAG,eAAe,CAAC;IACrC,IAAI,IAAI,GAAG,CAAC,CAAC;IACb,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,OAAO,EAAE,CAAC,EAAE,EAAE,CAAC;QAC/B,IAAI,IAAI,CAAC,CAAC,QAAQ,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,QAAQ,GAAG,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;IAC9E,CAAC;IACD,OAAO,IAAI,CAAC;AAChB,CAAC","sourcesContent":["export function encodeZOrderCurve(x: number, y: number, numBits: number, coordinateShift: number): number {\n const shiftedX = x + coordinateShift;\n const shiftedY = y + coordinateShift;\n let code = 0;\n for (let i = 0; i < numBits; i++) {\n code |= ((shiftedX & (1 << i)) << i) | ((shiftedY & (1 << i)) << (i + 1));\n }\n return code;\n}\n"]}
|
||||
Reference in New Issue
Block a user