diff --git a/dissect/evidence/asdf/asdf.py b/dissect/evidence/asdf/asdf.py
index 7622316..cd85ed1 100644
--- a/dissect/evidence/asdf/asdf.py
+++ b/dissect/evidence/asdf/asdf.py
@@ -4,12 +4,14 @@
 
 import gzip
 import io
+import itertools
 import shutil
 import tarfile
 import uuid
 from bisect import bisect_right
 from collections import defaultdict
-from typing import TYPE_CHECKING, BinaryIO
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, BinaryIO, Generic, TypeVar
 
 from dissect.util import ts
 from dissect.util.stream import AlignedStream, RangeStream
@@ -23,13 +25,14 @@
 )
 
 if TYPE_CHECKING:
-    from collections.abc import Callable, Iterator
+    from collections.abc import Iterator, KeysView, ValuesView
 
 SnapshotTableEntry = tuple[int, int, int, int]
 
 VERSION = 1
 DEFAULT_BLOCK_SIZE = 4096
 MAX_BLOCK_TABLE_SIZE = 2**32
+OFFSET_MASK = (1 << 64) - 1
 
 MAX_IDX = 253
 IDX_MEMORY = 254
@@ -41,6 +44,176 @@
 FOOTER_MAGIC = b"FT\xa5\xdf"
 SPARSE_BYTES = b"\xa5\xdf"
 
+DEFAULT_TABLE_SIZE = 4 * 1024 * 1024 // len(c_asdf.table_entry)
+
+
+@dataclass(slots=True)
+class ReadEntry:
+    """An entry representing the data to read in :class:`AsdfSnapshot`.
+
+    Very similar to ``c_asdf.table_entry``, but stores the offset of the data inside
+    the stream instead of ``file_size``.
+    """
+
+    idx: int
+    offset: int
+    size: int
+    file_offset: int
+    data_offset: int
+
+    def dumps(self) -> bytes:
+        return b""
+
+
+T = TypeVar("T", ReadEntry, c_asdf.table_entry)
+
+
+class Table(Generic[T]):
+    """A single collection point for table entries when reading and writing."""
+
+    _table: dict[int, list[T]]
+    """Keeps the table entries in order for each stream."""
+    _lookup: dict[int, list[int]]
+    """Keeps the stream offsets in order for each stream."""
+    _table_offsets: list[tuple[int, c_asdf.table_index]]
+    """Tracks every previously flushed table as a tuple of its start offset and its ``table_index``."""
+    _entries: int
+    """The current number of entries inside the table."""
+    last_table_offset: int
+    """Offset of the previously flushed table."""
+
+    def __init__(self) -> None:
+        self._table_offsets = []
+        self._table = defaultdict(list)
+        self._lookup = defaultdict(list)
+
+        self._entries = 0
+        self.last_table_offset = OFFSET_MASK
+
+    def __bool__(self) -> bool:
+        return bool(self._table)
+
+    def __len__(self) -> int:
+        return self._entries
+
+    def __contains__(self, obj: Any) -> bool:
+        return obj in self._table
+
+    def get(self, index: int) -> tuple[list[T], list[int]]:
+        return self._table[index], self._lookup[index]
+
+    def add(self, table_idx: int, entry: T) -> None:
+        self._table[entry.idx].insert(table_idx, entry)
+        self._lookup[entry.idx].insert(table_idx, entry.offset)
+        self._entries += 1
+
+    def indexes(self) -> list[int]:
+        """Returns which stream indexes are inside this table.
+
+        Creates a 256-bit bitmap that represents the stream indexes currently inside the table.
+        The bitmap is divided into four 64-bit integers.
+        """
+        indexes = sum(1 << key for key in self._table)
+        return [(indexes >> (x * 64)) & OFFSET_MASK for x in range(256 // 64)]
+
+    def lookup(self, idx: int, fh: BinaryIO) -> list[int]:
+        """Finds entries belonging to a stream index inside any flushed table.
+
+        In the worst-case scenario, where a table gets flushed every time an entry is added,
+        this function still finds all the offsets and returns them in the correct order.
+
+        TODO: Only returns the offsets for now, can be rewritten and reused for :class:`AsdfSnapshot`.
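+
+        The flushed-table check below uses the same bitmap layout as :meth:`indexes`;
+        stream ``idx=65``, for example, maps to word ``65 // 64 == 1`` and bit ``65 % 64 == 1``.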
+
+        Args:
+            idx: The stream index to look up.
+            fh: The file handle of the ASDF file.
+
+        Returns:
+            A list of offsets for the given stream.
+        """
+        prev_offset = fh.tell()
+        # Which 64-bit word of table.indexes to check for this stream
+        index_idx = idx // 64
+        lookup_value = 1 << (idx % 64)
+
+        lookup = []
+        entries = []
+
+        # Go through all previously flushed tables
+        for offset, table in self._table_offsets:
+            # Determine whether the stream_idx is inside this flushed table
+            index = table.indexes[index_idx]
+            if not (lookup_value & index):
+                # This table does not contain the index, so we continue to the next one
+                continue
+
+            fh.seek(offset + len(c_asdf.table_index), io.SEEK_SET)
+
+            count = table.size // len(c_asdf.table_entry)
+            for entry in c_asdf.table_entry[count](fh.read(table.size)):
+                # Determine whether this entry can be skipped
+                if idx != entry.idx:
+                    continue
+
+                tab_idx, offset, size = _table_fit(entry.offset, entry.size, entries, lookup)
+                if tab_idx is None:
+                    # The block can be skipped, continue with the next entry
+                    continue
+
+                entry.offset = offset
+                entry.size = size
+                entries.insert(tab_idx, entry)
+                lookup.insert(tab_idx, offset)
+
+        fh.seek(prev_offset, io.SEEK_SET)
+
+        # Fit all the entries inside the current table
+        for entry in self._table.get(idx, []):
+            tab_idx, offset, size = _table_fit(entry.offset, entry.size, entries, lookup)
+            if tab_idx is None:
+                continue
+
+            # Copy the entry, so we don't change the data that's currently inside the table
+            _entry = c_asdf.table_entry(
+                idx=idx,
+                flags=entry.flags,
+                offset=offset,
+                size=size,
+                file_size=entry.file_size,
+                file_offset=entry.file_offset,
+            )
+
+            entries.insert(tab_idx, _entry)
+            lookup.insert(tab_idx, offset)
+
+        return lookup
+
+    def values(self) -> ValuesView[list[T]]:
+        return self._table.values()
+
+    def keys(self) -> KeysView[int]:
+        return self._table.keys()
+
+    def write(self, fh: BinaryIO) -> None:
+        """Writes the current table to the destination file-like object."""
+        indexes = self.indexes()
+        result = [entry.dumps() for entry in itertools.chain(*self._table.values())]
+
+        index = c_asdf.table_index(
+            prev_table=self.last_table_offset, size=len(result) * len(c_asdf.table_entry), indexes=indexes
+        )
+        result.insert(0, index.dumps())
+
+        table_offset = fh.tell()
+        self.last_table_offset = table_offset
+        self._table_offsets.append((table_offset, index))
+
+        fh.writelines(result)
+
+        self._table.clear()
+        self._lookup.clear()
+        self._entries = 0
+
 
 class AsdfWriter(io.RawIOBase):
     """ASDF file writer.
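+
+    A sketch of the new flushing behaviour (``fh`` is any writable binary stream;
+    ``table_size`` is the number of entries buffered in memory before an intermediate flush)::
+
+        writer = AsdfWriter(fh, table_size=1)  # flush the block table after every entry
+        writer.add_bytes(b"\x01" * 100, base=0)
+        writer.close()  # writes the final table and footer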
@@ -65,6 +238,7 @@ def __init__(
         guid: uuid.UUID | None = None,
         compress: bool = False,
         block_crc: bool = True,
+        table_size: int = DEFAULT_TABLE_SIZE,
     ):
         self._fh = fh
         self.fh = self._fh
@@ -79,9 +253,11 @@ def __init__(
         self.block_crc = block_crc
         self.block_compress = False  # Disabled for now
 
-        self._table = defaultdict(list)
-        self._table_lookup = defaultdict(list)
-        self._table_offset = 0
+        if table_size < 1:
+            raise ValueError("Table size must be at least 1")
+
+        self._max_entries = table_size
+        self._table = Table[c_asdf.table_entry]()
 
         self._meta_buf = io.BytesIO()
         self._meta_tar = tarfile.open(fileobj=self._meta_buf, mode="w")  # noqa: SIM115
@@ -204,8 +380,8 @@ def close(self) -> None:
         """
         super().close()
         self._write_meta()
-        if self._table:
-            self._write_table()
+        if len(self._table):
+            self.flush()
         self._write_footer()
         self.fh.close()
@@ -232,12 +408,9 @@ def _write_block(self, source: BinaryIO, offset: int, size: int, idx: int = 0, b
         """
         absolute_offset = base + offset
 
-        lookup_table = self._table_lookup[idx]
-        entry_table = self._table[idx]
+        entry_table, lookup_table = self._table.get(idx)
 
-        table_idx, absolute_offset, size = _table_fit(
-            absolute_offset, size, entry_table, lookup_table, lambda e: (e[2], e[3])
-        )
+        table_idx, absolute_offset, size = _table_fit(absolute_offset, size, entry_table, lookup_table)
         if table_idx is None:
             return
@@ -271,9 +444,20 @@ def _write_block(self, source: BinaryIO, offset: int, size: int, idx: int = 0, b
             outfh.finalize()
 
         data_size = self.fh.tell() - data_offset
+        self._table.add(
+            table_idx,
+            c_asdf.table_entry(
+                idx=idx,
+                offset=absolute_offset,
+                flags=flags,
+                size=size,
+                file_offset=block_offset,
+                file_size=data_size,
+            ),
+        )
 
-        lookup_table.insert(table_idx, absolute_offset)
-        entry_table.insert(table_idx, (flags, idx, absolute_offset, size, block_offset, data_size))
+        if len(self._table) >= self._max_entries:
+            self.flush()
 
     def _write_meta(self) -> None:
         """Write the metadata tar to the destination file-like object."""
@@ -283,26 +467,15 @@ def _write_meta(self) -> None:
         self._meta_buf.seek(0)
         self.copy_bytes(self._meta_buf, 0, size, idx=IDX_METADATA)
 
-    def _write_table(self) -> None:
+    def flush(self) -> None:
         """Write the ASDF block table to the destination file-like object."""
-        self._table_offset = self.fh.tell()
-
-        for stream_table in self._table.values():
-            for flags, idx, offset, size, file_offset, file_size in stream_table:
-                table_entry = c_asdf.table_entry(
-                    flags=flags,
-                    idx=idx,
-                    offset=offset,
-                    size=size,
-                    file_offset=file_offset,
-                    file_size=file_size,
-                )
-                table_entry.write(self.fh)
+        self._table.write(self.fh)
 
     def _write_footer(self) -> None:
         """Write the ASDF footer to the destination file-like object."""
         footer = c_asdf.footer(
             magic=FOOTER_MAGIC,
-            table_offset=self._table_offset,
+            table_offset=self._table.last_table_offset,
             sha256=self.fh.digest(),
         )
         footer.write(self.fh)
@@ -327,8 +500,7 @@ def __init__(self, fh: BinaryIO, recover: bool = False):
         self.timestamp = ts.from_unix(self.header.timestamp)
         self.guid = uuid.UUID(bytes_le=self.header.guid)
 
-        self.table: dict[list[SnapshotTableEntry]] = defaultdict(list)
-        self._table_lookup: dict[list[int]] = defaultdict(list)
+        self.table = Table[ReadEntry]()
 
         footer_offset = self.fh.seek(-len(c_asdf.footer), io.SEEK_END)
@@ -349,11 +521,28 @@ def __init__(self, fh: BinaryIO, recover: bool = False):
 
     def _parse_block_table(self, offset: int, count: int) -> None:
         """Parse the block table, getting rid of overlapping blocks."""
        self.fh.seek(offset)
-        table_data = io.BytesIO(self.fh.read(count * len(c_asdf.table_entry)))
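+        # The footer points at the most recently flushed table; each table_index.prev_table
+        # points at the table flushed before it, so the chain is walked back to front here.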
+        table_offsets = []
 
-        for _ in range(count):
-            entry = c_asdf.table_entry(table_data)
-            self._table_insert(entry.idx, entry.offset, entry.size, entry.file_offset)
+        # Read all the tables and their offsets in reverse order
+        while True:
+            table_offset = self.fh.tell()
+            table_index = c_asdf.table_index(self.fh)
+            table_offsets.append((table_offset, table_index))
+            if table_index.prev_table == OFFSET_MASK:
+                break
+            self.fh.seek(table_index.prev_table)
+
+        table_offsets.reverse()
+        self.table._table_offsets = table_offsets
+
+        # Read all the table entries and add them to the table
+        for offset, table_index in table_offsets:
+            self.fh.seek(offset + len(c_asdf.table_index))
+            _count = table_index.size // len(c_asdf.table_entry)
+            table_data = io.BytesIO(self.fh.read(table_index.size))
+
+            for entry in c_asdf.table_entry[_count](table_data):
+                self._table_insert(entry.idx, entry.offset, entry.size, entry.file_offset)
 
     def _recover_block_table(self) -> None:
         self.fh.seek(len(c_asdf.header))
@@ -361,29 +550,25 @@ def _recover_block_table(self) -> None:
             self._table_insert(block.idx, block.offset, block.size, file_offset)
 
     def _table_insert(self, idx: int, offset: int, size: int, file_offset: int) -> None:
-        stream_idx = idx
         entry_data_offset = file_offset + len(c_asdf.block)
 
-        lookup_table = self._table_lookup[stream_idx]
-        entry_table = self.table[stream_idx]
+        entry_table, lookup_table = self.table.get(idx)
 
-        table_idx, entry_offset, entry_size = _table_fit(
-            offset, size, entry_table, lookup_table, lambda e: (e[0], e[1])
-        )
+        table_idx, entry_offset, entry_size = _table_fit(offset, size, entry_table, lookup_table)
         if table_idx is None:
             return
 
         entry_data_offset += entry_offset - offset
 
-        lookup_table.insert(table_idx, entry_offset)
-        entry_table.insert(
+        self.table.add(
             table_idx,
-            (
-                entry_offset,
-                entry_size,
-                file_offset,
-                entry_data_offset,
+            ReadEntry(
+                idx=idx,
+                offset=entry_offset,
+                size=entry_size,
+                file_offset=file_offset,
+                data_offset=entry_data_offset,
             ),
         )
@@ -459,12 +644,11 @@ def __init__(self, asdf: AsdfSnapshot, idx: int):
         self.fh = asdf.fh
         self.asdf = asdf
         self.idx = idx
-        self.table = asdf.table[idx]
-        self._table_lookup = asdf._table_lookup[idx]
+        self.table, self._table_lookup = asdf.table.get(idx)
 
         # We don't actually know the size of the source disk
         # Doesn't really matter though, just take the last run offset + size
-        size = self.table[-1][0] + self.table[-1][1]
+        size = self.table[-1].offset + self.table[-1].size
         super().__init__(size)
 
     def _read(self, offset: int, length: int) -> bytes:
@@ -473,15 +657,12 @@ def _read(self, offset: int, length: int) -> bytes:
         size = self.size
 
         run_idx = bisect_right(self._table_lookup, offset) - 1
         runlist_len = len(self.table)
 
         while length > 0 and run_idx < runlist_len:
-            run_start, run_size, run_file_offset, run_data_offset = self.table[run_idx]
-            run_end = run_start + run_size
+            entry = self.table[run_idx]
+            run_data_offset = entry.data_offset
+            run_end = entry.offset + entry.size
 
-            if run_idx + 1 < runlist_len:
-                next_run_start, _, _, _ = self.table[run_idx + 1]
-            else:
-                next_run_start = None
+            next_run_start = self.table[run_idx + 1].offset if (run_idx + 1 < runlist_len) else None
 
             if run_idx < 0:
                 # Missing first block
@@ -510,20 +691,20 @@ def _read(self, offset: int, length: int) -> bytes:
 
                 # Proceed to next run
                 run_idx += 1
-            elif offset < run_start:
+            elif offset < entry.offset:
                 # Previous run consumed, and next run is far away
-                sparse_remaining = run_start - offset
+                sparse_remaining = entry.offset - offset
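+                # The gap up to the next run is emitted as repeating SPARSE_BYTES filler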
 
                 read_count = min(size - offset, min(sparse_remaining, length))
 
                 result.append(SPARSE_BYTES * (read_count // len(SPARSE_BYTES)))
 
                 # Don't proceed to next run, next loop iteration we'll be within the current run
             else:
                 # We're in a run with data
-                run_pos = offset - run_start
-                run_remaining = run_size - run_pos
+                run_pos = offset - entry.offset
+                run_remaining = entry.size - run_pos
 
                 read_count = min(size - offset, min(run_remaining, length))
 
-                self.fh.seek(run_file_offset)
+                self.fh.seek(entry.file_offset)
                 if self.fh.read(4) != BLOCK_MAGIC:
                     raise InvalidBlock("invalid block magic")
@@ -540,7 +721,7 @@ def _read(self, offset: int, length: int) -> bytes:
         return b"".join(result)
 
 
-def scrape_blocks(fh: BinaryIO, buffer_size: int = io.DEFAULT_BUFFER_SIZE) -> Iterator[c_asdf.block, int]:
+def scrape_blocks(fh: BinaryIO, buffer_size: int = io.DEFAULT_BUFFER_SIZE) -> Iterator[tuple[c_asdf.block, int]]:
     """Scrape for block headers in ``fh`` and yield parsed block headers and their offset.
 
     Args:
@@ -586,8 +767,11 @@ def scrape_blocks(fh: BinaryIO, buffer_size: int = io.DEFAULT_BUFFER_SIZE) -> It
 
 
 def _table_fit(
-    entry_offset: int, entry_size: int, entry_table: list, lookup_table: list, getentry: Callable
-) -> tuple[int, int, int]:
+    entry_offset: int,
+    entry_size: int,
+    entry_table: list[T],
+    lookup_table: list[int],
+) -> tuple[int | None, int | None, int | None]:
     """Calculate where to insert an entry with the given offset and size into the entry table.
 
     Moves or shrinks the entry to prevent block overlap, and remove any overlapping blocks.
 
     Args:
        entry_offset: The entry offset to calculate the insert for.
@@ -597,7 +781,6 @@ def _table_fit(
        entry_size: The entry size to calculate the insert for.
        entry_table: The entry table to insert into or remove entries from.
        lookup_table: The lookup table for the entry_table.
-       getentry: A callable to return the ``(offset, size)`` tuple from an entry.
 
     Returns:
        A tuple of the table index to insert into, an adjusted entry offset and an adjusted entry size.
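+
+    For example, fitting an entry ``(offset=50, size=100)`` against an existing entry
+    ``(offset=0, size=100)`` yields ``(offset=100, size=50)``: the overlapping head is
+    dropped, matching the behaviour exercised by ``test_asdf_overlap``.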
@@ -610,10 +793,12 @@ def _table_fit(
     table_idx = bisect_right(lookup_table, entry_offset)
 
     if table_idx > 0:
-        prev_start, prev_size = getentry(entry_table[table_idx - 1])
+        _entry = entry_table[table_idx - 1]
+        prev_start, prev_size = _entry.offset, _entry.size
         prev_end = prev_start + prev_size
 
     if table_idx < len(lookup_table):
-        next_start, next_size = getentry(entry_table[table_idx])
+        _entry = entry_table[table_idx]
+        next_start, next_size = _entry.offset, _entry.size
         next_end = next_start + next_size
 
     if prev_end and prev_end >= entry_end:
@@ -630,7 +815,8 @@ def _table_fit(
             entry_table.pop(table_idx)
 
             if table_idx < len(lookup_table):
-                next_start, next_size = getentry(entry_table[table_idx])
+                _entry = entry_table[table_idx]
+                next_start, next_size = _entry.offset, _entry.size
                 next_end = next_start + next_size
             else:
                 next_start, next_end = None, None
diff --git a/dissect/evidence/asdf/c_asdf.py b/dissect/evidence/asdf/c_asdf.py
index 8da56ce..20bf864 100644
--- a/dissect/evidence/asdf/c_asdf.py
+++ b/dissect/evidence/asdf/c_asdf.py
@@ -31,6 +31,13 @@
     uint64  size;       // Size of block in stream
 };
 
+// A structure to keep track of previously flushed tables
+struct table_index {
+    uint64  prev_table;     // Offset of the previous table; 0xFFFFFFFF_FFFFFFFF denotes the end of the table chain
+    uint64  size;           // Size of the table in bytes
+    uint64  indexes[4];     // Bitmap of which stream indexes are available inside the table
+};
+
 struct table_entry {
     BLOCK_FLAG  flags;      // Block flags
     uint8       idx;        // Stream index, some reserved values have special meaning
diff --git a/dissect/evidence/asdf/c_asdf.pyi b/dissect/evidence/asdf/c_asdf.pyi
index 58c6f12..61c08ff 100644
--- a/dissect/evidence/asdf/c_asdf.pyi
+++ b/dissect/evidence/asdf/c_asdf.pyi
@@ -1,5 +1,5 @@
 # Generated by cstruct-stubgen
-from typing import BinaryIO, TypeAlias, overload
+from typing import BinaryIO, Literal, TypeAlias, overload
 
 import dissect.cstruct as __cs__
 
@@ -53,6 +53,20 @@ class _c_asdf(__cs__.cstruct):
         @overload
         def __init__(self, fh: bytes | memoryview | bytearray | BinaryIO, /): ...
 
+    class table_index(__cs__.Structure):
+        prev_table: _c_asdf.uint64
+        size: _c_asdf.uint64
+        indexes: __cs__.Array[_c_asdf.uint64]
+        @overload
+        def __init__(
+            self,
+            prev_table: _c_asdf.uint64 | None = ...,
+            size: _c_asdf.uint64 | None = ...,
+            indexes: __cs__.Array[_c_asdf.uint64] | None = ...,
+        ): ...
+        @overload
+        def __init__(self, fh: bytes | memoryview | bytearray | BinaryIO, /): ...
+
     class table_entry(__cs__.Structure):
         flags: _c_asdf.BLOCK_FLAG
         idx: _c_asdf.uint8
diff --git a/dissect/evidence/asdf/stream.py b/dissect/evidence/asdf/stream.py
index 4bd1fb2..0c0efd1 100644
--- a/dissect/evidence/asdf/stream.py
+++ b/dissect/evidence/asdf/stream.py
@@ -16,6 +16,7 @@ class SubStreamBase(io.RawIOBase):
 
     def __init__(self, fh: BinaryIO):
         self.fh = fh
+        # Bypass io.RawIOBase.read by binding directly to the underlying file object
+        self.read = fh.read
 
     def write(self, b: bytes) -> int:
         return self.fh.write(b)
diff --git a/tests/conftest.py b/tests/conftest.py
index 3cfa66f..6398a54 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -6,7 +6,7 @@
 
 import pytest
 
-from dissect.evidence.asdf import AsdfWriter
+from dissect.evidence.asdf.asdf import DEFAULT_TABLE_SIZE, AsdfWriter
 from tests._utils import absolute_path
 
 if TYPE_CHECKING:
@@ -80,11 +80,16 @@ def ewf_data() -> Iterator[BinaryIO]:
     yield from open_data("_data/ewf/ewf.E01")
 
 
-@pytest.fixture
-def asdf_writer() -> AsdfWriter:
+@pytest.fixture(
+    params=[
+        pytest.param(1, id="table_size=1"),
+        pytest.param(DEFAULT_TABLE_SIZE, id="table_size=DEFAULT"),
+    ]
+)
+def asdf_writer(request: pytest.FixtureRequest) -> AsdfWriter:
     def noop() -> None:
         pass
 
     fh = BytesIO()
     fh.close = noop  # Prevent clearing the buffer, we need it
-    return AsdfWriter(fh)
+    return AsdfWriter(fh, table_size=request.param)
diff --git a/tests/test_asdf.py b/tests/test_asdf.py
index d1b6e33..c3220b3 100644
--- a/tests/test_asdf.py
+++ b/tests/test_asdf.py
@@ -28,7 +28,7 @@ def test_asdf(asdf_writer: AsdfWriter) -> None:
     reader = AsdfSnapshot(asdf_writer._fh)
 
     stream_0 = reader.open(0)
-    assert [(run_start, run_size) for run_start, run_size, _, _ in stream_0.table] == [
+    assert [(entry.offset, entry.size) for entry in stream_0.table] == [
         (0, 0x1000),
         (0x4000, 0x1000),
         (0x8000, 0x1000),
@@ -62,16 +62,16 @@ def test_asdf(asdf_writer: AsdfWriter) -> None:
 def test_asdf_overlap(asdf_writer: AsdfWriter) -> None:
     asdf_writer.add_bytes(b"\x01" * 100, base=0)
     asdf_writer.add_bytes(b"\x02" * 100, base=200)
-    assert asdf_writer._table_lookup[0] == [0, 200]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 200]
 
     asdf_writer.add_bytes(b"\x03" * 100, base=50)
-    assert asdf_writer._table_lookup[0] == [0, 100, 200]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100, 200]
 
     asdf_writer.add_bytes(b"\x04" * 150, base=100)
-    assert asdf_writer._table_lookup[0] == [0, 100, 150, 200]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100, 150, 200]
 
     asdf_writer.add_bytes(b"\x05" * 50, base=25)
-    assert asdf_writer._table_lookup[0] == [0, 100, 150, 200]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100, 150, 200]
 
     asdf_writer.close()
     asdf_writer._fh.seek(0)
@@ -79,7 +79,7 @@ def test_asdf_overlap(asdf_writer: AsdfWriter) -> None:
     reader = AsdfSnapshot(asdf_writer._fh)
 
     stream = reader.open(0)
-    assert [(run_start, run_size) for run_start, run_size, _, _ in stream.table] == [
+    assert [(entry.offset, entry.size) for entry in stream.table] == [
         (0, 100),
         (100, 50),
         (150, 50),
@@ -93,9 +93,9 @@ def test_asdf_overlap_all(asdf_writer: AsdfWriter) -> None:
     asdf_writer.add_bytes(b"\x02" * 100, base=200)
     asdf_writer.add_bytes(b"\x03" * 100, base=50)
     asdf_writer.add_bytes(b"\x04" * 150, base=100)
-    assert asdf_writer._table_lookup[0] == [0, 100, 150, 200]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100, 150, 200]
     asdf_writer.add_bytes(b"\x06" * 400, base=0)
-    assert asdf_writer._table_lookup[0] == [0, 100]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100]
 
     asdf_writer.close()
     asdf_writer._fh.seek(0)
@@ -103,7 +103,7 @@ def test_asdf_overlap_all(asdf_writer: AsdfWriter) -> None:
     reader = AsdfSnapshot(asdf_writer._fh)
 
     stream = reader.open(0)
-    assert [(run_start, run_size) for run_start, run_size, _, _ in stream.table] == [
+    assert [(entry.offset, entry.size) for entry in stream.table] == [
         (0, 100),
         (100, 300),
     ]
@@ -113,10 +113,10 @@ def test_asdf_overlap_all(asdf_writer: AsdfWriter) -> None:
 def test_asdf_overlap_contiguous(asdf_writer: AsdfWriter) -> None:
     asdf_writer.add_bytes(b"\x01" * 100, base=0)
     asdf_writer.add_bytes(b"\x02" * 100, base=100)
-    assert asdf_writer._table_lookup[0] == [0, 100]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100]
 
     asdf_writer.add_bytes(b"\x03" * 75, base=50)
-    assert asdf_writer._table_lookup[0] == [0, 100]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100]
 
     asdf_writer.close()
     asdf_writer._fh.seek(0)
@@ -124,7 +124,7 @@ def test_asdf_overlap_contiguous(asdf_writer: AsdfWriter) -> None:
     reader = AsdfSnapshot(asdf_writer._fh)
 
     stream = reader.open(0)
-    assert [(run_start, run_size) for run_start, run_size, _, _ in stream.table] == [
+    assert [(entry.offset, entry.size) for entry in stream.table] == [
         (0, 100),
         (100, 100),
     ]
@@ -135,7 +135,7 @@ def test_asdf_overlap_seek(asdf_writer: AsdfWriter) -> None:
     asdf_writer.add_bytes(b"\x00" * 100, base=0)
     asdf_writer.add_bytes(b"\x00" * 100, base=200)
     asdf_writer.add_bytes(bytes(range(200)), base=50)
-    assert asdf_writer._table_lookup[0] == [0, 100, 200]
+    assert asdf_writer._table.lookup(0, asdf_writer._fh) == [0, 100, 200]
 
     asdf_writer.close()
     asdf_writer._fh.seek(0)
@@ -143,7 +143,7 @@ def test_asdf_overlap_seek(asdf_writer: AsdfWriter) -> None:
     reader = AsdfSnapshot(asdf_writer._fh)
 
     stream = reader.open(0)
-    assert [(run_start, run_size) for run_start, run_size, _, _ in stream.table] == [
+    assert [(entry.offset, entry.size) for entry in stream.table] == [
         (0, 100),
         (100, 100),
         (200, 100),
@@ -237,7 +237,7 @@ def test_asdf_scrape(asdf_writer: AsdfWriter) -> None:
     reader = AsdfSnapshot(asdf_writer._fh, recover=True)
 
     stream = reader.open(0)
-    assert [(run_start, run_size) for run_start, run_size, _, _ in stream.table] == [
+    assert [(entry.offset, entry.size) for entry in stream.table] == [
         (0, 0x1000),
         (0x4000, 0x1000),
         (0x8000, 0x1000),