Changes from all 25 commits
82cced9
Core: Merge DVs referencing the same data files as a safeguard
amogh-jahagirdar Jan 9, 2026
e41943d
Fix dangling delete tests
amogh-jahagirdar Jan 9, 2026
76e24e4
Simplification in OutputFileFactory
amogh-jahagirdar Jan 9, 2026
a740ff9
minor optimization
amogh-jahagirdar Jan 9, 2026
11ffc2f
cleanup, make outputfilefactory take in more fields so that we don't …
amogh-jahagirdar Jan 10, 2026
772e3c2
change the duplicate tracking algorithm, fix spark tests
amogh-jahagirdar Jan 10, 2026
3404a86
Add more tests for multiple DVs and w equality deletes
amogh-jahagirdar Jan 11, 2026
c04d0e0
Rebase and fix spark 4.1 tests
amogh-jahagirdar Jan 11, 2026
a39b073
more cleanup, put dvfilewriter in try w resources
amogh-jahagirdar Jan 11, 2026
a079d22
Add logging, some more cleanup
amogh-jahagirdar Jan 12, 2026
d7eadb0
more cleanup
amogh-jahagirdar Jan 12, 2026
0a053a6
Make dv refs a multimap, group by partition to write single puffin fo…
amogh-jahagirdar Jan 15, 2026
6b04dd9
Filter files with duplicates before sifting through them and merging
amogh-jahagirdar Jan 15, 2026
a50fb32
update old comment
amogh-jahagirdar Jan 15, 2026
301f0fe
Use an unpartitioned spec for the output file factory for the final p…
amogh-jahagirdar Jan 16, 2026
112b086
address feedback
amogh-jahagirdar Jan 16, 2026
eecacad
Add some spacing to make precondition checks more readable
amogh-jahagirdar Jan 16, 2026
9673f85
more style stuff
amogh-jahagirdar Jan 16, 2026
4104097
Update delete loader to use IOUtil.readDV API
amogh-jahagirdar Jan 16, 2026
9972ce0
Update interface documentation
amogh-jahagirdar Jan 16, 2026
e75bb03
address Ryan's feedback
amogh-jahagirdar Jan 25, 2026
6bccc52
Dedupe pos/equality deletes to preserve previous behavior, prevent an…
amogh-jahagirdar Jan 26, 2026
9bb9c56
Make DV tracking a linked hashmap to preserve ordering of entries
amogh-jahagirdar Jan 27, 2026
85801f1
Remove passing in specs to util
amogh-jahagirdar Jan 27, 2026
669f125
more cleanup
amogh-jahagirdar Jan 27, 2026
156 changes: 156 additions & 0 deletions core/src/main/java/org/apache/iceberg/DVUtil.java
@@ -0,0 +1,156 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.stream.Collectors;
import org.apache.iceberg.deletes.BaseDVFileWriter;
import org.apache.iceberg.deletes.DVFileWriter;
import org.apache.iceberg.deletes.Deletes;
import org.apache.iceberg.deletes.PositionDeleteIndex;
import org.apache.iceberg.io.DeleteWriteResult;
import org.apache.iceberg.io.OutputFileFactory;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.util.Tasks;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class DVUtil {
private static final Logger LOG = LoggerFactory.getLogger(DVUtil.class);

private DVUtil() {}

/**
* Merges duplicate DVs for the same data file and writes the merged DV Puffin files.
*
* @param duplicateDVsByReferencedFile map of data file location to duplicate DVs (all entries
* must have size > 1)
* @return newly merged DVs
*/
static List<DeleteFile> mergeDVsAndWrite(
TableOperations ops,
Map<String, List<DeleteFile>> duplicateDVsByReferencedFile,
String tableName,
ExecutorService threadpool) {
Map<String, PositionDeleteIndex> mergedIndices =
duplicateDVsByReferencedFile.entrySet().stream()
.collect(
Collectors.toMap(
Map.Entry::getKey,
entry -> readDVsAndMerge(ops, entry.getValue(), threadpool)));

return writeMergedDVs(
mergedIndices, duplicateDVsByReferencedFile, ops, tableName, ops.current().specsById());
}

// Merges the position indices for the duplicate DVs for a given referenced file
private static PositionDeleteIndex readDVsAndMerge(
TableOperations ops, List<DeleteFile> dvsForFile, ExecutorService pool) {
Preconditions.checkArgument(dvsForFile.size() > 1, "Expected more than 1 DV");
PositionDeleteIndex[] dvIndices = readDVs(dvsForFile, pool, ops);
PositionDeleteIndex mergedPositions = dvIndices[0];
DeleteFile firstDV = dvsForFile.get(0);

for (int i = 1; i < dvIndices.length; i++) {
DeleteFile dv = dvsForFile.get(i);
Preconditions.checkArgument(
Objects.equals(dv.dataSequenceNumber(), firstDV.dataSequenceNumber()),
"Cannot merge duplicate added DVs when data sequence numbers are different, "
+ "expected all to be added with sequence %s, but got %s",
firstDV.dataSequenceNumber(),
dv.dataSequenceNumber());

Preconditions.checkArgument(
dv.specId() == firstDV.specId(),
"Cannot merge duplicate added DVs when partition specs are different, "
+ "expected all to be added with spec %s, but got %s",
firstDV.specId(),
dv.specId());

Preconditions.checkArgument(
Objects.equals(dv.partition(), firstDV.partition()),
"Cannot merge duplicate added DVs when partition tuples are different");
Comment on lines +91 to +93
@amogh-jahagirdar (Contributor, Author) commented on Jan 27, 2026:

@rdblue let me know if you feel strongly about this check. While it is a StructLike and doesn't guarantee an equals implementation, the way I look at it is the following:

  1. Generally it'll be PartitionData, which does a type- and value-by-value comparison.
  2. Even if the implementation changes out from under us and ends up being another StructLike that doesn't override equals, it falls back to reference equality, which at worst is a false positive that just fails the commit.

Another rationale behind these checks is that if a writer produces duplicate DVs, there's also a chance of some kind of metadata record reuse issue in that writer, so this felt like an easy sanity check.

Alternatively, we could simplify this and remove these validations by assuming that the duplicate DVs are consistent along every other dimension.
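As a side note, here is a minimal plain-Java sketch of the argument in point 2 (Tuple is a hypothetical value-based stand-in for PartitionData, not an Iceberg type): a value-based equals catches real mismatches, while an equals that falls back to reference identity can at worst produce a false positive that fails the commit.

import java.util.Objects;

class PartitionEqualityCheckSketch {
  // hypothetical stand-in for PartitionData; records get a value-based equals
  record Tuple(int bucket, String day) {}

  static void checkSamePartition(Object first, Object other) {
    if (!Objects.equals(first, other)) {
      throw new IllegalArgumentException(
          "Cannot merge duplicate added DVs when partition tuples are different");
    }
  }

  public static void main(String[] args) {
    // value equality: passes
    checkSamePartition(new Tuple(3, "2026-01-27"), new Tuple(3, "2026-01-27"));
    // no equals override: reference equality, throws (a false positive that fails the commit)
    checkSamePartition(new Object(), new Object());
  }
}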


mergedPositions.merge(dvIndices[i]);
}

return mergedPositions;
}

private static PositionDeleteIndex[] readDVs(
List<DeleteFile> dvs, ExecutorService pool, TableOperations ops) {
PositionDeleteIndex[] dvIndices = new PositionDeleteIndex[dvs.size()];
Tasks.range(dvIndices.length)
.executeWith(pool)
.stopOnFailure()
.throwFailureWhenFinished()
.run(
i -> {
dvIndices[i] = Deletes.readDV(dvs.get(i), ops.io(), ops.encryption());
});

return dvIndices;
}

// Produces a single Puffin file (written with an unpartitioned output spec) containing all the merged DVs
private static List<DeleteFile> writeMergedDVs(
Map<String, PositionDeleteIndex> mergedIndices,
Map<String, List<DeleteFile>> dataFilesWithDuplicateDVs,
TableOperations ops,
String tableName,
Map<Integer, PartitionSpec> specsById) {
try (DVFileWriter dvFileWriter =
new BaseDVFileWriter(
// Use an unpartitioned spec for the location provider for the puffin containing
// all the merged DVs
OutputFileFactory.builderFor(
ops, PartitionSpec.unpartitioned(), FileFormat.PUFFIN, 1, 1)
.build(),
path -> null)) {

for (Map.Entry<String, PositionDeleteIndex> entry : mergedIndices.entrySet()) {
String referencedLocation = entry.getKey();
PositionDeleteIndex mergedPositions = entry.getValue();
List<DeleteFile> duplicateDVs = dataFilesWithDuplicateDVs.get(referencedLocation);
DeleteFile firstDV = duplicateDVs.get(0);
LOG.warn(
"Merged {} DVs for data file {}. These will be orphaned DVs in table {}",
duplicateDVs.size(),
referencedLocation,
tableName);
dvFileWriter.delete(
referencedLocation,
mergedPositions,
specsById.get(firstDV.specId()),
firstDV.partition());
}

dvFileWriter.close();
DeleteWriteResult writeResult = dvFileWriter.result();
return writeResult.deleteFiles();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
55 changes: 40 additions & 15 deletions core/src/main/java/org/apache/iceberg/MergingSnapshotProducer.java
@@ -30,6 +30,7 @@
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.encryption.EncryptedOutputFile;
import org.apache.iceberg.events.CreateSnapshotEvent;
import org.apache.iceberg.exceptions.ValidationException;
@@ -47,6 +48,7 @@
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.relocated.com.google.common.collect.Streams;
import org.apache.iceberg.util.CharSequenceSet;
import org.apache.iceberg.util.ContentFileUtil;
import org.apache.iceberg.util.DataFileSet;
@@ -55,6 +57,7 @@
import org.apache.iceberg.util.PartitionSet;
import org.apache.iceberg.util.SnapshotUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.iceberg.util.ThreadPools;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -86,8 +89,8 @@ abstract class MergingSnapshotProducer<ThisT> extends SnapshotProducer<ThisT> {
// update data
private final Map<Integer, DataFileSet> newDataFilesBySpec = Maps.newHashMap();
private Long newDataFilesDataSequenceNumber;
private final Map<Integer, DeleteFileSet> newDeleteFilesBySpec = Maps.newHashMap();
private final Set<String> newDVRefs = Sets.newHashSet();
private final List<DeleteFile> positionAndEqualityDeletes = Lists.newArrayList();
private final Map<String, List<DeleteFile>> dvsByReferencedFile = Maps.newLinkedHashMap();
Comment on lines +92 to +93
@amogh-jahagirdar (Contributor, Author) commented on Jan 27, 2026:

@rdblue These are two disjoint fields: one list for v2 deletes and a multimap for DVs.
The map is a LinkedHashMap because we have a bunch of tests with expectations on the exact order of entries in a manifest. The previous change didn't need anything special because we worked with deleteFilesBySpec, which inherently preserved the order.

I personally think our tests should move away from expecting a certain order in manifests and just assert the contents (or at least have validate methods that can be either strict or lenient about ordering). As we get into V4, maybe we'll make implementation choices to order entries in a certain way, but in the current state of things, the ordering expectation was a hindrance to making changes here.

I didn't make the test change since it's fairly large and could be distracting from this change, and I figured the LinkedHashMap has negligible overhead, so we can just preserve the existing behavior.
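For reference, a tiny sketch of the ordering guarantee being relied on here, using plain java.util maps and made-up file names:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class DvOrderingSketch {
  public static void main(String[] args) {
    // LinkedHashMap iterates in insertion order, so DV entries come out in the
    // order they were added; HashMap makes no ordering guarantee at all.
    Map<String, String> linked = new LinkedHashMap<>();
    Map<String, String> hashed = new HashMap<>();
    for (String file : new String[] {"data-2.parquet", "data-0.parquet", "data-1.parquet"}) {
      linked.put(file, "dv-for-" + file);
      hashed.put(file, "dv-for-" + file);
    }
    System.out.println(linked.keySet()); // [data-2.parquet, data-0.parquet, data-1.parquet]
    System.out.println(hashed.keySet()); // hash order; not guaranteed to be stable
  }
}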

private final List<ManifestFile> appendManifests = Lists.newArrayList();
private final List<ManifestFile> rewrittenAppendManifests = Lists.newArrayList();
private final SnapshotSummary.Builder addedFilesSummary = SnapshotSummary.builder();
@@ -222,7 +225,7 @@ protected boolean addsDataFiles() {
}

protected boolean addsDeleteFiles() {
return !newDeleteFilesBySpec.isEmpty();
return !positionAndEqualityDeletes.isEmpty() || !dvsByReferencedFile.isEmpty();
}

/** Add a data file to the new snapshot. */
@@ -265,15 +268,14 @@ private void addInternal(DeleteFile file) {
"Cannot find partition spec %s for delete file: %s",
file.specId(),
file.location());

DeleteFileSet deleteFiles =
newDeleteFilesBySpec.computeIfAbsent(spec.specId(), ignored -> DeleteFileSet.create());
if (deleteFiles.add(file)) {
addedFilesSummary.addedFile(spec, file);
@amogh-jahagirdar (Contributor, Author):

Because we may be merging duplicates, we don't update the summary for delete files until after we dedupe and are just about to write the new manifests.

hasNewDeleteFiles = true;
if (ContentFileUtil.isDV(file)) {
newDVRefs.add(file.referencedDataFile());
}
hasNewDeleteFiles = true;
@amogh-jahagirdar (Contributor, Author) commented on Jan 27, 2026:

Since we're not tracking by DeleteFileSet at the time of adding, we treat every addition as a new delete, even potential duplicates (unless we want to do a look-back in the list on every addDeleteFile, which I'm very against since it effectively makes adds an O(deletes-added^2) operation for a commit).

If we look at how hasNewDeleteFiles is actually used, I don't think this is really consequential. When hasNewDeleteFiles is true and there is cached state, we use the flag as an indication that the cache is stale and should be cleared out and its files cleaned up. Even if there are duplicates, there's at least one file which is new.

We end up merging/deduping the DVs (and the v2 position and equality deletes) anyway just before producing new manifests. See my comment below.
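A rough plain-Java sketch of the trade-off described above, with String paths standing in for DeleteFile and hypothetical helper names: a per-add look-back scans the whole list on every call, while deferring the work groups everything in one pass just before manifests are written.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class DedupeTimingSketch {
  // Per-add look-back: a linear scan on every addDeleteFile, O(n^2) over a commit.
  static boolean addWithLookback(List<String> addedDvRefs, String dvRef) {
    if (addedDvRefs.contains(dvRef)) {
      return false; // duplicate detected eagerly
    }
    return addedDvRefs.add(dvRef);
  }

  // Deferred grouping: one O(n) pass just before writing manifests; entries with
  // more than one DV are the duplicates that need to be merged.
  static Map<String, List<String>> groupByReferencedFile(List<String> addedDvRefs) {
    Map<String, List<String>> byRef = new LinkedHashMap<>();
    for (String ref : addedDvRefs) {
      byRef.computeIfAbsent(ref, r -> new ArrayList<>()).add(ref);
    }
    return byRef;
  }
}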

if (ContentFileUtil.isDV(file)) {
List<DeleteFile> dvsForReferencedFile =
dvsByReferencedFile.computeIfAbsent(
file.referencedDataFile(), newFile -> Lists.newArrayList());
dvsForReferencedFile.add(file);
} else {
positionAndEqualityDeletes.add(file);
}
}

@@ -814,7 +816,7 @@ protected void validateAddedDVs(
Expression conflictDetectionFilter,
Snapshot parent) {
// skip if there is no current table state or this operation doesn't add new DVs
if (parent == null || newDVRefs.isEmpty()) {
if (parent == null || dvsByReferencedFile.isEmpty()) {
return;
}

Expand Down Expand Up @@ -847,7 +849,7 @@ private void validateAddedDVs(
DeleteFile file = entry.file();
if (newSnapshotIds.contains(entry.snapshotId()) && ContentFileUtil.isDV(file)) {
ValidationException.check(
!newDVRefs.contains(file.referencedDataFile()),
!dvsByReferencedFile.containsKey(file.referencedDataFile()),
Contributor:

I think this change is correct, but I want to note that in the future we could avoid failing by merging DVs as long as that is allowed by the operation being committed.

@amogh-jahagirdar (Contributor, Author):

Yeah, I had an old PR out for this: https://github.com/apache/iceberg/pull/11693/files#diff-410ff1b47d9a44a2fd5dbd103cad9463d82c8f4f51aa1be63b8b403123ab6e0e (probably a bad PR title, since by definition, if the positions are disjoint for the operation, it's not conflicting).

"Found concurrently added DV for %s: %s",
file.referencedDataFile(),
ContentFileUtil.dvDesc(file));
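A hedged plain-Java sketch of the disjointness idea from this thread (java.util sets of positions stand in for PositionDeleteIndex, and the helper name is made up): concurrently added DVs for the same data file only need to fail validation when their deleted positions overlap, assuming the operation being committed allows merging.

import java.util.Collections;
import java.util.Set;

class ConcurrentDvConflictSketch {
  // Returns true when a retry could merge the two DVs instead of failing the
  // "Found concurrently added DV" validation; false means the positions overlap
  // and the validation should still fail for this operation.
  static boolean canMergeInsteadOfFail(Set<Long> ourDeletedPositions, Set<Long> theirDeletedPositions) {
    return Collections.disjoint(ourDeletedPositions, theirDeletedPositions);
  }
}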
@@ -1042,7 +1044,7 @@ private List<ManifestFile> newDataFilesAsManifests() {
}

private Iterable<ManifestFile> prepareDeleteManifests() {
if (newDeleteFilesBySpec.isEmpty()) {
if (!addsDeleteFiles()) {
return ImmutableList.of();
}

@@ -1060,9 +1062,32 @@ private List<ManifestFile> newDeleteFilesAsManifests() {
}

if (cachedNewDeleteManifests.isEmpty()) {
Map<String, List<DeleteFile>> duplicateDVs = Maps.newHashMap();
List<DeleteFile> validDVs = Lists.newArrayList();
for (Map.Entry<String, List<DeleteFile>> entry : dvsByReferencedFile.entrySet()) {
if (entry.getValue().size() > 1) {
duplicateDVs.put(entry.getKey(), entry.getValue());
} else {
validDVs.addAll(entry.getValue());
}
}

List<DeleteFile> mergedDVs =
duplicateDVs.isEmpty()
? ImmutableList.of()
: DVUtil.mergeDVsAndWrite(
ops(), duplicateDVs, tableName, ThreadPools.getDeleteWorkerPool());
// Prevent committing duplicate V2 deletes by deduping them
Map<Integer, List<DeleteFile>> newDeleteFilesBySpec =
Streams.stream(
Iterables.concat(
mergedDVs, validDVs, DeleteFileSet.of(positionAndEqualityDeletes)))
@amogh-jahagirdar (Contributor, Author):

@rdblue let me know how you feel about the DeleteFileSet.of(positionAndEqualityDeletes).
I know we were kind of against de-duping, but I think the fact that the two fields are disjoint now avoids that partition spec case you mentioned. I'm a bit worried that not deduping before producing the manifests is a regression compared to the previous behavior. And there's a good argument that if we can do it correctly and relatively cheaply, it's better to do it to avoid any bad metadata (similar to why we do it for data files).

The summary stats are produced from this "final" deleteFilesBySpec anyway, which should all be correct, so I think we're covered in general.

.map(file -> Delegates.pendingDeleteFile(file, file.dataSequenceNumber()))
.collect(Collectors.groupingBy(ContentFile::specId));
newDeleteFilesBySpec.forEach(
(specId, deleteFiles) -> {
PartitionSpec spec = ops().current().spec(specId);
deleteFiles.forEach(file -> addedFilesSummary.addedFile(spec, file));
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't like that the DeleteFile instances are still kept in two places, and that both places will deduplicate using sets but different logic once you account for the map keys. I know that because we are not deduplicating v2 deletes, we need a place for them to be tracked, but that doesn't mean we need to store the DeleteFile instances for DVs twice.

The reason why I don't like the double storage is that it doesn't handle some strange cases. For instance, what if a DeleteFile is added for the same DV but with two different (possibly equivalent, unpartitioned) specs? Then the same file would be handled twice here, but would be deduplicated by the DeleteFileSet for its referenced data file, causing a metrics bug. While the merge code checks that duplicates have the same spec ID, the DeleteFileSet does not.

I think it would be cleaner to keep a list of v2 deletes and the multimap of DVs and maintain them separately. This method should produce a new list of merged DVs, then both lists (v2 deletes and merged DVs) should be written to delete manifests by spec. It's easy enough to produce a filtered iterator, so I don't think we are buying much by grouping by spec ID as files are added.
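A hedged sketch of the separation suggested above, with a hypothetical DeleteFileStub record standing in for DeleteFile: v2 deletes live only in a list, DVs live only in the multimap keyed by referenced data file, and the two are combined per spec in one pass just before the delete manifests are written.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class SeparateDeleteTrackingSketch {
  // hypothetical stand-in for DeleteFile
  record DeleteFileStub(int specId, String referencedDataFile, boolean isDv) {}

  private final List<DeleteFileStub> v2Deletes = new ArrayList<>();
  private final Map<String, List<DeleteFileStub>> dvsByReferencedFile = new LinkedHashMap<>();

  void add(DeleteFileStub file) {
    if (file.isDv()) {
      dvsByReferencedFile
          .computeIfAbsent(file.referencedDataFile(), ref -> new ArrayList<>())
          .add(file);
    } else {
      v2Deletes.add(file); // v2 position/equality deletes never enter the DV map
    }
  }

  // Called once, just before writing delete manifests: dvsAfterMerging would be
  // the merged DVs plus the DVs that had no duplicates.
  Map<Integer, List<DeleteFileStub>> groupBySpec(List<DeleteFileStub> dvsAfterMerging) {
    Map<Integer, List<DeleteFileStub>> bySpec = new LinkedHashMap<>();
    for (DeleteFileStub file : v2Deletes) {
      bySpec.computeIfAbsent(file.specId(), id -> new ArrayList<>()).add(file);
    }
    for (DeleteFileStub dv : dvsAfterMerging) {
      bySpec.computeIfAbsent(dv.specId(), id -> new ArrayList<>()).add(dv);
    }
    return bySpec;
  }
}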

List<ManifestFile> newDeleteManifests = writeDeleteManifests(deleteFiles, spec);
cachedNewDeleteManifests.addAll(newDeleteManifests);
});
@@ -71,6 +71,17 @@ public void delete(String path, long pos, PartitionSpec spec, StructLike partiti
positions.delete(pos);
}

@Override
public void delete(
String path,
PositionDeleteIndex positionDeleteIndex,
PartitionSpec spec,
StructLike partition) {
Deletes deletes =
deletesByPath.computeIfAbsent(path, key -> new Deletes(path, spec, partition));
deletes.positions().merge(positionDeleteIndex);
}

@Override
public DeleteWriteResult result() {
Preconditions.checkState(result != null, "Cannot get result from unclosed writer");
17 changes: 17 additions & 0 deletions core/src/main/java/org/apache/iceberg/deletes/DVFileWriter.java
@@ -43,4 +43,21 @@ public interface DVFileWriter extends Closeable {
* @return the writer result
*/
DeleteWriteResult result();

/**
* Marks every position that is deleted in positionDeleteIndex as deleted in the given data file.
* Implementations should merge with existing position indices for the provided path.
*
* @param path the data file path
* @param positionDeleteIndex the position delete index containing all the positions to delete
* @param spec the data file partition spec
* @param partition the data file partition
*/
default void delete(
String path,
PositionDeleteIndex positionDeleteIndex,
PartitionSpec spec,
StructLike partition) {
throw new UnsupportedOperationException("Delete with positionDeleteIndex is not supported");
}
}
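A hedged usage sketch of the new overload, mirroring the DVUtil pattern above; writeMergedDv and its parameters are assumed to come from the caller's context rather than from any existing Iceberg API.

import java.io.IOException;
import java.util.List;
import org.apache.iceberg.DeleteFile;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.deletes.BaseDVFileWriter;
import org.apache.iceberg.deletes.DVFileWriter;
import org.apache.iceberg.deletes.PositionDeleteIndex;
import org.apache.iceberg.io.DeleteWriteResult;
import org.apache.iceberg.io.OutputFileFactory;

class DvWriterUsageSketch {
  static List<DeleteFile> writeMergedDv(
      OutputFileFactory fileFactory,
      String dataFileLocation,
      PositionDeleteIndex mergedPositions,
      PartitionSpec spec,
      StructLike partition)
      throws IOException {
    // path -> null: no previously persisted DV is loaded for the data file
    try (DVFileWriter writer = new BaseDVFileWriter(fileFactory, path -> null)) {
      writer.delete(dataFileLocation, mergedPositions, spec, partition);
      writer.close(); // close before reading the result, as DVUtil does above
      DeleteWriteResult result = writer.result();
      return result.deleteFiles();
    }
  }
}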
19 changes: 19 additions & 0 deletions core/src/main/java/org/apache/iceberg/deletes/Deletes.java
@@ -29,13 +29,20 @@
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.encryption.EncryptingFileIO;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.IOUtil;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.types.Comparators;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.CharSequenceMap;
import org.apache.iceberg.util.ContentFileUtil;
import org.apache.iceberg.util.Filter;
import org.apache.iceberg.util.SortedMerge;
import org.apache.iceberg.util.StructLikeSet;
@@ -126,6 +133,18 @@ public static <T extends StructLike> CharSequenceMap<PositionDeleteIndex> toPositi
return toPositionIndexes(posDeletes, null /* unknown delete file */);
}

public static PositionDeleteIndex readDV(
DeleteFile deleteFile, FileIO fileIO, EncryptionManager encryptionManager) {
Preconditions.checkArgument(
ContentFileUtil.isDV(deleteFile), "Delete file must be a deletion vector");
InputFile inputFile =
EncryptingFileIO.combine(fileIO, encryptionManager).newInputFile(deleteFile);
long offset = deleteFile.contentOffset();
int length = deleteFile.contentSizeInBytes().intValue();
byte[] bytes = IOUtil.readBytes(inputFile, offset, length);
return PositionDeleteIndex.deserialize(bytes, deleteFile);
}

/**
* Builds a map of position delete indexes by path.
*
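A hedged sketch of how the new readDV helper composes with PositionDeleteIndex.merge, the same shape as DVUtil.readDVsAndMerge above but sequential; mergeDuplicateDvs is a made-up helper name.

import java.util.List;
import org.apache.iceberg.DeleteFile;
import org.apache.iceberg.deletes.Deletes;
import org.apache.iceberg.deletes.PositionDeleteIndex;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;

class ReadDvSketch {
  // Reads each duplicate DV for one data file and folds it into a single index.
  static PositionDeleteIndex mergeDuplicateDvs(
      List<DeleteFile> dvsForFile, FileIO io, EncryptionManager encryption) {
    PositionDeleteIndex merged = Deletes.readDV(dvsForFile.get(0), io, encryption);
    for (DeleteFile dv : dvsForFile.subList(1, dvsForFile.size())) {
      merged.merge(Deletes.readDV(dv, io, encryption));
    }
    return merged;
  }
}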
20 changes: 20 additions & 0 deletions core/src/main/java/org/apache/iceberg/io/IOUtil.java
@@ -22,7 +22,9 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import org.apache.iceberg.relocated.com.google.common.io.ByteStreams;

public class IOUtil {
// not meant to be instantiated
@@ -49,6 +51,24 @@ public static void readFully(InputStream stream, byte[] bytes, int offset, int l
}
}

public static byte[] readBytes(InputFile inputFile, long offset, int length) {
try (SeekableInputStream stream = inputFile.newStream()) {
byte[] bytes = new byte[length];

if (stream instanceof RangeReadable) {
RangeReadable rangeReadable = (RangeReadable) stream;
rangeReadable.readFully(offset, bytes);
} else {
stream.seek(offset);
ByteStreams.readFully(stream, bytes);
}

return bytes;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}

/** Writes a buffer into a stream, making multiple write calls if necessary. */
public static void writeFully(OutputStream outputStream, ByteBuffer buffer) throws IOException {
if (!buffer.hasRemaining()) {