unarr-zig is a Zig wrapper around selmf/unarr. It exposes:
- low-level C symbols via `unarr.c`
- a higher-level Zig API (`Archive`, `Entry`, `Format`, `Error`)
- Zig-managed build integration for fetching and compiling upstream unarr
Supported archive formats: rar, tar, zip, 7z
Requirements:
- Zig 0.16.0-dev+
- C toolchain supported by your Zig target

Build:
zig build
zig build test

Useful build flags:
zig build -Dshared=true
zig build -Denable_7z=false
zig build -Dstatic_libc=false

-Dshared=true: builds libunarr as a shared library
-Denable_7z=false: excludes the 7z source set/defines
-Dstatic_libc=false: disables the default zig libc static-libc link path
Add dependency:
zig fetch --save <repo-url>

Then in build.zig:
// Wire the fetched package into your executable's module graph.
const unarr_dep = b.dependency("unarr", .{
    .target = target,
    .optimize = optimize,
});
exe.root_module.addImport("unarr", unarr_dep.module("unarr"));

Then in Zig source:
const unarr = @import("unarr");

Possible errors:
OpenStreamFailed, OpenArchiveFailed, ParseFailed, DecompressFailed, EntryTooLarge, OutOfMemory
Archive type selector:
.rar, .tar, .zip, .@"7z"
/// Options passed when opening an archive.
pub const OpenOptions = struct {
    /// Only affects ZIP opening behavior.
    zip_deflated_only: bool = false,
};
/// Runtime version information reported by the linked unarr library.
pub const Version = struct {
    /// Raw packed upstream version number.
    packed_version: u32,
    major: u8,
    minor: u8,
    patch: u8,
    /// Human-readable version string.
    string: []const u8,
};
Open archive by filesystem path ([:0]const u8, null-terminated).
Open archive from memory buffer.
Open from raw *unarr.c.ar_stream.
Important: this API does not take stream ownership.
Releases archive resources. Closes stream only for openFile and openMemory.
Iterates entries.
- returns Entry when available
- returns null at EOF
- returns error.ParseFailed for parse errors
Repositions parser to a previously captured entry offset.
Attempts to locate an entry by name. Returns bool.
Returns parser EOF state.
ZIP global comment helpers.
entry.name() ?[]const u8
entry.rawName() ?[]const u8
entry.offset() i64
entry.size() usize
entry.filetime() i64
entry.read(out) reads exactly out.len bytes from the current entry stream position
entry.readAlloc(allocator, limit) allocates the full entry size with an explicit upper bound
const std = @import("std");
const unarr = @import("unarr");
pub fn main() !void {
    // Leak-checking allocator for the demo; deinit reports leaks on exit.
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const alloc = gpa_state.allocator();

    // openFile requires a null-terminated path ([:0]const u8).
    const zip_path = try alloc.dupeZ(u8, "/tmp/example.zip");
    defer alloc.free(zip_path);

    var archive = try unarr.Archive.openFile(.zip, zip_path, .{});
    defer archive.deinit();

    // Stateful iteration: nextEntry yields null at EOF.
    while (try archive.nextEntry()) |ent| {
        const display_name = ent.name() orelse "(unnamed)";
        std.debug.print("name={s} size={} offset={}\n", .{ display_name, ent.size(), ent.offset() });
    }
}
const std = @import("std");
const unarr = @import("unarr");
fn readNamed(path: []const u8, wanted: []const u8) !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const alloc = gpa_state.allocator();

    // Both the path and the lookup name must be null-terminated for the C layer.
    const path_buf = try alloc.dupeZ(u8, path);
    defer alloc.free(path_buf);

    var archive = try unarr.Archive.openFile(.zip, path_buf, .{});
    defer archive.deinit();

    const name_buf = try alloc.dupeZ(u8, wanted);
    defer alloc.free(name_buf);

    if (!archive.parseEntryFor(name_buf)) return error.FileNotFound;

    // parseEntryFor positions the parser on the matching entry; an Entry is
    // just a lightweight view over the archive's current parser state.
    const view: unarr.Entry = .{ .archive = &archive };
    const data = try view.readAlloc(alloc, 64 * 1024 * 1024);
    defer alloc.free(data);

    std.debug.print("read {d} bytes from {s}\n", .{ data.len, wanted });
}
const std = @import("std");
const unarr = @import("unarr");
fn rereadFirst(path: []const u8) !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const alloc = gpa_state.allocator();

    const path_buf = try alloc.dupeZ(u8, path);
    defer alloc.free(path_buf);

    var archive = try unarr.Archive.openFile(.tar, path_buf, .{});
    defer archive.deinit();

    // Capture the first entry and remember its offset for random access later.
    const first = (try archive.nextEntry()) orelse return error.EndOfStream;
    const saved_offset = first.offset();

    const pass_one = try first.readAlloc(alloc, 8 * 1024 * 1024);
    defer alloc.free(pass_one);

    // Reposition the parser to the captured offset and read the same entry again.
    try archive.parseEntryAt(saved_offset);
    const revisited: unarr.Entry = .{ .archive = &archive };
    const pass_two = try revisited.readAlloc(alloc, 8 * 1024 * 1024);
    defer alloc.free(pass_two);

    try std.testing.expectEqualSlices(u8, pass_one, pass_two);
}
const std = @import("std");
const unarr = @import("unarr");
fn parseEmbedded(bytes: []const u8) !void {
    // openMemory wraps the provided buffer directly; no path or allocator needed.
    var archive = try unarr.Archive.openMemory(.zip, bytes, .{});
    defer archive.deinit();

    // Walk every entry; reading the name exercises header parsing.
    while (try archive.nextEntry()) |item| {
        _ = item.name();
    }
}
const std = @import("std");
const unarr = @import("unarr");
fn showComment(path: []const u8) !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const alloc = gpa_state.allocator();

    const path_buf = try alloc.dupeZ(u8, path);
    defer alloc.free(path_buf);

    var archive = try unarr.Archive.openFile(.zip, path_buf, .{});
    defer archive.deinit();

    // Size the buffer from the archive's reported global-comment length.
    const comment_len = archive.globalCommentSize();
    if (comment_len == 0) return;

    const comment_buf = try alloc.alloc(u8, comment_len);
    defer alloc.free(comment_buf);

    // readGlobalComment returns the number of bytes actually copied.
    const written = archive.readGlobalComment(comment_buf);
    std.debug.print("comment: {s}\n", .{comment_buf[0..written]});
}
const std = @import("std");
const unarr = @import("unarr");
fn openWithExistingStream(data: []const u8) !void {
    // Create the stream ourselves via the raw C API; we keep ownership of it.
    const raw_stream = unarr.c.ar_open_memory(data.ptr, data.len) orelse return error.OpenStreamFailed;
    defer unarr.c.ar_close(raw_stream); // you own stream lifetime

    var archive = try unarr.Archive.openStream(.zip, raw_stream, .{});
    defer archive.deinit(); // closes archive only, not stream

    _ = try archive.nextEntry();
}
Recommended pattern:
// Recommended handling of the wrapper's error set:
switch (err) {
    error.OpenArchiveFailed => { /* unsupported/invalid format */ },
    error.ParseFailed => { /* malformed entry or traversal failure */ },
    error.DecompressFailed => { /* damaged compressed data */ },
    error.EntryTooLarge => { /* increase limit or skip file */ },
    error.OutOfMemory => { /* allocator pressure */ },
    else => return err,
}

- openFile and parseEntryFor require null-terminated strings ([:0]const u8).
- parseEntryFor/parseEntryAt reposition parser state; treat iteration as stateful.
- Entry is a lightweight view over current archive parser state, not an owned snapshot.
- readAlloc is bounded by your provided limit; use it to prevent pathological allocations.
- String pointers from C are converted to Zig slices, but validity is tied to parser progression.
filetime() is a raw upstream value; interpretation depends on archive format metadata.
The repository test suite validates:
- version mapping correctness
- reject-empty and reject-invalid inputs
- ZIP entry reading, comments, and random access by offset
- TAR multi-entry traversal and name lookup
- stream ownership contract for openStream
Run:
zig build test

Current package version is pre-1.0. API and behavior may evolve while the wrapper matures.
For compatibility-sensitive integration, pin commit hashes in your consuming build.zig.zon.