diff --git a/.gitignore b/.gitignore
index a027418..a989550 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,13 +1,12 @@
target
-go.mod
-go.sum
*.wasm
-wasi_*
-wit_*
-.crates.toml
-.crates2.json
+
+# TODO: Remove once the wasip3 patch is merged into upstream Go
go-*-bootstrap*
-# examples/wasip2 stub
-export_wasi_http_handler/wit_bindings.go
-# examples/wasip3 stub
-export_wasi_http_incoming_handler/wit_bindings.go
+
+# Ignoring generated code
+examples/*/go.mod
+examples/*/go.sum
+examples/*/wasi_*
+examples/*/wit_*
+examples/*/export_wasi*
diff --git a/README.md b/README.md
index 306391d..4a8ae48 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,7 @@
# Overview
+## componentize-go
This is a tool to convert a Go application to a [WebAssembly component](https://github.com/WebAssembly/component-model). It takes the following as input:
- a [WIT](https://github.com/WebAssembly/component-model/blob/main/design/mvp/WIT.md) file or directory
@@ -21,11 +22,30 @@ This is a tool to convert a Go application to a [WebAssembly component](https://
The output is a component which may be run using e.g. [`wasmtime`](https://github.com/bytecodealliance/wasmtime).
-## Installation
-### Prerequisites
+### Installation
+#### Prerequisites
- [**Rust toolchain**](https://rust-lang.org/) - Latest version
-### Run
+#### Run
```sh
cargo install --git https://github.com/bytecodealliance/componentize-go
```
+
+## go.bytecodealliance.org
+This repository hosts the vanity import path redirects for `go.bytecodealliance.org`. The HTML files in the [docs/](docs/) directory are served via GitHub Pages.
+
+### Actively Maintained Packages
+| Import Path | Description |
+|-------------|-------------|
+| `go.bytecodealliance.org/wit_types` | WIT type definitions (Option, Result, Tuple, Future, Stream, etc.) |
+| `go.bytecodealliance.org/wit_async` | Async primitives for WIT. |
+| `go.bytecodealliance.org/wit_runtime` | Runtime support for WIT. |
+
+### Legacy Packages
+These packages are located at [bytecodealliance/go-modules](https://github.com/bytecodealliance/go-modules)
+| Import Path | Description |
+|-------------|-------------|
+| `go.bytecodealliance.org/cm` | Component Model types and utilities for use with TinyGo. |
+| `go.bytecodealliance.org/wit` | TinyGo representation of the WIT (WebAssembly Interface Type) specification. |
+| `go.bytecodealliance.org/cmd/wit-bindgen-go` | Legacy wit-bindgen-go for use with TinyGo. |
+| `go.bytecodealliance.org/x` | Experimental packages. |
diff --git a/docs/cmd.html b/docs/cmd.html
index 0b848a7..74a73a4 100644
--- a/docs/cmd.html
+++ b/docs/cmd.html
@@ -1,7 +1,7 @@
go.bytecodealliance.org/cmd
-
+
diff --git a/docs/index.html b/docs/index.html
index 7674635..44984ab 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -1,7 +1,7 @@
go.bytecodealliance.org
-
+
diff --git a/docs/wit_async.html b/docs/wit_async.html
new file mode 100644
index 0000000..18efaf5
--- /dev/null
+++ b/docs/wit_async.html
@@ -0,0 +1,10 @@
+
+
+ go.bytecodealliance.org/wit_async
+
+
+
+
+ Redirecting to documentation…
+
+
diff --git a/docs/wit_runtime.html b/docs/wit_runtime.html
new file mode 100644
index 0000000..b515af4
--- /dev/null
+++ b/docs/wit_runtime.html
@@ -0,0 +1,10 @@
+
+
+ go.bytecodealliance.org/wit_runtime
+
+
+
+
+ Redirecting to documentation…
+
+
diff --git a/docs/wit_types.html b/docs/wit_types.html
new file mode 100644
index 0000000..23bce25
--- /dev/null
+++ b/docs/wit_types.html
@@ -0,0 +1,10 @@
+
+
+ go.bytecodealliance.org/wit_types
+
+
+
+
+ Redirecting to documentation…
+
+
diff --git a/docs/x.html b/docs/x.html
index 49be967..9615de9 100644
--- a/docs/x.html
+++ b/docs/x.html
@@ -1,7 +1,7 @@
go.bytecodealliance.org/x
-
+
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..7cc6381
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module go.bytecodealliance.org
+
+go 1.25.5
diff --git a/wit_async/wit_async.go b/wit_async/wit_async.go
new file mode 100644
index 0000000..de5751e
--- /dev/null
+++ b/wit_async/wit_async.go
@@ -0,0 +1,229 @@
+package wit_async
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "go.bytecodealliance.org/wit_runtime"
+)
+
+const EVENT_NONE uint32 = 0
+const EVENT_SUBTASK uint32 = 1
+const EVENT_STREAM_READ uint32 = 2
+const EVENT_STREAM_WRITE uint32 = 3
+const EVENT_FUTURE_READ uint32 = 4
+const EVENT_FUTURE_WRITE uint32 = 5
+
+const STATUS_STARTING uint32 = 0
+const STATUS_STARTED uint32 = 1
+const STATUS_RETURNED uint32 = 2
+
+const CALLBACK_CODE_EXIT uint32 = 0
+const CALLBACK_CODE_YIELD uint32 = 1
+const CALLBACK_CODE_WAIT uint32 = 2
+
+const RETURN_CODE_BLOCKED uint32 = 0xFFFFFFFF
+const RETURN_CODE_COMPLETED uint32 = 0
+const RETURN_CODE_DROPPED uint32 = 1
+
+type unit struct{}
+
+type taskState struct {
+ channel chan unit
+ waitableSet uint32
+ pending map[uint32]chan uint32
+ yielding chan unit
+ pinner runtime.Pinner
+}
+
+var state *taskState = nil
+
+func Run(closure func()) uint32 {
+ state = &taskState{
+ make(chan unit),
+ 0,
+ make(map[uint32]chan uint32),
+ nil,
+ runtime.Pinner{},
+ }
+ state.pinner.Pin(state)
+
+ defer func() {
+ state = nil
+ }()
+
+ go closure()
+
+ return callback(EVENT_NONE, 0, 0)
+}
+
+func Callback(event0, event1, event2 uint32) uint32 {
+ state = (*taskState)(contextGet())
+ contextSet(nil)
+
+ return callback(event0, event1, event2)
+}
+
+//go:linkname wasiOnIdle runtime.wasiOnIdle
+func wasiOnIdle(callback func() bool)
+
+func callback(event0, event1, event2 uint32) uint32 {
+ yielding := state.yielding
+ if state.yielding != nil {
+ state.yielding = nil
+ yielding <- unit{}
+ }
+
+ // Tell the Go scheduler to write to `state.channel` only after all
+ // goroutines have either blocked or exited. This allows us to reliably
+ // delay returning control to the host until there's truly nothing more
+ // we can do in the guest.
+ //
+ // Note that this function is _not_ currently part of upstream Go; it
+ // requires [this
+ // patch](https://github.com/dicej/go/commit/40fc123d5bce6448fc4e4601fd33bad4250b36a5)
+ wasiOnIdle(func() bool {
+ state.channel <- unit{}
+ return true
+ })
+ defer wasiOnIdle(func() bool {
+ return false
+ })
+
+ for {
+ switch event0 {
+ case EVENT_NONE:
+
+ case EVENT_SUBTASK:
+ switch event2 {
+ case STATUS_STARTING:
+ panic(fmt.Sprintf("unexpected subtask status: %v", event2))
+
+ case STATUS_STARTED:
+
+ case STATUS_RETURNED:
+ waitableJoin(event1, 0)
+ subtaskDrop(event1)
+ channel := state.pending[event1]
+ delete(state.pending, event1)
+ channel <- event2
+
+ default:
+ panic("todo")
+ }
+
+ case EVENT_STREAM_READ, EVENT_STREAM_WRITE, EVENT_FUTURE_READ, EVENT_FUTURE_WRITE:
+ waitableJoin(event1, 0)
+ channel := state.pending[event1]
+ delete(state.pending, event1)
+ channel <- event2
+
+ default:
+ panic("todo")
+ }
+
+ // Block this goroutine until the scheduler wakes us up.
+ (<-state.channel)
+
+ if state.yielding != nil {
+ contextSet(unsafe.Pointer(state))
+ if len(state.pending) == 0 {
+ return CALLBACK_CODE_YIELD
+ } else {
+ if state.waitableSet == 0 {
+ panic("unreachable")
+ }
+ event0, event1, event2 = func() (uint32, uint32, uint32) {
+ pinner := runtime.Pinner{}
+ defer pinner.Unpin()
+ buffer := wit_runtime.Allocate(&pinner, 8, 4)
+ event0 := waitableSetPoll(state.waitableSet, buffer)
+ return event0,
+ unsafe.Slice((*uint32)(buffer), 2)[0],
+ unsafe.Slice((*uint32)(buffer), 2)[1]
+ }()
+ if event0 == EVENT_NONE {
+ return CALLBACK_CODE_YIELD
+ }
+ }
+ } else if len(state.pending) == 0 {
+ state.pinner.Unpin()
+ if state.waitableSet != 0 {
+ waitableSetDrop(state.waitableSet)
+ }
+ return CALLBACK_CODE_EXIT
+ } else {
+ if state.waitableSet == 0 {
+ panic("unreachable")
+ }
+ contextSet(unsafe.Pointer(state))
+ return CALLBACK_CODE_WAIT | (state.waitableSet << 4)
+ }
+ }
+}
+
+func SubtaskWait(status uint32) {
+ subtask := status >> 4
+ status = status & 0xF
+
+ switch status {
+ case STATUS_STARTING, STATUS_STARTED:
+ if state.waitableSet == 0 {
+ state.waitableSet = waitableSetNew()
+ }
+ waitableJoin(subtask, state.waitableSet)
+ channel := make(chan uint32)
+ state.pending[subtask] = channel
+ (<-channel)
+
+ case STATUS_RETURNED:
+
+ default:
+ panic(fmt.Sprintf("unexpected subtask status: %v", status))
+ }
+}
+
+func FutureOrStreamWait(code uint32, handle int32) (uint32, uint32) {
+ if code == RETURN_CODE_BLOCKED {
+ if state.waitableSet == 0 {
+ state.waitableSet = waitableSetNew()
+ }
+ waitableJoin(uint32(handle), state.waitableSet)
+ channel := make(chan uint32)
+ state.pending[uint32(handle)] = channel
+ code = (<-channel)
+ }
+
+ count := code >> 4
+ code = code & 0xF
+
+ return code, count
+}
+
+func Yield() {
+ channel := make(chan unit)
+ state.yielding = channel
+ (<-channel)
+}
+
+//go:wasmimport $root [waitable-set-new]
+func waitableSetNew() uint32
+
+//go:wasmimport $root [waitable-set-poll]
+func waitableSetPoll(waitableSet uint32, eventPayload unsafe.Pointer) uint32
+
+//go:wasmimport $root [waitable-set-drop]
+func waitableSetDrop(waitableSet uint32)
+
+//go:wasmimport $root [waitable-join]
+func waitableJoin(waitable, waitableSet uint32)
+
+//go:wasmimport $root [context-get-0]
+func contextGet() unsafe.Pointer
+
+//go:wasmimport $root [context-set-0]
+func contextSet(value unsafe.Pointer)
+
+//go:wasmimport $root [subtask-drop]
+func subtaskDrop(subtask uint32)
diff --git a/wit_runtime/wit_runtime.go b/wit_runtime/wit_runtime.go
new file mode 100644
index 0000000..5313ff1
--- /dev/null
+++ b/wit_runtime/wit_runtime.go
@@ -0,0 +1,120 @@
+package wit_runtime
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+)
+
+type Handle struct {
+ value int32
+}
+
+func (self *Handle) Use() int32 {
+ if self.value == 0 {
+ panic("nil handle")
+ }
+ return self.value
+}
+
+func (self *Handle) Take() int32 {
+ if self.value == 0 {
+ panic("nil handle")
+ }
+ value := self.value
+ self.value = 0
+ return value
+}
+
+func (self *Handle) Set(value int32) {
+ if value == 0 {
+ panic("nil handle")
+ }
+ if self.value != 0 {
+ panic("handle already set")
+ }
+ self.value = value
+}
+
+func (self *Handle) TakeOrNil() int32 {
+ value := self.value
+ self.value = 0
+ return value
+}
+
+func MakeHandle(value int32) *Handle {
+ if value == 0 {
+ panic("nil handle")
+ }
+ return &Handle{value}
+}
+
+func Allocate(pinner *runtime.Pinner, size, align uintptr) unsafe.Pointer {
+ pointer := allocateRaw(size, align)
+ pinner.Pin(pointer)
+ return pointer
+}
+
+func allocateRaw(size, align uintptr) unsafe.Pointer {
+ if size == 0 {
+ return unsafe.Pointer(uintptr(0))
+ }
+
+ if size%align != 0 {
+ panic(fmt.Sprintf("size %v is not compatible with alignment %v", size, align))
+ }
+
+ switch align {
+ case 1:
+ return unsafe.Pointer(unsafe.SliceData(make([]uint8, size)))
+ case 2:
+ return unsafe.Pointer(unsafe.SliceData(make([]uint16, size/align)))
+ case 4:
+ return unsafe.Pointer(unsafe.SliceData(make([]uint32, size/align)))
+ case 8:
+ return unsafe.Pointer(unsafe.SliceData(make([]uint64, size/align)))
+ default:
+ panic(fmt.Sprintf("unsupported alignment: %v", align))
+ }
+}
+
+// NB: `cabi_realloc` may be called before the Go runtime has been initialized,
+// in which case we need to use `runtime.sbrk` to do allocations. The following
+// is an abbreviation of [Till's
+// efforts](https://github.com/bytecodealliance/go-modules/pull/367).
+
+//go:linkname sbrk runtime.sbrk
+func sbrk(n uintptr) unsafe.Pointer
+
+var useGCAllocations = false
+
+func init() {
+ useGCAllocations = true
+}
+
+func offset(ptr, align uintptr) uintptr {
+ newptr := (ptr + align - 1) &^ (align - 1)
+ return newptr - ptr
+}
+
+var pinner = runtime.Pinner{}
+
+func Unpin() {
+ pinner.Unpin()
+}
+
+//go:wasmexport cabi_realloc
+func cabiRealloc(oldPointer unsafe.Pointer, oldSize, align, newSize uintptr) unsafe.Pointer {
+ if oldPointer != nil || oldSize != 0 {
+ panic("todo")
+ }
+
+ if useGCAllocations {
+ return Allocate(&pinner, newSize, align)
+ } else {
+ alignedSize := newSize + offset(newSize, align)
+ unaligned := sbrk(alignedSize)
+ off := offset(uintptr(unaligned), align)
+ return unsafe.Add(unaligned, off)
+ }
+}
diff --git a/wit_types/wit_future.go b/wit_types/wit_future.go
new file mode 100644
index 0000000..f66e5ba
--- /dev/null
+++ b/wit_types/wit_future.go
@@ -0,0 +1,151 @@
+package wit_types
+
+import (
+ "runtime"
+ "unsafe"
+
+ "go.bytecodealliance.org/wit_async"
+ "go.bytecodealliance.org/wit_runtime"
+)
+
+type FutureVtable[T any] struct {
+ Size uint32
+ Align uint32
+ Read func(handle int32, item unsafe.Pointer) uint32
+ Write func(handle int32, item unsafe.Pointer) uint32
+ CancelRead func(handle int32) uint32
+ CancelWrite func(handle int32) uint32
+ DropReadable func(handle int32)
+ DropWritable func(handle int32)
+ Lift func(src unsafe.Pointer) T
+ Lower func(pinner *runtime.Pinner, value T, dst unsafe.Pointer) func()
+}
+
+type FutureReader[T any] struct {
+ vtable *FutureVtable[T]
+ handle *wit_runtime.Handle
+}
+
+// Blocks until the future completes and returns its value.
+//
+// # Panic
+//
+// Read will panic if multiple concurrent or sequential reads are attempted on the same future.
+func (self *FutureReader[T]) Read() T {
+ handle := self.handle.Take()
+ defer self.vtable.DropReadable(handle)
+
+ pinner := runtime.Pinner{}
+ defer pinner.Unpin()
+
+ buffer := wit_runtime.Allocate(&pinner, uintptr(self.vtable.Size), uintptr(self.vtable.Align))
+
+ code, _ := wit_async.FutureOrStreamWait(self.vtable.Read(handle, buffer), handle)
+
+ switch code {
+ case wit_async.RETURN_CODE_COMPLETED:
+ if self.vtable.Lift == nil {
+ return unsafe.Slice((*T)(buffer), 1)[0]
+ } else {
+ return self.vtable.Lift(buffer)
+ }
+
+ case wit_async.RETURN_CODE_DROPPED:
+ panic("unreachable")
+
+ default:
+ panic("todo: handle cancellation")
+ }
+}
+
+// Notify the host that the FutureReader is no longer being used.
+func (self *FutureReader[T]) Drop() {
+ handle := self.handle.TakeOrNil()
+ if handle != 0 {
+ self.vtable.DropReadable(handle)
+ }
+}
+
+func (self *FutureReader[T]) TakeHandle() int32 {
+ return self.handle.Take()
+}
+
+func (self *FutureReader[T]) SetHandle(handle int32) {
+ self.handle.Set(handle)
+}
+
+func MakeFutureReader[T any](vtable *FutureVtable[T], handleValue int32) *FutureReader[T] {
+ handle := wit_runtime.MakeHandle(handleValue)
+ value := &FutureReader[T]{vtable, handle}
+ runtime.AddCleanup(value, func(_ int) {
+ handleValue := handle.TakeOrNil()
+ if handleValue != 0 {
+ vtable.DropReadable(handleValue)
+ }
+ }, 0)
+ return value
+}
+
+type FutureWriter[T any] struct {
+ vtable *FutureVtable[T]
+ handle *wit_runtime.Handle
+}
+
+// Writes data to a future.
+//
+// # Panic
+//
+// Write will panic if multiple concurrent or sequential writes are attempted on the same future.
+func (self *FutureWriter[T]) Write(item T) bool {
+ handle := self.handle.Take()
+ defer self.vtable.DropWritable(handle)
+
+ pinner := runtime.Pinner{}
+ defer pinner.Unpin()
+
+ var lifter func()
+ var buffer unsafe.Pointer
+ if self.vtable.Lower == nil {
+ buffer = unsafe.Pointer(unsafe.SliceData([]T{item}))
+ pinner.Pin(buffer)
+ } else {
+ buffer = wit_runtime.Allocate(&pinner, uintptr(self.vtable.Size), uintptr(self.vtable.Align))
+ lifter = self.vtable.Lower(&pinner, item, buffer)
+ }
+
+ code, _ := wit_async.FutureOrStreamWait(self.vtable.Write(handle, buffer), handle)
+
+ switch code {
+ case wit_async.RETURN_CODE_COMPLETED:
+ return true
+
+ case wit_async.RETURN_CODE_DROPPED:
+ if lifter != nil {
+ lifter()
+ }
+ return false
+
+ default:
+ panic("todo: handle cancellation")
+ }
+}
+
+// Notify the host that the FutureWriter is no longer being used.
+func (self *FutureWriter[T]) Drop() {
+ handle := self.handle.TakeOrNil()
+ if handle != 0 {
+ self.vtable.DropWritable(handle)
+ }
+}
+
+func MakeFutureWriter[T any](vtable *FutureVtable[T], handleValue int32) *FutureWriter[T] {
+	handle := wit_runtime.MakeHandle(handleValue)
+	value := &FutureWriter[T]{vtable, handle}
+	runtime.AddCleanup(value, func(_ int) {
+		handleValue := handle.TakeOrNil()
+		if handleValue != 0 {
+			vtable.DropWritable(handleValue)
+		}
+	}, 0)
+	return value
+}
diff --git a/wit_types/wit_option.go b/wit_types/wit_option.go
new file mode 100644
index 0000000..e54d804
--- /dev/null
+++ b/wit_types/wit_option.go
@@ -0,0 +1,46 @@
+package wit_types
+
+const (
+ OptionNone = 0
+ OptionSome = 1
+)
+
+type Option[T any] struct {
+ tag uint8
+ value T
+}
+
+func (self Option[T]) Tag() uint8 {
+ return self.tag
+}
+
+func (self Option[T]) Some() T {
+ if self.tag != OptionSome {
+ panic("tag mismatch")
+ }
+ return self.value
+}
+
+func (self Option[T]) SomeOr(value T) T {
+ if self.tag != OptionSome {
+ return value
+ } else {
+ return self.value
+ }
+}
+
+func (self Option[T]) IsSome() bool {
+ return self.tag == OptionSome
+}
+
+func (self Option[T]) IsNone() bool {
+ return self.tag == OptionNone
+}
+
+func None[T any]() Option[T] {
+ return Option[T]{OptionNone, make([]T, 1)[0]}
+}
+
+func Some[T any](value T) Option[T] {
+ return Option[T]{OptionSome, value}
+}
diff --git a/wit_types/wit_result.go b/wit_types/wit_result.go
new file mode 100644
index 0000000..92d7021
--- /dev/null
+++ b/wit_types/wit_result.go
@@ -0,0 +1,45 @@
+package wit_types
+
+const (
+ ResultOk = 0
+ ResultErr = 1
+)
+
+type Result[T any, U any] struct {
+ tag uint8
+ value any
+}
+
+func (self Result[T, U]) Tag() uint8 {
+ return self.tag
+}
+
+func (self Result[T, U]) Ok() T {
+ if self.tag != ResultOk {
+ panic("tag mismatch")
+ }
+ return self.value.(T)
+}
+
+func (self Result[T, U]) Err() U {
+ if self.tag != ResultErr {
+ panic("tag mismatch")
+ }
+ return self.value.(U)
+}
+
+func (self Result[T, U]) IsErr() bool {
+ return self.tag == ResultErr
+}
+
+func (self Result[T, U]) IsOk() bool {
+ return self.tag == ResultOk
+}
+
+func Ok[T any, U any](value T) Result[T, U] {
+ return Result[T, U]{ResultOk, value}
+}
+
+func Err[T any, U any](value U) Result[T, U] {
+ return Result[T, U]{ResultErr, value}
+}
diff --git a/wit_types/wit_stream.go b/wit_types/wit_stream.go
new file mode 100644
index 0000000..c1d71ee
--- /dev/null
+++ b/wit_types/wit_stream.go
@@ -0,0 +1,208 @@
+package wit_types
+
+import (
+ "runtime"
+ "unsafe"
+
+ "go.bytecodealliance.org/wit_async"
+ "go.bytecodealliance.org/wit_runtime"
+)
+
+type StreamVtable[T any] struct {
+ Size uint32
+ Align uint32
+ Read func(handle int32, items unsafe.Pointer, length uint32) uint32
+ Write func(handle int32, items unsafe.Pointer, length uint32) uint32
+ CancelRead func(handle int32) uint32
+ CancelWrite func(handle int32) uint32
+ DropReadable func(handle int32)
+ DropWritable func(handle int32)
+ Lift func(src unsafe.Pointer) T
+ Lower func(pinner *runtime.Pinner, value T, dst unsafe.Pointer) func()
+}
+
+type StreamReader[T any] struct {
+ vtable *StreamVtable[T]
+ handle *wit_runtime.Handle
+ writerDropped bool
+}
+
+func (self *StreamReader[T]) WriterDropped() bool {
+ return self.writerDropped
+}
+
+// Reads data from a stream into a destination slice.
+//
+// Blocks until the read completes or the destination slice is full.
+//
+// # Panic
+//
+// Read will panic if:
+// - dst is empty (length 0)
+// - multiple concurrent reads are attempted on the same stream
+func (self *StreamReader[T]) Read(dst []T) uint32 {
+ if len(dst) == 0 {
+ panic("StreamReader.Read: destination slice cannot be empty")
+ }
+
+ handle := self.handle.Take()
+ defer self.handle.Set(handle)
+
+ if self.writerDropped {
+ return 0
+ }
+
+ pinner := runtime.Pinner{}
+ defer pinner.Unpin()
+
+ var buffer unsafe.Pointer
+ if self.vtable.Lift == nil {
+ buffer = unsafe.Pointer(unsafe.SliceData(dst))
+ } else {
+ buffer = wit_runtime.Allocate(
+ &pinner,
+ uintptr(self.vtable.Size*uint32(len(dst))),
+ uintptr(self.vtable.Align),
+ )
+ }
+ pinner.Pin(buffer)
+
+ code, count := wit_async.FutureOrStreamWait(self.vtable.Read(handle, buffer, uint32(len(dst))), handle)
+
+ if code == wit_async.RETURN_CODE_DROPPED {
+ self.writerDropped = true
+ }
+
+ if self.vtable.Lift != nil {
+ for i := 0; i < int(count); i++ {
+ dst[i] = self.vtable.Lift(unsafe.Add(buffer, i*int(self.vtable.Size)))
+ }
+ }
+
+ return count
+}
+
+// Notify the host that the StreamReader is no longer being used.
+func (self *StreamReader[T]) Drop() {
+ handle := self.handle.TakeOrNil()
+ if handle != 0 {
+ self.vtable.DropReadable(handle)
+ }
+}
+
+func (self *StreamReader[T]) TakeHandle() int32 {
+ return self.handle.Take()
+}
+
+func (self *StreamReader[T]) SetHandle(handle int32) {
+ self.handle.Set(handle)
+}
+
+func MakeStreamReader[T any](vtable *StreamVtable[T], handleValue int32) *StreamReader[T] {
+ handle := wit_runtime.MakeHandle(handleValue)
+ value := &StreamReader[T]{vtable, handle, false}
+ runtime.AddCleanup(value, func(_ int) {
+ handleValue := handle.TakeOrNil()
+ if handleValue != 0 {
+ vtable.DropReadable(handleValue)
+ }
+ }, 0)
+ return value
+}
+
+type StreamWriter[T any] struct {
+ vtable *StreamVtable[T]
+ handle *wit_runtime.Handle
+ readerDropped bool
+}
+
+func (self *StreamWriter[T]) ReaderDropped() bool {
+ return self.readerDropped
+}
+
+// Writes items to a stream, returning the count written (may be partial).
+//
+// # Panic
+//
+// Write will panic if multiple concurrent writes are attempted on the same stream.
+func (self *StreamWriter[T]) Write(items []T) uint32 {
+ handle := self.handle.Take()
+ defer self.handle.Set(handle)
+
+ if self.readerDropped {
+ return 0
+ }
+
+ pinner := runtime.Pinner{}
+ defer pinner.Unpin()
+
+ writeCount := uint32(len(items))
+
+ var lifters []func()
+ var buffer unsafe.Pointer
+ if self.vtable.Lower == nil {
+ buffer = unsafe.Pointer(unsafe.SliceData(items))
+ pinner.Pin(buffer)
+ } else {
+ lifters = make([]func(), 0, writeCount)
+ buffer = wit_runtime.Allocate(
+ &pinner,
+ uintptr(self.vtable.Size*writeCount),
+ uintptr(self.vtable.Align),
+ )
+ for index, item := range items {
+ lifters = append(
+ lifters,
+ self.vtable.Lower(&pinner, item, unsafe.Add(buffer, index*int(self.vtable.Size))),
+ )
+ }
+ }
+
+ code, count := wit_async.FutureOrStreamWait(self.vtable.Write(handle, buffer, writeCount), handle)
+
+ if lifters != nil && count < writeCount {
+ for _, lifter := range lifters[count:] {
+ lifter()
+ }
+ }
+
+ if code == wit_async.RETURN_CODE_DROPPED {
+ self.readerDropped = true
+ }
+
+ return count
+}
+
+// Writes all items to the stream, looping until complete or reader drops.
+//
+// # Panic
+//
+// WriteAll will panic if multiple concurrent writes are attempted on the same stream.
+func (self *StreamWriter[T]) WriteAll(items []T) uint32 {
+ offset := uint32(0)
+ count := uint32(len(items))
+ for offset < count && !self.readerDropped {
+ offset += self.Write(items[offset:])
+ }
+ return offset
+}
+
+// Notify the host that the StreamWriter is no longer being used.
+func (self *StreamWriter[T]) Drop() {
+ handle := self.handle.TakeOrNil()
+ if handle != 0 {
+ self.vtable.DropWritable(handle)
+ }
+}
+
+func MakeStreamWriter[T any](vtable *StreamVtable[T], handleValue int32) *StreamWriter[T] {
+	handle := wit_runtime.MakeHandle(handleValue)
+	value := &StreamWriter[T]{vtable, handle, false}
+	runtime.AddCleanup(value, func(_ int) {
+		handleValue := handle.TakeOrNil()
+		if handleValue != 0 {
+			vtable.DropWritable(handleValue)
+		}
+	}, 0)
+	return value
+}
diff --git a/wit_types/wit_tuple.go b/wit_types/wit_tuple.go
new file mode 100644
index 0000000..2c4d114
--- /dev/null
+++ b/wit_types/wit_tuple.go
@@ -0,0 +1,185 @@
+package wit_types
+
+type Tuple1[T0 any] struct {
+ F0 T0
+}
+
+type Tuple2[T0 any, T1 any] struct {
+ F0 T0
+ F1 T1
+}
+
+type Tuple3[T0 any, T1 any, T2 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+}
+
+type Tuple4[T0 any, T1 any, T2 any, T3 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+}
+
+type Tuple5[T0 any, T1 any, T2 any, T3 any, T4 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+}
+
+type Tuple6[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+}
+
+type Tuple7[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+}
+
+type Tuple8[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+}
+
+type Tuple9[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+}
+
+type Tuple10[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+}
+
+type Tuple11[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+ F10 T10
+}
+
+type Tuple12[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any, T11 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+ F10 T10
+ F11 T11
+}
+
+type Tuple13[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any, T11 any, T12 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+ F10 T10
+ F11 T11
+ F12 T12
+}
+
+type Tuple14[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any, T11 any, T12 any, T13 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+ F10 T10
+ F11 T11
+ F12 T12
+ F13 T13
+}
+
+type Tuple15[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any, T11 any, T12 any, T13 any, T14 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+ F10 T10
+ F11 T11
+ F12 T12
+ F13 T13
+ F14 T14
+}
+
+type Tuple16[T0 any, T1 any, T2 any, T3 any, T4 any, T5 any, T6 any, T7 any, T8 any, T9 any, T10 any, T11 any, T12 any, T13 any, T14 any, T15 any] struct {
+ F0 T0
+ F1 T1
+ F2 T2
+ F3 T3
+ F4 T4
+ F5 T5
+ F6 T6
+ F7 T7
+ F8 T8
+ F9 T9
+ F10 T10
+ F11 T11
+ F12 T12
+ F13 T13
+ F14 T14
+ F15 T15
+}
diff --git a/wit_types/wit_unit.go b/wit_types/wit_unit.go
new file mode 100644
index 0000000..26f927b
--- /dev/null
+++ b/wit_types/wit_unit.go
@@ -0,0 +1,3 @@
+package wit_types
+
+type Unit struct{}