diff --git a/Package.resolved b/Package.resolved
index 4f70352..5aa2b99 100644
--- a/Package.resolved
+++ b/Package.resolved
@@ -1,5 +1,5 @@
 {
-  "originHash" : "6412c8a7adbb37a645291e2b477057e2c62a4a6a966abd88ec792f9ebea96fec",
+  "originHash" : "f7b86b800200fa069a2b288e06bafe53bc937a1851b6effeebba326a62be227e",
   "pins" : [
     {
       "identity" : "eventsource",
@@ -19,15 +19,6 @@
         "version" : "1.3.1"
       }
     },
-    {
-      "identity" : "llama.swift",
-      "kind" : "remoteSourceControl",
-      "location" : "https://github.com/mattt/llama.swift",
-      "state" : {
-        "revision" : "dc3eb03c643209482c0805966a62b86821b9bcd2",
-        "version" : "2.7921.0"
-      }
-    },
     {
       "identity" : "partialjsondecoder",
       "kind" : "remoteSourceControl",
diff --git a/README.md b/README.md
index 4f407a9..99a0569 100644
--- a/README.md
+++ b/README.md
@@ -367,6 +367,9 @@ Enable the trait in Package.swift:
 )
 ```
 
+> [!NOTE]
+> MLX supports guided generation (structured output via `@Generable`).
+
 ### llama.cpp (GGUF)
 
 Run GGUF quantized models via [llama.cpp](https://github.com/ggml-org/llama.cpp)
@@ -419,6 +422,7 @@ let response = try await session.respond(
 
 > [!NOTE]
 > Image inputs are not currently supported with `LlamaLanguageModel`.
+> Guided generation (structured output via `@Generable`) is supported.
 
 ### OpenAI
 
@@ -657,14 +661,14 @@
 swift test
 
 Tests for different language model backends have varying requirements:
 
-| Backend | Traits | Environment Variables |
-|---------|--------|----------------------|
-| CoreML | `CoreML` | `HF_TOKEN` |
-| MLX | `MLX` | `HF_TOKEN` |
-| Llama | `Llama` | `LLAMA_MODEL_PATH` |
-| Anthropic | — | `ANTHROPIC_API_KEY` |
-| OpenAI | — | `OPENAI_API_KEY` |
-| Ollama | — | — |
+| Backend   | Traits   | Environment Variables |
+| --------- | -------- | --------------------- |
+| CoreML    | `CoreML` | `HF_TOKEN`            |
+| MLX       | `MLX`    | `HF_TOKEN`            |
+| Llama     | `Llama`  | `LLAMA_MODEL_PATH`    |
+| Anthropic | —        | `ANTHROPIC_API_KEY`   |
+| OpenAI    | —        | `OPENAI_API_KEY`      |
+| Ollama    | —        | —                     |
 
 Example setup for running multiple tests at once:
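Both README notes added in this diff point at guided generation via `@Generable`. For context, here is a minimal sketch of what that structured-output call might look like, assuming the FoundationModels-style `@Generable`/`@Guide` macros and a `respond(to:generating:)` overload on `LanguageModelSession` that this package mirrors; the module import, the `Recipe` type, and the prompt are hypothetical and shown only for illustration.

```swift
import AnyLanguageModel  // assumed module name for this package

// Hypothetical type used only for illustration: the macro lets the
// model generate a value of this Swift type directly.
@Generable
struct Recipe {
    @Guide(description: "A short recipe title")
    var title: String
    var ingredients: [String]
}

// `model` is any backend configured as shown in the README
// (MLX, llama.cpp, Anthropic, OpenAI, ...).
let session = LanguageModelSession(model: model)

// Ask for a `Recipe` instead of free-form text; the backend
// constrains decoding so the result parses into the type.
let response = try await session.respond(
    to: "Suggest a simple pasta recipe.",
    generating: Recipe.self
)
print(response.content.title)
```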