30 changes: 29 additions & 1 deletion .github/workflows/deploy-pages.yml
@@ -20,8 +20,36 @@ concurrency:
  cancel-in-progress: true

jobs:
  tests:
    name: Run Test Suites
    runs-on: ubuntu-latest
    # Expose the captured status so the build job's gate can read it
    outputs:
      status: ${{ steps.capture.outputs.status }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Run tests
        run: |
          npm test

      - name: Capture test status
        id: capture
        run: |
          cat tests/test-results.json
          node -e "const fs=require('fs');const r=JSON.parse(fs.readFileSync('tests/test-results.json','utf8'));fs.appendFileSync(process.env.GITHUB_OUTPUT,'status='+r.status+'\n');"

      - name: Report test summary
        run: |
          node -e "const fs=require('fs');const r=JSON.parse(fs.readFileSync('tests/test-results.json','utf8'));console.log('# Test Summary');console.log('PolliLib',r.groups.pollilib.passed,'/',r.groups.pollilib.total);console.log('Site',r.groups.site.passed,'/',r.groups.site.total);console.log('Overall',r.passed,'/',r.total,'->',r.status);" >> $GITHUB_STEP_SUMMARY

  build:
    name: Build and Upload Artifact
    needs: tests
    if: needs.tests.outputs.status != 'fail'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
@@ -49,7 +77,7 @@ jobs:

report-build-status:
name: Report Build Status
needs: build
needs: [build, tests]
runs-on: ubuntu-latest
if: always()
steps:
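
For reference, the capture and summary steps in the workflow above assume `tests/test-results.json` roughly has the following shape; the field names are inferred from the inline Node scripts, and the real file may carry additional detail:

```json
{
  "status": "pass",
  "passed": 42,
  "total": 42,
  "groups": {
    "pollilib": { "passed": 30, "total": 30 },
    "site": { "passed": 12, "total": 12 }
  }
}
```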
80 changes: 80 additions & 0 deletions docs/polliLib.md
@@ -0,0 +1,80 @@
# PolliLib Usage Guide

PolliLib provides a lightweight client for interacting with the Pollinations API from the browser or from Node.js. This guide covers basic usage patterns for image, text, and audio generation, along with other helper utilities.

## Installation

PolliLib is bundled in this repository under `js/polliLib`. Include `polliLib-web.global.js` in the browser or import the individual modules from `js/polliLib/src` when using Node.js.

```html
<script src="js/polliLib/polliLib-web.global.js"></script>
<script>
polliLib.configure({ referrer: window.location.origin });
</script>
```
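
In Node.js (or any ESM-aware bundler), import the individual modules directly; the paths below are the same ones used in the examples throughout this guide:

```javascript
import { image } from './js/polliLib/src/image.js';
import { text, chat } from './js/polliLib/src/text.js';
import { tts, stt } from './js/polliLib/src/audio.js';
import { modelCapabilities } from './js/polliLib/src/models.js';
```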

## Image Generation

```javascript
import { image } from './js/polliLib/src/image.js';

const blob = await image('a tiny red square', {
width: 64,
height: 64,
json: false, // set to true to receive raw JSON metadata
retries: 5 // poll until the image is ready
});
```

The call returns a `Blob` containing the generated PNG. Passing `json: true` requests JSON from Pollinations and returns the parsed metadata when the service supports it. When the service responds with a placeholder JSON payload instead of an image, the function automatically polls until the actual image is available.
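
A minimal sketch of the metadata path; the exact fields in the returned payload (such as `url`) depend on what the service sends back:

```javascript
// With json: true the call resolves to the parsed JSON payload instead of a Blob.
const meta = await image('a tiny red square', { width: 64, height: 64, json: true });
console.log(meta); // field names (e.g. url) depend on the service response
```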

## Text Generation

```javascript
import { text, chat } from './js/polliLib/src/text.js';

const out = await text('Explain gravity in one sentence.', { model: 'openai' });

const chatOut = await chat({
model: 'openai',
messages: [
{ role: 'user', content: 'Say hello.' }
],
json: true // request strict JSON formatting
});
```

`text` returns a string (or an async iterator when `stream: true`). `chat` mirrors the OpenAI chat API and can also stream JSON objects when requested.
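
A streaming sketch, assuming each yielded chunk is a plain text fragment (the exact chunk shape depends on the build and transport):

```javascript
// Stream a longer completion as it is generated.
const chunks = await text('Write a short poem about tides.', { model: 'openai', stream: true });
for await (const chunk of chunks) {
  process.stdout.write(String(chunk)); // append each fragment as it arrives
}
```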

## Audio Generation

```javascript
import { tts, stt } from './js/polliLib/src/audio.js';

const speech = await tts('hello world', { voice: 'alloy' });
const transcript = await stt({ data: myArrayBuffer, format: 'mp3' });
```

`tts` produces a spoken audio `Blob` using the `openai-audio` model. `stt` performs speech‑to‑text on a provided file or raw audio buffer.
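
To play the result in the browser, the returned `Blob` can be wrapped in an object URL; this uses only standard Web APIs and assumes nothing beyond what `tts` returns:

```javascript
const speech = await tts('hello world', { voice: 'alloy' });
const player = new Audio(URL.createObjectURL(speech)); // standard Web API playback
await player.play();
```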

## Model Capabilities

```javascript
import { modelCapabilities } from './js/polliLib/src/models.js';

const caps = await modelCapabilities();
console.log(caps.image); // available image models
console.log(caps.text); // available text models
console.log(caps.audio); // 'openai-audio' model entry (includes voices when available)
```

This helper combines information from the image and text model endpoints so applications can dynamically enable features based on available capabilities.
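
A sketch of gating features on the reported capabilities; `enableVoicePicker` is a hypothetical application hook, and the `voices` field is assumed to live under the `openai-audio` entry, mirroring the bundled `listAudioVoices` helper:

```javascript
const caps = await modelCapabilities();
const voices = caps.audio?.voices ?? []; // assumed field; mirrors listAudioVoices()
if (voices.length > 0) {
  enableVoicePicker(voices); // hypothetical UI hook in your application
}
```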

## Other Utilities

- **Feeds** – `imageFeed` and `textFeed` stream recent public generations.
- **Tools & MCP** – helpers for creating tool calls and constructing MCP servers.
- **Pipeline** – compose multi‑step workflows that mix text, image and audio steps (see the sketch below).
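
A minimal pipeline sketch using the web bundle: `Pipeline.execute` runs each step's `run({ client, context })` in order and resolves to the shared `Context` (a `Map` subclass). The inline step object here is an illustrative stand-in for the built-in step classes:

```javascript
// Assumes polliLib-web.global.js is loaded (see Installation above).
const { Pipeline, Context } = polliLib.pipeline;

const ctx = await new Pipeline()
  .step({
    // Any object with an async run({ client, context }) method works as a step.
    async run({ context }) {
      const summary = await polliLib.text('Summarize the water cycle in one line.', { model: 'openai' });
      context.set('summary', summary);
    }
  })
  .execute({ context: new Context() });

console.log(ctx.get('summary'));
```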

See the source files in `js/polliLib/src` for full details on each module.

33 changes: 21 additions & 12 deletions js/chat/chat-storage.js
@@ -183,18 +183,27 @@ document.addEventListener("DOMContentLoaded", () => {
img.dataset.imageUrl = url;
img.dataset.imageId = imageId;
img.crossOrigin = "anonymous";
img.onload = () => {
loadingDiv.remove();
img.style.display = "block";
attachImageButtons(img, imageId);
};
img.onerror = () => {
loadingDiv.innerHTML = "⚠️ Failed to load image";
loadingDiv.style.display = "flex";
loadingDiv.style.justifyContent = "center";
loadingDiv.style.alignItems = "center";
};
imageContainer.appendChild(img);
let attempts = 0;
const maxAttempts = 5;
const tryReload = () => {
if (attempts++ >= maxAttempts) {
loadingDiv.innerHTML = "⚠️ Failed to load image";
loadingDiv.style.display = "flex";
loadingDiv.style.justifyContent = "center";
loadingDiv.style.alignItems = "center";
return;
}
setTimeout(() => {
img.src = url + (url.includes('?') ? '&' : '?') + 'retry=' + Date.now();
}, 1000 * attempts);
};
img.onload = () => {
loadingDiv.remove();
img.style.display = "block";
attachImageButtons(img, imageId);
};
img.onerror = tryReload;
imageContainer.appendChild(img);
const imgButtonContainer = document.createElement("div");
imgButtonContainer.className = "image-button-container";
imgButtonContainer.dataset.imageId = imageId;
34 changes: 30 additions & 4 deletions js/polliLib/polliLib-web.global.js
@@ -71,6 +71,7 @@

// --- helpers ---
const bool = v => (v == null ? undefined : (v ? 'true' : 'false'));
const sleep = ms => new Promise(res => setTimeout(res, ms));
function base64FromArrayBuffer(ab) {
const bytes = new Uint8Array(ab);
let binary = '';
@@ -83,7 +84,7 @@
}

// --- image.js ---
async function image(prompt, { model, seed, width, height, image: imgUrl, nologo, private: priv, enhance, safe, referrer } = {}, client = getDefaultClient()) {
async function image(prompt, { model, seed, width, height, image: imgUrl, nologo, private: priv, enhance, safe, referrer, json, retries = 5, retryDelayMs = 1000 } = {}, client = getDefaultClient()) {
const url = `${client.imageBase}/prompt/${encodeURIComponent(prompt)}`;
const params = {};
if (model) params.model = model;
Expand All @@ -96,8 +97,24 @@
if (enhance != null) params.enhance = bool(enhance);
if (safe != null) params.safe = bool(safe);
if (referrer) params.referrer = referrer;
const r = await client.get(url, { params });
if (json) params.json = 'true';
const headers = json ? { Accept: 'application/json' } : {};
const r = await client.get(url, { params, headers });
if (!r.ok) throw new Error(`image error ${r.status}`);
const ct = r.headers.get('content-type') ?? '';
if (ct.includes('application/json')) {
const data = await r.json();
if (json) return data;
if (data?.url) {
const ir = await fetch(data.url);
if (ir.ok) return await ir.blob();
}
if (retries > 0) {
await sleep(retryDelayMs);
return await image(prompt, { model, seed, width, height, image: imgUrl, nologo, private: priv, enhance, safe, referrer, json, retries: retries - 1, retryDelayMs }, client);
}
throw new Error('image pending');
}
return await r.blob();
}
async function imageModels(client = getDefaultClient()) {
@@ -131,7 +148,7 @@
return await r.text();
}
}
async function chat({ model, messages, seed, temperature, top_p, presence_penalty, frequency_penalty, max_tokens, stream, private: priv, tools, tool_choice, referrer }, client = getDefaultClient()) {
async function chat({ model, messages, seed, temperature, top_p, presence_penalty, frequency_penalty, max_tokens, stream, private: priv, tools, tool_choice, referrer, json }, client = getDefaultClient()) {
const url = `${client.textBase}/openai`;
const body = { model, messages };
if (seed != null) body.seed = seed;
@@ -144,6 +161,7 @@
if (tools) body.tools = tools;
if (tool_choice) body.tool_choice = tool_choice;
if (referrer) body.referrer = referrer;
if (json) body.json = true;
if (stream) {
body.stream = true;
const r = await client.postJson(url, body, { headers: { 'Accept': 'text/event-stream' } });
@@ -266,6 +284,14 @@
async function listTextModels(client) { return await textModels(client); }
async function listAudioVoices(client) { const models = await textModels(client); return models?.['openai-audio']?.voices ?? []; }

async function modelCapabilities(client = getDefaultClient()) {
const [image, text] = await Promise.all([
imageModels(client).catch(() => ({})),
textModels(client).catch(() => ({})),
]);
return { image, text, audio: text?.['openai-audio'] ?? {} };
}

// --- pipeline.js ---
class Context extends Map {}
class Pipeline { constructor() { this.steps = []; } step(s) { this.steps.push(s); return this; } async execute({ client, context = new Context() } = {}) { for (const s of this.steps) await s.run({ client, context }); return context; } }
@@ -282,7 +308,7 @@
const api = {
configure,
image, text, chat, search, tts, stt, vision,
imageModels, textModels, imageFeed, textFeed,
imageModels, textModels, imageFeed, textFeed, modelCapabilities,
tools: { functionTool, ToolBox, chatWithTools },
mcp: { serverName, toolDefinitions, generateImageUrl, generateImageBase64, listImageModels, listTextModels, listAudioVoices },
pipeline: { Context, Pipeline, TextGetStep, ImageStep, TtsStep, VisionUrlStep },
34 changes: 30 additions & 4 deletions js/polliLib/polliLib-web.global.js.bak
@@ -71,6 +71,7 @@

// --- helpers ---
const bool = v => (v == null ? undefined : (v ? 'true' : 'false'));
const sleep = ms => new Promise(res => setTimeout(res, ms));
function base64FromArrayBuffer(ab) {
const bytes = new Uint8Array(ab);
let binary = '';
@@ -83,7 +84,7 @@
}

// --- image.js ---
async function image(prompt, { model, seed, width, height, image: imgUrl, nologo, private: priv, enhance, safe, referrer } = {}, client = getDefaultClient()) {
async function image(prompt, { model, seed, width, height, image: imgUrl, nologo, private: priv, enhance, safe, referrer, json, retries = 5, retryDelayMs = 1000 } = {}, client = getDefaultClient()) {
const url = `${client.imageBase}/prompt/${encodeURIComponent(prompt)}`;
const params = {};
if (model) params.model = model;
Expand All @@ -96,8 +97,24 @@
if (enhance != null) params.enhance = bool(enhance);
if (safe != null) params.safe = bool(safe);
if (referrer) params.referrer = referrer;
const r = await client.get(url, { params });
if (json) params.json = 'true';
const headers = json ? { Accept: 'application/json' } : {};
const r = await client.get(url, { params, headers });
if (!r.ok) throw new Error(`image error ${r.status}`);
const ct = r.headers.get('content-type') ?? '';
if (ct.includes('application/json')) {
const data = await r.json();
if (json) return data;
if (data?.url) {
const ir = await fetch(data.url);
if (ir.ok) return await ir.blob();
}
if (retries > 0) {
await sleep(retryDelayMs);
return await image(prompt, { model, seed, width, height, image: imgUrl, nologo, private: priv, enhance, safe, referrer, json, retries: retries - 1, retryDelayMs }, client);
}
throw new Error('image pending');
}
return await r.blob();
}
async function imageModels(client = getDefaultClient()) {
@@ -131,7 +148,7 @@
return await r.text();
}
}
async function chat({ model, messages, seed, temperature, top_p, presence_penalty, frequency_penalty, max_tokens, stream, private: priv, tools, tool_choice, referrer }, client = getDefaultClient()) {
async function chat({ model, messages, seed, temperature, top_p, presence_penalty, frequency_penalty, max_tokens, stream, private: priv, tools, tool_choice, referrer, json }, client = getDefaultClient()) {
const url = `${client.textBase}/openai`;
const body = { model, messages };
if (seed != null) body.seed = seed;
@@ -144,6 +161,7 @@
if (tools) body.tools = tools;
if (tool_choice) body.tool_choice = tool_choice;
if (referrer) body.referrer = referrer;
if (json) body.json = true;
if (stream) {
body.stream = true;
const r = await client.postJson(url, body, { headers: { 'Accept': 'text/event-stream' } });
@@ -266,6 +284,14 @@
async function listTextModels(client) { return await textModels(client); }
async function listAudioVoices(client) { const models = await textModels(client); return models?.['openai-audio']?.voices ?? []; }

async function modelCapabilities(client = getDefaultClient()) {
const [image, text] = await Promise.all([
imageModels(client).catch(() => ({})),
textModels(client).catch(() => ({})),
]);
return { image, text, audio: text?.['openai-audio'] ?? {} };
}

// --- pipeline.js ---
class Context extends Map {}
class Pipeline { constructor() { this.steps = []; } step(s) { this.steps.push(s); return this; } async execute({ client, context = new Context() } = {}) { for (const s of this.steps) await s.run({ client, context }); return context; } }
@@ -284,7 +310,7 @@
const api = {
configure,
image, text, chat, search, tts, stt, vision,
imageModels, textModels, imageFeed, textFeed,
imageModels, textModels, imageFeed, textFeed, modelCapabilities,
tools: { functionTool, ToolBox, chatWithTools },
mcp: { serverName, toolDefinitions, generateImageUrl, generateImageBase64, listImageModels, listTextModels, listAudioVoices },
pipeline: { Context, Pipeline, TextGetStep, ImageStep, TtsStep, VisionUrlStep },
26 changes: 25 additions & 1 deletion js/polliLib/src/image.js
@@ -1,9 +1,11 @@
import { getDefaultClient } from './client.js';

const bool = v => (v == null ? undefined : (v ? 'true' : 'false'));
const sleep = ms => new Promise(res => setTimeout(res, ms));

export async function image(prompt, {
model, seed, width, height, image, nologo, private: priv, enhance, safe, referrer,
json, retries = 5, retryDelayMs = 1000,
} = {}, client = getDefaultClient()) {
const url = `${client.imageBase}/prompt/${encodeURIComponent(prompt)}`;
const params = {};
@@ -17,9 +19,31 @@ export async function image(prompt, {
if (enhance != null) params.enhance = bool(enhance);
if (safe != null) params.safe = bool(safe);
if (referrer) params.referrer = referrer;
if (json) params.json = 'true';

const r = await client.get(url, { params });
const headers = json ? { Accept: 'application/json' } : {};

const r = await client.get(url, { params, headers });
if (!r.ok) throw new Error(`image error ${r.status}`);

const ct = r.headers.get('content-type') ?? '';
if (ct.includes('application/json')) {
const data = await r.json();
if (json) return data;
if (data?.url) {
const ir = await fetch(data.url);
if (ir.ok) return await ir.blob();
}
if (retries > 0) {
await sleep(retryDelayMs);
return await image(prompt, {
model, seed, width, height, image, nologo, private: priv,
enhance, safe, referrer, json, retries: retries - 1, retryDelayMs,
}, client);
}
throw new Error('image pending');
}

return await r.blob();
}

12 changes: 12 additions & 0 deletions js/polliLib/src/models.js
@@ -0,0 +1,12 @@
import { getDefaultClient } from './client.js';
import { imageModels } from './image.js';
import { textModels } from './text.js';

export async function modelCapabilities(client = getDefaultClient()) {
const [image, text] = await Promise.all([
imageModels(client).catch(() => ({})),
textModels(client).catch(() => ({})),
]);
return { image, text, audio: text?.['openai-audio'] ?? {} };
}
