Skip to content

Commit 79e03e7

Browse files
committed
Release v0.0.31
## What's New

### Features

- **Offline Mode (Public Alpha)** — Use 1Code with local LLMs via Ollama. Enable in Settings → Beta → Offline Mode. Requires Ollama to be installed and running.

### Improvements & Fixes

- Show "Completed Task" instead of "Task" for finished tasks
- Cmd+F now selects all text when chat search is already open
- Improved GitHub clone flow with dedicated page
- Removed duplicate PR creation toast notifications
- Better message spacing for last messages
1 parent 04530f4 commit 79e03e7

File tree

23 files changed

+1180
-528
lines changed

23 files changed

+1180
-528
lines changed

package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "21st-desktop",
3-
"version": "0.0.30",
3+
"version": "0.0.31",
44
"private": true,
55
"description": "1Code - UI for parallel work with AI agents",
66
"author": {

src/main/lib/claude/offline-handler.ts

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,15 @@ export type OfflineCheckResult = {
2222
* 1. If customConfig provided → use it
2323
* 2. If OFFLINE → use Ollama (ignore auth token)
2424
* 3. If online + auth → use Claude API
25+
*
26+
* @param customConfig - Custom config from user settings
27+
* @param claudeCodeToken - Claude Code auth token
28+
* @param selectedOllamaModel - User-selected Ollama model (optional)
2529
*/
2630
export async function checkOfflineFallback(
2731
customConfig: CustomClaudeConfig | undefined,
2832
claudeCodeToken: string | null,
33+
selectedOllamaModel?: string | null,
2934
): Promise<OfflineCheckResult> {
3035
// If custom config is provided, use it (highest priority)
3136
if (customConfig) {
@@ -63,10 +68,12 @@ export async function checkOfflineFallback(
6368
}
6469
}
6570

66-
// Use Ollama!
67-
const config = getOllamaConfig(ollamaStatus.recommendedModel)
71+
// Use Ollama with selected model or recommended model
72+
console.log(`[Offline] selectedOllamaModel param: ${selectedOllamaModel || "(null/undefined)"}, recommendedModel: ${ollamaStatus.recommendedModel}`)
73+
const modelToUse = selectedOllamaModel || ollamaStatus.recommendedModel
74+
const config = getOllamaConfig(modelToUse)
6875

69-
console.log(`[Offline] Switching to Ollama (model: ${ollamaStatus.recommendedModel})`)
76+
console.log(`[Offline] Switching to Ollama (model: ${modelToUse})`)
7077

7178
return {
7279
config,

src/main/lib/trpc/routers/chats.ts

Lines changed: 229 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import { computeContentHash, gitCache } from "../../git/cache"
2121
import { splitUnifiedDiffByFile } from "../../git/diff-parser"
2222
import { execWithShellEnv } from "../../git/shell-env"
2323
import { applyRollbackStash } from "../../git/stash"
24+
import { checkInternetConnection, checkOllamaStatus } from "../../ollama"
2425
import { terminalManager } from "../../terminal/manager"
2526
import { publicProcedure, router } from "../index"
2627

@@ -33,6 +34,149 @@ function getFallbackName(userMessage: string): string {
3334
return trimmed.substring(0, 25) + "..."
3435
}
3536

37+
/**
38+
* Generate text using local Ollama model
39+
* Used for chat title generation in offline mode
40+
* @param userMessage - The user message to generate a title for
41+
* @param model - Optional model to use (if not provided, uses recommended model)
42+
*/
43+
async function generateChatNameWithOllama(
44+
userMessage: string,
45+
model?: string | null
46+
): Promise<string | null> {
47+
try {
48+
const ollamaStatus = await checkOllamaStatus()
49+
if (!ollamaStatus.available) {
50+
return null
51+
}
52+
53+
// Use provided model, or recommended, or first available
54+
const modelToUse = model || ollamaStatus.recommendedModel || ollamaStatus.models[0]
55+
if (!modelToUse) {
56+
console.error("[Ollama] No model available")
57+
return null
58+
}
59+
60+
const prompt = `Generate a very short (2-5 words) title for a coding chat that starts with this message. Only output the title, nothing else. No quotes, no explanations.
61+
62+
User message: "${userMessage.slice(0, 500)}"
63+
64+
Title:`
65+
66+
const response = await fetch("http://localhost:11434/api/generate", {
67+
method: "POST",
68+
headers: { "Content-Type": "application/json" },
69+
body: JSON.stringify({
70+
model: modelToUse,
71+
prompt,
72+
stream: false,
73+
options: {
74+
temperature: 0.3,
75+
num_predict: 50,
76+
},
77+
}),
78+
})
79+
80+
if (!response.ok) {
81+
console.error("[Ollama] Generate chat name failed:", response.status)
82+
return null
83+
}
84+
85+
const data = await response.json()
86+
const result = data.response?.trim()
87+
if (result) {
88+
// Clean up the result - remove quotes, trim, limit length
89+
const cleaned = result
90+
.replace(/^["']|["']$/g, "")
91+
.replace(/^title:\s*/i, "")
92+
.trim()
93+
.slice(0, 50)
94+
if (cleaned.length > 0) {
95+
return cleaned
96+
}
97+
}
98+
return null
99+
} catch (error) {
100+
console.error("[Ollama] Generate chat name error:", error)
101+
return null
102+
}
103+
}
104+
105+
/**
106+
* Generate commit message using local Ollama model
107+
* Used for commit message generation in offline mode
108+
* @param diff - The diff text
109+
* @param fileCount - Number of files changed
110+
* @param additions - Lines added
111+
* @param deletions - Lines deleted
112+
* @param model - Optional model to use (if not provided, uses recommended model)
113+
*/
114+
async function generateCommitMessageWithOllama(
115+
diff: string,
116+
fileCount: number,
117+
additions: number,
118+
deletions: number,
119+
model?: string | null
120+
): Promise<string | null> {
121+
try {
122+
const ollamaStatus = await checkOllamaStatus()
123+
if (!ollamaStatus.available) {
124+
return null
125+
}
126+
127+
// Use provided model, or recommended, or first available
128+
const modelToUse = model || ollamaStatus.recommendedModel || ollamaStatus.models[0]
129+
if (!modelToUse) {
130+
console.error("[Ollama] No model available")
131+
return null
132+
}
133+
134+
const prompt = `Generate a conventional commit message for these changes. Use format: type: short description
135+
136+
Types: feat (new feature), fix (bug fix), docs, style, refactor, test, chore
137+
138+
Changes: ${fileCount} files, +${additions}/-${deletions} lines
139+
140+
Diff (truncated):
141+
${diff.slice(0, 3000)}
142+
143+
Commit message:`
144+
145+
const response = await fetch("http://localhost:11434/api/generate", {
146+
method: "POST",
147+
headers: { "Content-Type": "application/json" },
148+
body: JSON.stringify({
149+
model: modelToUse,
150+
prompt,
151+
stream: false,
152+
options: {
153+
temperature: 0.3,
154+
num_predict: 50,
155+
},
156+
}),
157+
})
158+
159+
if (!response.ok) {
160+
console.error("[Ollama] Generate commit message failed:", response.status)
161+
return null
162+
}
163+
164+
const data = await response.json()
165+
const result = data.response?.trim()
166+
if (result) {
167+
// Clean up - get just the first line
168+
const firstLine = result.split("\n")[0]?.trim()
169+
if (firstLine && firstLine.length > 0 && firstLine.length < 100) {
170+
return firstLine
171+
}
172+
}
173+
return null
174+
} catch (error) {
175+
console.error("[Ollama] Generate commit message error:", error)
176+
return null
177+
}
178+
}
179+
36180
export const chatsRouter = router({
37181
/**
38182
* List all non-archived chats (optionally filter by project)
@@ -795,11 +939,13 @@ export const chatsRouter = router({
795939
* Generate a commit message using AI based on the diff
796940
* @param chatId - The chat ID to get worktree path from
797941
* @param filePaths - Optional list of file paths to generate message for (if not provided, uses all changed files)
942+
* @param ollamaModel - Optional Ollama model for offline generation
798943
*/
799944
generateCommitMessage: publicProcedure
800945
.input(z.object({
801946
chatId: z.string(),
802947
filePaths: z.array(z.string()).optional(),
948+
ollamaModel: z.string().nullish(), // Optional model for offline mode
803949
}))
804950
.mutation(async ({ input }) => {
805951
const db = getDatabase()
@@ -844,47 +990,73 @@ export const chatsRouter = router({
844990

845991
// Build filtered diff text for API (only selected files)
846992
const filteredDiff = files.map(f => f.diffText).join('\n')
993+
const additions = files.reduce((sum, f) => sum + f.additions, 0)
994+
const deletions = files.reduce((sum, f) => sum + f.deletions, 0)
995+
996+
// Check internet first - if offline, use Ollama
997+
const hasInternet = await checkInternetConnection()
998+
999+
if (!hasInternet) {
1000+
console.log("[generateCommitMessage] Offline - trying Ollama...")
1001+
const ollamaMessage = await generateCommitMessageWithOllama(
1002+
filteredDiff,
1003+
files.length,
1004+
additions,
1005+
deletions,
1006+
input.ollamaModel
1007+
)
1008+
if (ollamaMessage) {
1009+
console.log("[generateCommitMessage] Generated via Ollama:", ollamaMessage)
1010+
return { message: ollamaMessage }
1011+
}
1012+
console.log("[generateCommitMessage] Ollama failed, using heuristic fallback")
1013+
// Fall through to heuristic fallback below
1014+
} else {
1015+
// Online - call web API to generate commit message
1016+
let apiError: string | null = null
1017+
try {
1018+
const authManager = getAuthManager()
1019+
const token = await authManager.getValidToken()
1020+
// Use localhost in dev, production otherwise
1021+
const apiUrl = process.env.NODE_ENV === "development" ? "http://localhost:3000" : "https://21st.dev"
8471022

848-
// Call web API to generate commit message
849-
let apiError: string | null = null
850-
try {
851-
const authManager = getAuthManager()
852-
const token = await authManager.getValidToken()
853-
// Use localhost in dev, production otherwise
854-
const apiUrl = process.env.NODE_ENV === "development" ? "http://localhost:3000" : "https://21st.dev"
855-
856-
if (!token) {
857-
apiError = "No auth token available"
858-
} else {
859-
const response = await fetch(
860-
`${apiUrl}/api/agents/generate-commit-message`,
861-
{
862-
method: "POST",
863-
headers: {
864-
"Content-Type": "application/json",
865-
"X-Desktop-Token": token,
1023+
if (!token) {
1024+
apiError = "No auth token available"
1025+
} else {
1026+
const response = await fetch(
1027+
`${apiUrl}/api/agents/generate-commit-message`,
1028+
{
1029+
method: "POST",
1030+
headers: {
1031+
"Content-Type": "application/json",
1032+
"X-Desktop-Token": token,
1033+
},
1034+
body: JSON.stringify({
1035+
diff: filteredDiff.slice(0, 10000), // Limit diff size, use filtered diff
1036+
fileCount: files.length,
1037+
additions,
1038+
deletions,
1039+
}),
8661040
},
867-
body: JSON.stringify({
868-
diff: filteredDiff.slice(0, 10000), // Limit diff size, use filtered diff
869-
fileCount: files.length,
870-
additions: files.reduce((sum, f) => sum + f.additions, 0),
871-
deletions: files.reduce((sum, f) => sum + f.deletions, 0),
872-
}),
873-
},
874-
)
1041+
)
8751042

876-
if (response.ok) {
877-
const data = await response.json()
878-
if (data.message) {
879-
return { message: data.message }
1043+
if (response.ok) {
1044+
const data = await response.json()
1045+
if (data.message) {
1046+
return { message: data.message }
1047+
}
1048+
apiError = "API returned ok but no message in response"
1049+
} else {
1050+
apiError = `API returned ${response.status}`
8801051
}
881-
apiError = "API returned ok but no message in response"
882-
} else {
883-
apiError = `API returned ${response.status}`
8841052
}
1053+
} catch (error) {
1054+
apiError = `API call failed: ${error instanceof Error ? error.message : String(error)}`
1055+
}
1056+
1057+
if (apiError) {
1058+
console.log("[generateCommitMessage] API error:", apiError)
8851059
}
886-
} catch (error) {
887-
apiError = `API call failed: ${error instanceof Error ? error.message : String(error)}`
8881060
}
8891061

8901062
// Fallback: Generate commit message with conventional commits style
@@ -943,26 +1115,39 @@ export const chatsRouter = router({
9431115
}),
9441116

9451117
/**
946-
* Generate a name for a sub-chat using AI (calls web API)
947-
* Always uses production API since it's a lightweight call
1118+
* Generate a name for a sub-chat using AI
1119+
* Uses Ollama when offline, otherwise calls web API
9481120
*/
9491121
generateSubChatName: publicProcedure
950-
.input(z.object({ userMessage: z.string() }))
1122+
.input(z.object({
1123+
userMessage: z.string(),
1124+
ollamaModel: z.string().nullish(), // Optional model for offline mode
1125+
}))
9511126
.mutation(async ({ input }) => {
9521127
try {
1128+
// Check internet first - if offline, use Ollama
1129+
const hasInternet = await checkInternetConnection()
1130+
1131+
if (!hasInternet) {
1132+
console.log("[generateSubChatName] Offline - trying Ollama...")
1133+
const ollamaName = await generateChatNameWithOllama(input.userMessage, input.ollamaModel)
1134+
if (ollamaName) {
1135+
console.log("[generateSubChatName] Generated name via Ollama:", ollamaName)
1136+
return { name: ollamaName }
1137+
}
1138+
console.log("[generateSubChatName] Ollama failed, using fallback")
1139+
return { name: getFallbackName(input.userMessage) }
1140+
}
1141+
1142+
// Online - use web API
9531143
const authManager = getAuthManager()
9541144
const token = await authManager.getValidToken()
955-
// Always use production API for name generation
9561145
const apiUrl = "https://21st.dev"
9571146

9581147
console.log(
959-
"[generateSubChatName] Calling API with token:",
1148+
"[generateSubChatName] Online - calling API with token:",
9601149
token ? "present" : "missing",
9611150
)
962-
console.log(
963-
"[generateSubChatName] URL:",
964-
`${apiUrl}/api/agents/sub-chat/generate-name`,
965-
)
9661151

9671152
const response = await fetch(
9681153
`${apiUrl}/api/agents/sub-chat/generate-name`,

0 commit comments

Comments (0)