import { invoke } from "@tauri-apps/api/core";
import { listen, type UnlistenFn } from "@tauri-apps/api/event";
/**
 * Timing and token statistics reported by the provider when a generation
 * completes. Field names are snake_case to mirror the backend payload
 * (arrives untranslated via the `assistant-stream` event).
 * NOTE(review): units are not visible here — the `*_duration` fields are
 * presumably nanoseconds (Ollama convention); confirm against the backend.
 */
export interface GenerationStats {
total_duration: number; // wall-clock time for the entire request
load_duration: number; // time spent loading the model
prompt_eval_count: number; // tokens in the evaluated prompt
prompt_eval_duration: number; // time spent evaluating the prompt
eval_count: number; // tokens generated in the response
eval_duration: number; // time spent generating the response
}
/**
 * A single chat message in the conversation history.
 * `stats` is only present on assistant messages, and only after the
 * stream's final chunk delivered generation statistics.
 */
export interface Message {
role: "user" | "assistant" | "system";
content: string;
stats?: GenerationStats;
}
/**
 * Payload of one `assistant-stream` event emitted by the backend.
 * `content` is an incremental text fragment (may be empty on the final
 * chunk); `done` marks the end of the stream; `stats` may accompany the
 * final chunk.
 */
interface StreamChunk {
content: string;
done: boolean;
stats?: GenerationStats;
}
// Module-level state using $state
// ($state is a Svelte 5 rune — this module is presumably a `.svelte.ts`
// file so these declarations stay reactive across importing components;
// confirm the file extension.)
let messages = $state<Message[]>([]); // full conversation history
let isProcessing = $state(false); // true while an assistant reply is streaming
let isProviderHealthy = $state(false); // result of the last health check
let streamingContent = ""; // non-reactive accumulator for the in-flight reply
let initialized = false; // one-shot guard for init()
let streamUnlisten: UnlistenFn | null = null; // teardown for the active stream listener
/**
 * One-shot initialisation: performs the first provider health check.
 * Safe to call repeatedly — only the first invocation does any work.
 */
async function init() {
  if (!initialized) {
    initialized = true;
    await checkHealth();
  }
}
/**
 * Query the backend for provider availability and update
 * `isProviderHealthy`. Failures are logged and reported as unhealthy
 * rather than rethrown, so callers never need their own try/catch.
 */
async function checkHealth() {
  try {
    // Type the invoke result explicitly: the command returns a boolean,
    // and the type argument keeps the assignment checked instead of
    // silently widening through the untyped default.
    isProviderHealthy = await invoke<boolean>("assistant_check_health");
  } catch (e) {
    console.error("Failed to check provider health:", e);
    isProviderHealthy = false;
  }
}
/**
 * Reset streaming state and tear down the active stream listener, if any.
 * Idempotent: calling it with no listener registered is a no-op.
 */
function finishStreaming() {
  isProcessing = false;
  streamingContent = "";
  const unlisten = streamUnlisten;
  streamUnlisten = null;
  unlisten?.();
}
/**
 * Send a user message and stream the assistant's reply into the
 * conversation.
 *
 * Appends the user message plus an empty assistant placeholder, then
 * listens for `assistant-stream` chunks and grows the placeholder's
 * content as fragments arrive. On the final chunk, generation stats (if
 * any) are attached and streaming state is torn down. On failure, the
 * placeholder is replaced with an error message that includes
 * provider-specific help text.
 *
 * @param content   Raw user input; blank/whitespace-only input is ignored.
 * @param isEnabled Whether the assistant feature is enabled in settings.
 * @param provider  Provider id ("ollama" | "openai") — used only to pick
 *                  the help text shown on error.
 * @param endpoint  Provider endpoint — interpolated into the Ollama help
 *                  text on error.
 */
async function sendMessage(
  content: string,
  isEnabled: boolean,
  provider: string,
  endpoint: string,
) {
  if (!content.trim()) return;
  // Re-entrancy guard: a second concurrent call would register a second
  // stream listener, and both would interleave writes into the same
  // tail message. Ignore sends while a reply is already streaming.
  if (isProcessing) return;
  if (!isEnabled) {
    messages = [
      ...messages,
      {
        role: "assistant",
        content: "Assistant is disabled. Enable it in Settings > AI Assistant.",
      },
    ];
    return;
  }
  // Add user message
  messages = [...messages, { role: "user", content }];
  isProcessing = true;
  streamingContent = "";
  // Add empty assistant message for streaming
  messages = [...messages, { role: "assistant", content: "" }];
  try {
    // Tear down any stale listener left behind by a stream that never
    // delivered `done` — previously it was silently overwritten, leaking
    // the subscription.
    if (streamUnlisten) {
      streamUnlisten();
      streamUnlisten = null;
    }
    // Set up the stream listener before starting the chat so no early
    // chunks can be missed.
    streamUnlisten = await listen<StreamChunk>("assistant-stream", (event) => {
      const chunk = event.payload;
      if (chunk.content) {
        streamingContent += chunk.content;
        // Update the last message (assistant's response)
        const lastIdx = messages.length - 1;
        if (lastIdx >= 0 && messages[lastIdx].role === "assistant") {
          messages[lastIdx] = {
            ...messages[lastIdx],
            content: streamingContent,
          };
          // Reassign to trigger reactivity on the array itself
          messages = [...messages];
        }
      }
      if (chunk.done) {
        if (chunk.stats) {
          const lastIdx = messages.length - 1;
          if (lastIdx >= 0 && messages[lastIdx].role === "assistant") {
            messages[lastIdx] = {
              ...messages[lastIdx],
              stats: chunk.stats,
            };
            messages = [...messages];
          }
        }
        finishStreaming();
      }
    });
    // Start streaming chat; the reply arrives via the listener above.
    await invoke<string>("assistant_chat_stream", {
      messages: messages.slice(0, -1), // Exclude the empty assistant message
    });
  } catch (e) {
    console.error("Failed to send message:", e);
    const errorMessage = e instanceof Error ? e.message : String(e);
    let helpText = "";
    if (provider === "ollama") {
      helpText = `\n\nPlease ensure Ollama is running at ${endpoint}.`;
    } else if (provider === "openai") {
      helpText = "\n\nPlease check your OpenAI API key in Settings.";
    }
    // Replace the streaming placeholder with the error text
    const lastIdx = messages.length - 1;
    if (lastIdx >= 0 && messages[lastIdx].role === "assistant") {
      messages[lastIdx] = {
        role: "assistant",
        content: `Error: ${errorMessage}${helpText}`,
      };
      messages = [...messages];
    }
    finishStreaming();
  }
}
/** Discard the conversation and any partially streamed reply text. */
function clearHistory() {
  streamingContent = "";
  messages = [];
}
/**
 * Public facade over this module's private reactive state.
 * The getters keep reads of `messages` / `isProcessing` /
 * `isProviderHealthy` reactive at the call site while preventing
 * external code from reassigning the state directly.
 */
export const assistantState = {
  // --- read-only reactive views ---
  get messages() { return messages; },
  get isProcessing() { return isProcessing; },
  get isProviderHealthy() { return isProviderHealthy; },
  // --- actions ---
  init,
  checkHealth,
  sendMessage,
  clearHistory,
};