Skip to content

Commit b53bfb3

Browse files
committed
docs(getting-started): optimize model initialization and usage
Refactor the example code to use a cached model reference instead of recreating the model instance on each command invocation. This change improves performance by avoiding expensive reinitialization. Update both basic and config-based examples to demonstrate proper usage of `ComputedRef<ChatLunaChatModel>` and include null checks for safer access to the model instance.
1 parent e8e64eb commit b53bfb3

1 file changed

Lines changed: 32 additions & 6 deletions

File tree

docs/development/getting-started.md

Lines changed: 32 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -144,15 +144,29 @@ yarn create chatluna-plugin
144144
import { Context, Schema } from 'koishi'
145145
import type { ChatLunaService } from "koishi-plugin-chatluna/services/chat"
146146
import { getMessageContent } from 'koishi-plugin-chatluna/utils/string'
147+
import { ChatLunaChatModel } from 'koishi-plugin-chatluna/llm-core/platform/model'
148+
import { ComputedRef } from 'koishi-plugin-chatluna'
147149
148150
export function apply(ctx: Context) {
151+
152+
// 缓存 modelRef 避免昂贵的重新创建
153+
let modelRef: ComputedRef<ChatLunaChatModel>
154+
155+
ctx.on('ready', async () => {
156+
modelRef = await ctx.chatluna.createChatModel("openai/gpt-5-nano")
157+
})
158+
149159
ctx.command('example')
150160
.action(async ({ session }) => {
151-
// 创建一个 ChatLunaModel 实例
152-
const model = await ctx.chatluna.createChatModel("openai/gpt-5-nano")
161+
// 获取一个 ChatLunaChatModel 实例
162+
const model = modelRef?.value
163+
164+
if (!model) {
165+
return '模型实例未初始化或者不存在。'
166+
}
153167
154168
// 发送消息并获取回复
155-
const message = await model.value.invoke("你好,世界!")
169+
const message = await model.invoke(session.content)
156170
return getMessageContent(message.content)
157171
})
158172
}
@@ -178,6 +192,8 @@ import { modelSchema } from 'koishi-plugin-chatluna/utils/schema'
178192
import type {} from "koishi-plugin-chatluna/services/chat"
179193
import { getMessageContent } from 'koishi-plugin-chatluna/utils/string'
180194
import { Context, Schema } from 'koishi'
195+
import { ChatLunaChatModel } from 'koishi-plugin-chatluna/llm-core/platform/model'
196+
import { ComputedRef } from 'koishi-plugin-chatluna'
181197
182198
export interface Config {
183199
model: string
@@ -190,13 +206,23 @@ export const Config: Schema<Config> = Schema.object({
190206
export function apply(ctx: Context, config: Config) {
191207
modelSchema(ctx)
192208
209+
let modelRef: ComputedRef<ChatLunaChatModel>
210+
211+
ctx.on('ready', async () => {
212+
modelRef = await ctx.chatluna.createChatModel(config.model || ctx.chatluna.config.defaultModel)
213+
})
214+
193215
ctx.command('example')
194216
.action(async ({ session }) => {
195-
// 创建一个 ChatLunaModel 实例
196-
const model = await ctx.chatluna.createChatModel("openai/gpt-5-nano")
217+
// 获取一个 ChatLunaChatModel 实例
218+
const model = modelRef?.value
219+
220+
if (!model) {
221+
return '模型实例未初始化或者不存在。'
222+
}
197223
198224
// 发送消息并获取回复
199-
const message = await model.value.invoke("你好,世界!")
225+
const message = await model.invoke(session.content)
200226
return getMessageContent(message.content)
201227
})
202228
}

0 commit comments

Comments (0)