import { Message } from 'discord.js';
import { serializeMessageHistory } from '../util';
import { LLMDiscordMessage, LLMProvider } from './provider';
import 'dotenv/config';
import { logInfo } from '../../logging';
import { LLMConfig } from '../commands/types';

export class MikuAIProvider implements LLMProvider
{
    private llmToken: string;

    constructor(llmToken: string | undefined = process.env.LLM_TOKEN)
    {
        if (!llmToken) {
            throw new TypeError("LLM token was not passed in, and environment variable LLM_TOKEN was unset!");
        }
        this.llmToken = llmToken;
    }

    name() {
        return 'MikuAI: scoliono/groupchat_lora_instruct_structured-3.1-8b';
    }

    /**
     * Sends the serialized channel history to the inference server and returns
     * the generated reply's text. The auth token, system prompt, and sampling
     * parameters travel in the query string; the message history goes in the
     * JSON request body.
     */
    async requestLLMResponse(history: Message[], sysprompt: string, params: LLMConfig): Promise<string>
    {
        const queryParams = new URLSearchParams();
        queryParams.append("token", this.llmToken);
        queryParams.append("sys_prompt", sysprompt);
        if (params) {
            // Forward each sampling parameter; coerce to string since
            // URLSearchParams only accepts string values.
            for (const [field, value] of Object.entries(params)) {
                queryParams.append(field, String(value));
            }
        }
        const llmEndpoint = `${process.env.LLM_HOST}/?${queryParams.toString()}`;

        // Serialize the Discord message history, dropping any messages that
        // failed to serialize. The type predicate narrows away the nulls.
        const serialized = await Promise.all(
            history.map(serializeMessageHistory)
        );
        const messageList = serialized.filter((x): x is NonNullable<typeof x> => !!x);

        logInfo("[bot] Requesting LLM response with message list: " + messageList.map(m => m.content));
        const res = await fetch(llmEndpoint, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(messageList)
        });
        const botMsgTxt = await res.text();
        logInfo(`[bot] Server returned LLM response: ${botMsgTxt}`);
        // The server replies with a single JSON-encoded message; JSON.parse
        // will throw here if the server returned an error page instead.
        const botMsg: LLMDiscordMessage = JSON.parse(botMsgTxt);
        return botMsg.content;
    }
}
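// Usage sketch (hypothetical names; assumes LLM_HOST and LLM_TOKEN are set in
// .env, `channelHistory` is an array of discord.js Messages fetched elsewhere,
// and `config` is an LLMConfig built by the bot's command layer):
//
//   const provider = new MikuAIProvider();
//   const reply = await provider.requestLLMResponse(
//       channelHistory,
//       "You are Miku, chatting in a Discord group chat.",  // illustrative prompt
//       config
//   );
//   await message.channel.send(reply);
//
// The server's JSON response is expected to deserialize to an LLMDiscordMessage
// with, at minimum, a `content` field holding the generated reply text.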