Fix crash when typing in a non-allowed channel
commit 4124be492d
parent c55e613a4a
@@ -37,6 +37,7 @@ import {
 } from './util';
 import 'dotenv/config';
 
+const KNOWN_USERNAMES = ['vinso1445', 'bapazheng', 'f0oby', 'shibe.mp4', '1thinker', 'bapabakshi', 'keliande27', 'gnuwu', 'scoliono', 'adam28405'];
 const config = {};
 
 interface CommandClient extends Client {
@@ -150,9 +151,9 @@ async function onNewMessage(message: Message)
         message
     ];
 
-    await message.channel.sendTyping();
-
     try {
+        await message.channel.sendTyping();
+
         const response = await requestLLMResponse(cleanHistoryList);
         // evaluate response
         if (!isGoodResponse(response)) {
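Why this fixes the crash: sendTyping() returns a Promise, and in a channel the bot is not allowed to type in it rejects rather than resolving. Outside the try block that rejection was unhandled and could take the process down; moved inside, the existing catch absorbs it. A minimal standalone sketch of the pattern, with a structural channel type standing in for the discord.js one:

// Sketch: sendTyping() may reject (e.g. DiscordAPIError: Missing Permissions)
// in a non-allowed channel. Awaiting it inside try makes that non-fatal.
async function respondSafely(channel: { sendTyping(): Promise<void> }) {
    try {
        await channel.sendTyping();   // may reject in a non-allowed channel
        // ...request the LLM response and reply here...
    } catch (err) {
        console.error('Cannot respond in this channel:', err);
    }
}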
@@ -213,16 +214,23 @@ async function requestLLMResponse(messages)
         queryParams.append(field, config["llmconf"].llmSettings[field]);
     }
     const llmEndpoint = `${process.env.LLM_HOST}/?${queryParams.toString()}`;
-    const messageList = messages.map((m: Message) => ({
-        role: m.author.bot ? "assistant" : "user",
-        content: m.cleanContent,
-    }));
+    const messageList = messages.map((m: Message) => {
+        let role = 'user';
+        if (m.author.id === process.env.CLIENT) {
+            role = 'assistant';
+        } else if (m.author.bot) {
+            return null;
+        } else if (KNOWN_USERNAMES.includes(m.author.username)) {
+            role = m.author.username;
+        }
+        return { role, content: m.cleanContent };
+    });
     const reqBody = [
         {
             "role": "system",
             "content": config["llmconf"].sys_prompt
         },
-        ...messageList
+        ...messageList.filter(x => x)
     ];
     logInfo("[bot] Requesting LLM response with message list: " + reqBody.map(m => m.content));
     const res = await fetch(llmEndpoint, {
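The effect of the new mapper: the bot's own messages (matched by the CLIENT env id) become the assistant turn, other bots are dropped entirely (hence the filter(x => x) on the spread), usernames from KNOWN_USERNAMES become their own role labels, and everyone else is a plain user. A runnable sketch with made-up sample data; the simplified message shape, BOT_ID, and the trimmed username list are illustrative, not the bot's actual values:

// Sketch: the new role-mapping rules on sample data. BOT_ID stands in for
// process.env.CLIENT; KNOWN is a trimmed copy of KNOWN_USERNAMES.
type ChatMessage = { role: string; content: string } | null;

const BOT_ID = '1234';
const KNOWN = ['scoliono', 'f0oby'];

const sample = [
    { author: { id: '1234', bot: true,  username: 'mybot'    }, cleanContent: 'hi!'   },
    { author: { id: '9999', bot: true,  username: 'otherbot' }, cleanContent: 'spam'  },
    { author: { id: '5678', bot: false, username: 'scoliono' }, cleanContent: 'hello' },
    { author: { id: '7777', bot: false, username: 'stranger' }, cleanContent: 'hey'   },
];

const messageList: ChatMessage[] = sample.map((m) => {
    let role = 'user';
    if (m.author.id === BOT_ID) {
        role = 'assistant';                  // the bot's own messages
    } else if (m.author.bot) {
        return null;                         // other bots are dropped entirely...
    } else if (KNOWN.includes(m.author.username)) {
        role = m.author.username;            // recognized humans keep their name
    }
    return { role, content: m.cleanContent };
});

// ...which is why the request body now spreads messageList.filter(x => x).
console.log(messageList.filter(x => x));
// -> assistant/'hi!', scoliono/'hello', user/'hey' -- 'spam' is gone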
@@ -236,7 +244,7 @@ async function requestLLMResponse(messages)
     const txtRaw: string = txt["raw"][0];
     // Depends on chat template used
     const prefix = "<|start_header_id|>assistant<|end_header_id|>\n\n";
-    const suffix = "<|reserved_special_token_";
+    const suffix = "<|eot_id|>";
     const txtStart = txtRaw.lastIndexOf(prefix);
     const txtEnd = txtRaw.slice(txtStart + prefix.length);
     const txtStop = txtEnd.indexOf(suffix) !== -1 ? txtEnd.indexOf(suffix) : txtEnd.length;
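Why the suffix changed: as the "Depends on chat template used" comment notes, the Llama 3 chat template terminates every turn with <|eot_id|>, whereas <|reserved_special_token_ only appears if the model happens to emit a reserved token, so the old marker could fail to trim the reply. A small sketch of the slicing logic against made-up raw model output:

// Sketch: extracting the last assistant turn from raw Llama-3-style output.
const txtRaw = "<|start_header_id|>user<|end_header_id|>\n\nhi<|eot_id|>"
    + "<|start_header_id|>assistant<|end_header_id|>\n\nHello there!<|eot_id|>";

const prefix = "<|start_header_id|>assistant<|end_header_id|>\n\n";
const suffix = "<|eot_id|>";

const txtStart = txtRaw.lastIndexOf(prefix);                 // last assistant header
const txtEnd = txtRaw.slice(txtStart + prefix.length);       // "Hello there!<|eot_id|>"
const txtStop = txtEnd.indexOf(suffix) !== -1 ? txtEnd.indexOf(suffix) : txtEnd.length;
console.log(txtEnd.slice(0, txtStop));                       // "Hello there!"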