// James Shiffer — 8ef7a03895
// changed some defaults; added and then decided to drop repetition penalty related hyperparameters; fixed prompt formatting
// 73 lines · 2.8 KiB · TypeScript
import {
ChatInputCommandInteraction,
SlashCommandBuilder
} from 'discord.js';
import { LLMConfig } from '../types';
import 'dotenv/config';
const config: LLMConfig = {
|
|
max_new_tokens: 100,
|
|
min_new_tokens: 1,
|
|
temperature: 0.5,
|
|
top_p: 0.9,
|
|
msg_context: 8,
|
|
frequency_penalty: 0.0,
|
|
presence_penalty: 0.0
|
|
};
async function configCommand(interaction: ChatInputCommandInteraction)
|
|
{
|
|
if (interaction.user.id !== process.env.ADMIN) {
|
|
await interaction.reply("You are not authorized to change model settings");
|
|
return;
|
|
}
|
|
|
|
config.max_new_tokens = interaction.options.getInteger('max_new_tokens') ?? config.max_new_tokens;
|
|
config.min_new_tokens = interaction.options.getInteger('min_new_tokens') ?? config.min_new_tokens;
|
|
config.msg_context = interaction.options.getInteger('msg_context') ?? config.msg_context;
|
|
config.temperature = interaction.options.getNumber('temperature') ?? config.temperature;
|
|
config.top_p = interaction.options.getNumber('top_p') ?? config.top_p;
|
|
config.frequency_penalty = interaction.options.getNumber('frequency_penalty') ?? config.frequency_penalty;
|
|
config.presence_penalty = interaction.options.getNumber('presence_penalty') ?? config.presence_penalty;
|
|
await interaction.reply(`
|
|
\`\`\`
|
|
max_new_tokens = ${config.max_new_tokens}
|
|
min_new_tokens = ${config.min_new_tokens}
|
|
msg_context = ${config.msg_context}
|
|
temperature = ${config.temperature}
|
|
top_p = ${config.top_p}
|
|
frequency_penalty = ${config.frequency_penalty}
|
|
presence_penalty = ${config.presence_penalty}
|
|
\`\`\`
|
|
`);
|
|
}
export = {
|
|
data: new SlashCommandBuilder()
|
|
.setName('llmconf')
|
|
.setDescription('Change model inference settings')
|
|
.addNumberOption(
|
|
opt => opt.setName('temperature').setDescription('Temperature; not recommended w/ top_p (default: 0.7)')
|
|
)
|
|
.addNumberOption(
|
|
opt => opt.setName('top_p').setDescription('Cumulative prob. of min. token set to sample from; not recommended w/ temperature (default: 0.9)')
|
|
)
|
|
.addNumberOption(
|
|
opt => opt.setName('frequency_penalty').setDescription('[unused] Penalize tokens from reappearing multiple times; ranges from -2 to 2 (default: 0.0)')
|
|
)
|
|
.addNumberOption(
|
|
opt => opt.setName('presence_penalty').setDescription('[unused] Penalize a token from reappearing; ranges from -2 to 2 (default: 0.0)')
|
|
)
|
|
.addIntegerOption(
|
|
opt => opt.setName('max_new_tokens').setDescription('Max. new tokens (default: 100)')
|
|
)
|
|
.addIntegerOption(
|
|
opt => opt.setName('min_new_tokens').setDescription('Min. new tokens (default: 1)')
|
|
)
|
|
.addIntegerOption(
|
|
opt => opt.setName('msg_context').setDescription('Num. messages in context (default: 8)')
|
|
),
|
|
execute: configCommand,
|
|
state: () => config,
|
|
};