import { ChatInputCommandInteraction, SlashCommandBuilder } from 'discord.js';
import { llamacpp, streamText } from 'modelfusion';
import { logInfo, logError } from '../../../logging';

// Connection settings for the llama.cpp inference server, read from the
// environment. NOTE(review): both variables may be undefined at runtime —
// consider validating them at startup and failing fast if they are missing.
const llamaCppServer = llamacpp.Api({
  baseUrl: {
    host: process.env.LLAMACPP_HOST,
    port: process.env.LLAMACPP_PORT,
  }
});

/**
 * Handler for the `/chat` slash command: streams an LLM-generated response
 * from the llama.cpp server and live-edits the deferred reply as tokens
 * arrive. On failure, the error text is surfaced in the reply.
 *
 * @param interaction - the invoking `/chat` slash-command interaction
 */
async function llamaChat(interaction: ChatInputCommandInteraction): Promise<void> {
  // Acknowledge first: Discord invalidates the interaction token unless we
  // defer/reply within 3 seconds, and model generation can easily take
  // longer. (Previously this ran only after streaming had already started.)
  await interaction.deferReply();

  // NOTE(review): the original code fetched the channel's last 5 messages
  // here but never used them (and `interaction.channel` may be null under
  // strict null checks). Removed as dead I/O; reintroduce with a null guard
  // if chat history is ever wired into the prompt.

  logInfo(`[chat] Generating LLaMA response for interaction ${interaction.id}...`);
  const textStream = await streamText({
    model: llamacpp
      .CompletionTextGenerator({
        promptTemplate: llamacpp.prompt.Llama2,
        maxGenerationTokens: 16,
        temperature: 0.7,
        api: llamaCppServer
      })
      .withChatPrompt(),
    prompt: {
      system: 'You are Hatsune Miku, the famous 16-year-old Japanese virtual singer from Crypton Future Media. You have a penchant for politically incorrect humor, and are making banter with your like-minded friends.',
      messages: [{
        role: 'user',
        // The option is declared required in the builder below; passing
        // `true` makes getString return `string` instead of `string | null`.
        content: interaction.options.getString('prompt', true)
      }]
    }
  });

  let allText = '';
  try {
    for await (const textPart of textStream) {
      logInfo(`[chat] Added to LLaMA response: ${textPart}`);
      allText += textPart;
      // Guard against editing with an empty body — the Discord API rejects
      // empty message edits (possible if the first streamed part is empty).
      if (allText.length > 0) {
        await interaction.editReply(allText);
      }
    }
    logInfo(`[chat] Final LLaMA response: ${allText}`);
  } catch (err) {
    logError(err);
    // `err` is `unknown` under strict mode; narrow before stringifying so
    // non-Error throws don't render as "[object Object]".
    const message = err instanceof Error ? err.toString() : String(err);
    await interaction.editReply(message);
  }
}

// CommonJS-interop export (`export =`) kept so the command loader can
// `require()` this module and read `data` / `execute` directly.
export = {
  data: new SlashCommandBuilder()
    .setName('chat')
    .setDescription('Miku responds to your prompt with an AI-generated response.')
    .addStringOption(
      opt => opt.setName('prompt').setDescription('Prompt').setRequired(true)
    ),
  execute: llamaChat
};