From 29498436bcc80e47921606572b8fac549e9cc425 Mon Sep 17 00:00:00 2001 From: James S Date: Tue, 28 May 2024 18:31:24 +0000 Subject: [PATCH] Use new uncensored model --- model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model.py b/model.py index 94671b2..7bad474 100644 --- a/model.py +++ b/model.py @@ -3,7 +3,7 @@ from unsloth.chat_templates import get_chat_template from transformers import TextStreamer model, tokenizer = FastLanguageModel.from_pretrained( - model_name = "scoliono/groupchat_lora", + model_name = "scoliono/groupchat_lora_lexi_8b", max_seq_length = 2048, dtype = None, load_in_4bit = True, @@ -12,7 +12,7 @@ FastLanguageModel.for_inference(model) # Enable native 2x faster inference tokenizer = get_chat_template( tokenizer, - chat_template = "chatml", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth + chat_template = "llama-3", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth map_eos_token = True, # Maps <|im_end|> to </s> instead )