Use new uncensored model

This commit is contained in:
James S 2024-05-28 18:31:24 +00:00
parent 59264b9421
commit 29498436bc

View File

@@ -3,7 +3,7 @@ from unsloth.chat_templates import get_chat_template
from transformers import TextStreamer
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "scoliono/groupchat_lora",
model_name = "scoliono/groupchat_lora_lexi_8b",
max_seq_length = 2048,
dtype = None,
load_in_4bit = True,
@@ -12,7 +12,7 @@ FastLanguageModel.for_inference(model) # Enable native 2x faster inference
tokenizer = get_chat_template(
tokenizer,
chat_template = "chatml", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
chat_template = "llama-3", # Supports zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, unsloth
map_eos_token = True, # Maps <|im_end|> to </s> instead
)