---
license: apache-2.0
---
### This model is fine-tuned from Mistral-7B-Instruct-v0.2 on a dataset that is 90% Chinese and 10% English
GitHub: [Web-UI](https://github.com/moseshu/llama2-chat/tree/main/webui)
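
The snippet below loads the model with Hugging Face Transformers and runs a single-turn generation. Note that `model_id` is a placeholder; point it at this model's Hub id or local checkpoint path.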
```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder: replace with this model's Hub id or local checkpoint path
model_id = "Mistral-7B-Instruct-v0.3"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# Mistral tokenizers ship without a pad token; fall back to EOS so generate() gets a valid pad_token_id
if tokenizer.pad_token_id is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Llama-2-style prompt template used for this fine-tune
prompt = "[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Help human as much as you can.\n<</SYS>>\n\n{instruction} [/INST]"
text = prompt.format_map({"instruction": "你好,最近干嘛呢"})  # "Hi, what have you been up to lately?"

def predict(content_prompt):
    inputs = tokenizer(content_prompt, return_tensors="pt", add_special_tokens=True)
    input_ids = inputs["input_ids"].to(model.device)
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=2048,
            top_p=0.9,
            num_beams=1,
            do_sample=True,
            repetition_penalty=1.0,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s, skip_special_tokens=True)
    # Keep only the assistant reply after the final [/INST]
    return output.split("[/INST]")[-1].strip()

print(predict(text))
# Example output:
# 你好！作为一个大型语言模型，我一直在学习和提高自己的能力。最近，我一直在努力学习新知识、改进算法，以便更好地回答用户的问题并提供帮助。同时，我也会定期接受人工智能专家的指导和评估，以确保我的表现不断提升。希望这些信息对你有所帮助！
# (Translation: Hello! As a large language model, I am constantly learning and improving my abilities. Lately I have been working hard to learn new things and refine my algorithms so I can answer users' questions and help them better. I also receive regular guidance and evaluation from AI experts to make sure my performance keeps improving. I hope this information is helpful to you!)
```
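
For interactive use, you can also stream tokens to the terminal as they are generated. This is a minimal sketch using Transformers' `TextStreamer`, reusing `model`, `tokenizer`, and `text` from the snippet above; the sampling settings simply mirror that example and are not a separate recommendation:

```python
from transformers import TextStreamer

# Print tokens to stdout as they arrive; skip_prompt hides the echoed input
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

inputs = tokenizer(text, return_tensors="pt").to(model.device)
with torch.no_grad():
    model.generate(
        **inputs,
        streamer=streamer,
        max_new_tokens=2048,
        do_sample=True,
        top_p=0.9,
    )
```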