my2000cup committed on
Commit
936e95e
·
1 Parent(s): de4fc1e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +27 -39
README.md CHANGED
@@ -71,52 +71,40 @@ Users (both direct and downstream) should be made aware of the risks, biases and
71
 
72
  ## How to Get Started with the Model
73
  ```python
74
- import torch
75
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
76
- from peft import PeftModel
77
-
78
- # Load the base model and tokenizer
79
- base_model_id = "deepseek-ai/deepseek-r1-distill-qwen-7b"
80
- tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
81
- tokenizer.pad_token = tokenizer.eos_token
82
-
83
- # Load the base model
84
- base_model = AutoModelForCausalLM.from_pretrained(
85
- base_model_id,
86
- device_map="auto",
87
- trust_remote_code=True,
88
- torch_dtype=torch.float16
89
- )
90
 
91
- # Load the fine-tuned LoRA adapter
92
- adapter_path = "your-username/deepseek-medical-reasoner" # Replace with actual model path
93
- model = PeftModel.from_pretrained(
94
- base_model,
95
- adapter_path,
96
- torch_dtype=torch.float16,
97
  device_map="auto"
98
  )
99
-
100
- # Create a text generation pipeline
101
- pipe = pipeline(
102
- "text-generation",
103
- model=model,
104
- tokenizer=tokenizer,
105
- max_new_tokens=512,
106
- temperature=0.6,
107
- top_p=0.95,
108
- repetition_penalty=1.15
 
109
  )
 
110
 
111
- # Example usage
112
- prompt = """Please reason step by step:
 
 
 
 
 
113
 
114
- A 45-year-old patient presents with sudden onset chest pain, shortness of breath, and anxiety.
115
- The pain is described as sharp and worsens with deep breathing.
116
- What is the most likely diagnosis and what immediate tests should be ordered?"""
117
 
118
- result = pipe(prompt)
119
- print(result[0]["generated_text"])
120
  ```
121
  [More Information Needed]
122
 
 
71
 
72
  ## How to Get Started with the Model
73
  ```python
74
+ from transformers import AutoModelForCausalLM, AutoTokenizer
75
+
76
+ model_name = "./new"
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
+ model = AutoModelForCausalLM.from_pretrained(
79
+ model_name,
80
+ torch_dtype="auto",
 
 
 
81
  device_map="auto"
82
  )
83
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
84
+
85
+ prompt = "白内障手术后视力改善的标准是什么?"
86
+ messages = [
87
+ {"role": "system", "content": "你是白内障手术医生,请回答患者的问题."},
88
+ {"role": "user", "content": prompt}
89
+ ]
90
+ text = tokenizer.apply_chat_template(
91
+ messages,
92
+ tokenize=False,
93
+ add_generation_prompt=True
94
  )
95
+ model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
96
 
97
+ generated_ids = model.generate(
98
+ **model_inputs,
99
+ max_new_tokens=512
100
+ )
101
+ generated_ids = [
102
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
103
+ ]
104
 
105
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
106
+ print(response)
 
107
 
 
 
108
  ```
109
  [More Information Needed]
110