bbayrm0 commited on
Commit
4e9b22b
·
verified ·
1 Parent(s): 896b5f8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +38 -8
README.md CHANGED
@@ -1,4 +1,3 @@
1
-
2
  ---
3
  base_model: meta-llama/Llama-2-8b-chat-hf
4
  library_name: peft
@@ -58,18 +57,32 @@ pip install torch transformers peft accelerate bitsandbytes
58
  ### Kullanım
59
 
60
  ```python
61
- from transformers import AutoTokenizer, AutoModelForCausalLM
62
  from peft import PeftModel
63
  import torch
64
 
 
 
 
 
 
 
 
 
65
  # Model ve tokenizer yükleme
66
  model_name = "your-username/llama-8b-mold-protection"
67
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
 
 
 
68
  model = AutoModelForCausalLM.from_pretrained(
69
  model_name,
70
  torch_dtype=torch.float16,
71
  device_map="auto",
72
- load_in_4bit=True
 
73
  )
74
 
75
  # Log analizi örneği
@@ -80,20 +93,37 @@ def analyze_log(log_entry):
80
 
81
  Açıklama ve Çözüm:"""
82
 
83
- inputs = tokenizer(prompt, return_tensors="pt")
 
 
 
 
 
 
 
 
 
 
84
 
85
  with torch.no_grad():
86
  outputs = model.generate(
87
  **inputs,
88
- max_length=512,
89
  temperature=0.7,
90
  do_sample=True,
91
  top_p=0.9,
92
- pad_token_id=tokenizer.eos_token_id
 
 
 
 
93
  )
94
 
95
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
96
- return response.split("Açıklama ve Çözüm:")[-1].strip()
 
 
 
97
 
98
  # Örnek kullanım
99
  log_sample = "2025-01-15 14:30:22 | MoldProtection | CRITICAL | KALIP KORUMA UYARISI - Hatalı ROI'ler: ROI 1, ROI 2 | Tetikleyici: plc"
 
 
1
  ---
2
  base_model: meta-llama/Llama-2-8b-chat-hf
3
  library_name: peft
 
57
  ### Kullanım
58
 
59
  ```python
60
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
61
  from peft import PeftModel
62
  import torch
63
 
64
+ # Quantization konfigürasyonu
65
+ quantization_config = BitsAndBytesConfig(
66
+ load_in_4bit=True,
67
+ bnb_4bit_compute_dtype=torch.float16,
68
+ bnb_4bit_quant_type="nf4",
69
+ bnb_4bit_use_double_quant=True,
70
+ )
71
+
72
  # Model ve tokenizer yükleme
73
  model_name = "your-username/llama-8b-mold-protection"
74
  tokenizer = AutoTokenizer.from_pretrained(model_name)
75
+
76
+ # Pad token ayarlama
77
+ if tokenizer.pad_token is None:
78
+ tokenizer.pad_token = tokenizer.eos_token
79
+
80
  model = AutoModelForCausalLM.from_pretrained(
81
  model_name,
82
  torch_dtype=torch.float16,
83
  device_map="auto",
84
+ quantization_config=quantization_config,
85
+ trust_remote_code=True  # NOTE(review): likely unnecessary for a standard Llama architecture; this flag allows execution of arbitrary code from the hub repo — confirm it is really needed
86
  )
87
 
88
  # Log analizi örneği
 
93
 
94
  Açıklama ve Çözüm:"""
95
 
96
+ # Input'ları modelle aynı cihaza taşı
97
+ inputs = tokenizer(
98
+ prompt,
99
+ return_tensors="pt",
100
+ padding=True,
101
+ truncation=True,
102
+ max_length=512
103
+ )
104
+
105
+ # Input tensor'ları GPU'ya taşı
106
+ inputs = {k: v.to(model.device) for k, v in inputs.items()}
107
 
108
  with torch.no_grad():
109
  outputs = model.generate(
110
  **inputs,
111
+ max_new_tokens=256, # max_length yerine max_new_tokens kullan
112
  temperature=0.7,
113
  do_sample=True,
114
  top_p=0.9,
115
+ top_k=50,
116
+ repetition_penalty=1.1,
117
+ pad_token_id=tokenizer.eos_token_id,
118
+ eos_token_id=tokenizer.eos_token_id,
119
+ early_stopping=True  # NOTE(review): early_stopping only applies to beam search (num_beams>1); with do_sample=True it has no effect
120
  )
121
 
122
+ # Sadece yeni generate edilen kısmı al
123
+ generated_tokens = outputs[0][inputs['input_ids'].shape[1]:]
124
+ response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
125
+
126
+ return response.strip()
127
 
128
  # Örnek kullanım
129
  log_sample = "2025-01-15 14:30:22 | MoldProtection | CRITICAL | KALIP KORUMA UYARISI - Hatalı ROI'ler: ROI 1, ROI 2 | Tetikleyici: plc"