bbayrm0 committed
Commit 9dcb089 · verified · 1 Parent(s): 4e9b22b

Update README.md

Files changed (1):
  1. README.md (+33 -71)
README.md CHANGED
@@ -48,87 +48,49 @@ By analyzing the log records coming from mold protection systems:
 
 ## 🚀 Quick Start
 
-### Installation
+### Installation on Google Colab
 
 ```bash
-pip install torch transformers peft accelerate bitsandbytes
+%%capture
+!pip install unsloth
+
+# Also get the latest nightly Unsloth!
+
+!pip uninstall unsloth -y && pip install --upgrade --no-cache-dir --no-deps git+https://github.com/unslothai/unsloth.git@nightly git+https://github.com/unslothai/unsloth-zoo.git
 ```
 
 ### Usage
 
 ```python
-from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-from peft import PeftModel
+from unsloth import FastLanguageModel
 import torch
-
-# Quantization configuration
-quantization_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_compute_dtype=torch.float16,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_use_double_quant=True,
-)
-
-# Load the model and tokenizer
-model_name = "your-username/llama-8b-mold-protection"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-
-# Set a pad token if one is missing
-if tokenizer.pad_token is None:
-    tokenizer.pad_token = tokenizer.eos_token
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype=torch.float16,
-    device_map="auto",
-    quantization_config=quantization_config,
-    trust_remote_code=True
+max_seq_length = 2048  # Choose any! We auto support RoPE Scaling internally!
+dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.
+
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "bbayrm0/lora_model",
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit
 )
 
-# Example log analysis
-def analyze_log(log_entry):
-    prompt = f"""Kalıp koruma sistemi log girişini analiz et ve detaylı açıklama ile çözüm önerisi sun:
-
-{log_entry}
-
-Açıklama ve Çözüm:"""
-
-    # Tokenize the prompt
-    inputs = tokenizer(
-        prompt,
-        return_tensors="pt",
-        padding=True,
-        truncation=True,
-        max_length=512
-    )
-
-    # Move the input tensors to the model's device
-    inputs = {k: v.to(model.device) for k, v in inputs.items()}
-
-    with torch.no_grad():
-        outputs = model.generate(
-            **inputs,
-            max_new_tokens=256,  # use max_new_tokens instead of max_length
-            temperature=0.7,
-            do_sample=True,
-            top_p=0.9,
-            top_k=50,
-            repetition_penalty=1.1,
-            pad_token_id=tokenizer.eos_token_id,
-            eos_token_id=tokenizer.eos_token_id,
-            early_stopping=True
-        )
-
-    # Keep only the newly generated tokens
-    generated_tokens = outputs[0][inputs['input_ids'].shape[1]:]
-    response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
-
-    return response.strip()
-
-# Example usage
-log_sample = "2025-01-15 14:30:22 | MoldProtection | CRITICAL | KALIP KORUMA UYARISI - Hatalı ROI'ler: ROI 1, ROI 2 | Tetikleyici: plc"
-result = analyze_log(log_sample)
-print(result)
+FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
+
+messages = [
+    {"role": "user", "content": "2025-09-01 11:25:55 | MoldProtection | CRITICAL | KALIP KORUMA UYARISI - Hatalı ROI'ler: ROI 2, ROI 3, ROI 4 | Tetikleyici: manual"},
+]
+inputs = tokenizer.apply_chat_template(
+    messages,
+    tokenize = True,
+    add_generation_prompt = True,  # Must add for generation
+    return_tensors = "pt",
+).to("cuda")
+
+from transformers import TextStreamer
+text_streamer = TextStreamer(tokenizer, skip_prompt = True)
+_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 250,
+                   use_cache = True, temperature = 1, min_p = 0.1)
 ```
 
 ## 📈 Training Details
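The updated snippet streams the model's answer to stdout via `TextStreamer`. If you need the answer as a Python string instead (for logging or post-processing), here is a minimal sketch under the same setup; the `analyze_log` helper is hypothetical, not part of the README, while the `bbayrm0/lora_model` checkpoint and the sampling settings are taken from the diff above:

```python
from unsloth import FastLanguageModel

# Load the fine-tuned LoRA checkpoint exactly as in the updated README.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "bbayrm0/lora_model",
    max_seq_length = 2048,
    load_in_4bit = True,
)
FastLanguageModel.for_inference(model)

def analyze_log(log_entry: str) -> str:
    """Hypothetical helper: return the model's analysis as a string instead of streaming it."""
    messages = [{"role": "user", "content": log_entry}]
    input_ids = tokenizer.apply_chat_template(
        messages,
        tokenize = True,
        add_generation_prompt = True,
        return_tensors = "pt",
    ).to(model.device)
    outputs = model.generate(
        input_ids = input_ids,
        max_new_tokens = 250,
        use_cache = True,
        temperature = 1,
        min_p = 0.1,
    )
    # Decode only the tokens generated after the prompt.
    new_tokens = outputs[0][input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens = True).strip()

print(analyze_log(
    "2025-09-01 11:25:55 | MoldProtection | CRITICAL | "
    "KALIP KORUMA UYARISI - Hatalı ROI'ler: ROI 2, ROI 3, ROI 4 | Tetikleyici: manual"
))
```

This mirrors the behavior of the `analyze_log` helper that the commit removes, rebuilt on the Unsloth stack: the log line is wrapped in the chat template, and only the newly generated portion of the output is decoded and returned.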