HayatoHongo committed
Commit b4da420 · verified · 1 Parent(s): afdc1bb

Upload 本郷颯人_ChatGPT利用.csv

Files changed (1)
  1. 本郷颯人_ChatGPT利用.csv +535 -0
本郷颯人_ChatGPT利用.csv ADDED
@@ -0,0 +1,535 @@
1
+ Look into surveys or datasets that collect, at large scale, the prompts people actually send to ChatGPT and other LLMs. In English
2
+ Look into the Japanese version of the dataset called dolly
3
+
4
+ "config_dict = config.__class__.__dict__.items()
5
+ print(config_dict)
6
+
7
+ # Convert the ModelConfig class to a dict
8
+ config_dict_raw = {key: value for key, value in config_dict}
9
+ print(config_dict_raw)
10
+
11
+ ---------------------------------------------------------------------------
12
+ ValueError Traceback (most recent call last)
13
+ /tmp/ipython-input-1660747023.py in <cell line: 0>()
14
+ 1 # Convert the ModelConfig class to a dict
15
+ ----> 2 config_dict_raw = {key: value for key, value in config_dict}
16
+ 3 print(config_dict_raw)
17
+
18
+ ValueError: too many values to unpack (expected 2)"
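Editor's note on the error above: the comprehension fails because each element it receives is not a (key, value) pair. A likely cause, assuming a notebook state slightly different from the code shown, is that `config_dict` held the class `__dict__` (or its keys) rather than the `.items()` view, so each element was a key string and unpacking it into two names raised this ValueError. The corrected pattern, which also appears later in this log, calls `.items()` inline and filters dunder entries. The `ModelConfig` class below is a made-up stand-in for illustration, not the actual config from the notebook.

# Minimal sketch (hypothetical ModelConfig, invented attribute values)
class ModelConfig:
    batch_size = 8
    input_sequence_length = 128
    total_training_steps = 1000

config = ModelConfig()

# Calling .items() inline guarantees (key, value) pairs; the dunder filter drops
# entries such as __module__, __dict__, and __doc__.
config_dict = {k: v for k, v in config.__class__.__dict__.items() if not k.startswith("__")}
print(config_dict)  # {'batch_size': 8, 'input_sequence_length': 128, 'total_training_steps': 1000}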
19
+ Please find tutorial material, in ipynb notebook format, that crawls something like fineweb or a web corpus
20
+ "For the following, prepare one pseudo data sample and show in comments how that sample changes with each line of code
21
+
22
+ from datasketch import MinHash, MinHashLSH
23
+
24
+ lsh = MinHashLSH(threshold=0.8, num_perm=128)
25
+ minhash_store = {}
26
+
27
+ def is_duplicate(text: str) -> bool:
28
+ m = MinHash(num_perm=128)
29
+ for w in text.split():
30
+ m.update(w.encode(""utf8""))
31
+ # Treat as a duplicate if it is similar to an existing entry
32
+ if lsh.query(m):
33
+ return True
34
+ # Register it if seen for the first time
35
+ key = str(len(minhash_store))
36
+ lsh.insert(key, m)
37
+ minhash_store[key] = True
38
+ return False
39
+
40
+ # Apply to the concrete example (should be False the first time)
41
+ print(""is_duplicate? "", is_duplicate(raw_text))
42
+ # Asking again with the same text returns True
43
+ print(""is_duplicate again? "", is_duplicate(raw_text))"
44
+ "以下のnotebookを原稿にしつつ、初心者フレンドリーに簡素化して欲しい
45
+
46
+ # 現在のスタイル
47
+ あらかじめガッツリ関数を決めてから、大量の具体例で実践する
48
+
49
+ 上記は順番としてわかりづらい。
50
+
51
+ # 理想のスタイル
52
+
53
+ 具体例は1つだけに絞り、一番最初に用意する。
54
+
55
+ 関数を1つずつ定義しながら、徐々に最初の具体例に対して適用していく
56
+
57
+
58
+ # 原稿
59
+
60
+
61
+ import numpy as np # linear algebra
62
+ import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
63
+ import os
64
+ for dirname, _, filenames in os.walk('/kaggle/input'):
65
+ for filename in filenames:
66
+ print(os.path.join(dirname, filename))
67
+
68
+
69
+ !pip install requests beautifulsoup4 langdetect datasketch fake_useragent
70
+
71
+ import re
72
+ import time
73
+ import requests
74
+ from bs4 import BeautifulSoup
75
+ from langdetect import detect
76
+ from datasketch import MinHash, MinHashLSH
77
+ from urllib.parse import urlparse, urljoin
78
+ from fake_useragent import UserAgent
79
+
80
+ # MinHash LSH for deduplication
81
+ minhash_lsh = MinHashLSH(threshold=0.8, num_perm=128)
82
+ minhash_store = {}
83
+
84
+ # User-Agent Rotator to Avoid Blocking
85
+ ua = UserAgent()
86
+
87
+ # Crawling settings
88
+ MAX_PAGES = 20 # Limit number of pages to crawl
89
+ CRAWL_DEPTH = 2 # How deep to crawl links
90
+
91
+ visited_urls = set()
92
+
93
+ # Crawl the web dynamically
94
+ def crawl_web(seed_urls, depth=CRAWL_DEPTH):
95
+ """"""Crawls web starting from seed URLs.""""""
96
+ to_crawl = [(url, 0) for url in seed_urls] # (URL, depth)
97
+ crawled_data = []
98
+
99
+ while to_crawl and len(crawled_data) < MAX_PAGES:
100
+ url, current_depth = to_crawl.pop(0)
101
+
102
+ if url in visited_urls or current_depth > depth:
103
+ continue
104
+
105
+ visited_urls.add(url)
106
+ print(f""Crawling: {url}"")
107
+
108
+ text, links = extract_text_and_links(url)
109
+ if text:
110
+ crawled_data.append(text)
111
+
112
+ if current_depth < depth:
113
+ to_crawl.extend([(link, current_depth + 1) for link in links if link not in visited_urls])
114
+
115
+ return crawled_data
116
+
117
+ # Extracting text and links from page
118
+ def extract_text_and_links(url):
119
+ """"""Fetch text content and discover new links from a webpage.""""""
120
+ try:
121
+ headers = {""User-Agent"": ua.random}
122
+ response = requests.get(url, headers=headers, timeout=5)
123
+ response.raise_for_status()
124
+
125
+ soup = BeautifulSoup(response.text, ""html.parser"")
126
+ text = "" "".join(p.get_text() for p in soup.find_all(""p""))
127
+ links = {urljoin(url, a[""href""]) for a in soup.find_all(""a"", href=True)}
128
+
129
+ return text.strip(), links
130
+ except requests.RequestException:
131
+ return """", set()
132
+
133
+ # Language Filtering
134
+ def filter_language(text, target_lang=""en""):
135
+ try:
136
+ return detect(text) == target_lang
137
+ except:
138
+ return False
139
+
140
+ # Step 3: Gopher Filtering - Remove unwanted words
141
+ UNWANTED_KEYWORDS = [""click here"", ""advertisement"", ""sign up"", ""subscribe""]
142
+
143
+ def gopher_filter(text):
144
+ return not any(keyword in text.lower() for keyword in UNWANTED_KEYWORDS)
145
+
146
+ # MinHash Deduplication
147
+ def is_duplicate(text):
148
+ m = MinHash(num_perm=128)
149
+ for word in text.split():
150
+ m.update(word.encode(""utf8""))
151
+
152
+ if len(minhash_lsh.query(m)) > 0:
153
+ return True
154
+
155
+ key = len(minhash_store)
156
+ minhash_lsh.insert(str(key), m)
157
+ minhash_store[key] = text
158
+ return False
159
+
160
+ # Step 5: C4 Filters - Remove low-quality text
161
+ def c4_filters(text):
162
+ return len(text.split()) > 50 # Keep meaningful content
163
+
164
+ # Step 6: PII Removal - Emails, phone numbers, etc.
165
+ def remove_pii(text):
166
+ text = re.sub(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,7}\b', ""[EMAIL REDACTED]"", text)
167
+ text = re.sub(r'\b\d{10,13}\b', ""[PHONE REDACTED]"", text)
168
+ return text
169
+
170
+ # Pipeline Execution
171
+ def fineweb_pipeline(seed_urls):
172
+ """"""Crawl web and process text through the FineWeb pipeline.""""""
173
+ raw_texts = crawl_web(seed_urls)
174
+ clean_texts = []
175
+
176
+ for text in raw_texts:
177
+ if not filter_language(text):
178
+ continue
179
+ if not gopher_filter(text):
180
+ continue
181
+ if is_duplicate(text):
182
+ continue
183
+ if not c4_filters(text):
184
+ continue
185
+
186
+ clean_texts.append(remove_pii(text))
187
+
188
+ return clean_texts
189
+
190
+ # Use Case
191
+ seed_urls = [
192
+ ""https://en.wikipedia.org/wiki/Web_crawler"",
193
+ ""https://www.bbc.com/news""
194
+ ]
195
+
196
+ clean_data = fineweb_pipeline(seed_urls)
197
+
198
+ for i, text in enumerate(clean_data):
199
+ print(f""\n--- Processed Text {i+1} ---\n{text[:500]}"") # First 500 chars
200
+
201
+
202
+ import re
203
+ import time
204
+ import requests
205
+ import pandas as pd
206
+ from bs4 import BeautifulSoup
207
+ from langdetect import detect
208
+ from datasketch import MinHash, MinHashLSH
209
+ from urllib.parse import urlparse, urljoin
210
+ from fake_useragent import UserAgent
211
+
212
+ # MinHash LSH for deduplication
213
+ minhash_lsh = MinHashLSH(threshold=0.8, num_perm=128)
214
+ minhash_store = {}
215
+
216
+ # User-Agent Rotator to Avoid Blocking
217
+ ua = UserAgent()
218
+
219
+ # Crawling settings
220
+ MAX_PAGES = 100 # Maximum number of pages to crawl
221
+ CRAWL_DEPTH = 2 # How deep to crawl links
222
+ SEED_URLS = [
223
+ ""https://en.wikipedia.org/wiki/Web_crawler"",
224
+ ""https://www.bbc.com/news"",
225
+ ""https://www.cnn.com"",
226
+ ""https://www.theguardian.com/international"",
227
+ ""https://www.nytimes.com""
228
+ ]
229
+
230
+ visited_urls = set()
231
+
232
+ # Web Crawler
233
+ def crawl_web(seed_urls, depth=CRAWL_DEPTH):
234
+ """"""Crawls the web starting from given seed URLs.""""""
235
+ to_crawl = [(url, 0) for url in seed_urls] # (URL, depth)
236
+ crawled_data = []
237
+
238
+ while to_crawl and len(crawled_data) < MAX_PAGES:
239
+ url, current_depth = to_crawl.pop(0)
240
+
241
+ if url in visited_urls or current_depth > depth:
242
+ continue
243
+
244
+ visited_urls.add(url)
245
+ print(f""Crawling: {url}"")
246
+
247
+ text, links = extract_text_and_links(url)
248
+ if text:
249
+ crawled_data.append({""url"": url, ""text"": text})
250
+
251
+ if current_depth < depth:
252
+ to_crawl.extend([(link, current_depth + 1) for link in links if link not in visited_urls])
253
+
254
+ return crawled_data
255
+
256
+ # Extract text and links from page
257
+ def extract_text_and_links(url):
258
+ """"""Fetch text content and discover new links from a webpage.""""""
259
+ try:
260
+ headers = {""User-Agent"": ua.random}
261
+ response = requests.get(url, headers=headers, timeout=5)
262
+ response.raise_for_status()
263
+
264
+ soup = BeautifulSoup(response.text, ""html.parser"")
265
+ text = "" "".join(p.get_text() for p in soup.find_all(""p""))
266
+ links = {urljoin(url, a[""href""]) for a in soup.find_all(""a"", href=True)}
267
+
268
+ return text.strip(), links
269
+ except requests.RequestException:
270
+ return """", set()
271
+
272
+ # Language Filtering
273
+ def filter_language(text, target_lang=""en""):
274
+ try:
275
+ return detect(text) == target_lang
276
+ except:
277
+ return False
278
+
279
+ # Gopher Filtering - Removal of unwanted words
280
+ UNWANTED_KEYWORDS = [""click here"", ""advertisement"", ""sign up"", ""subscribe""]
281
+
282
+ def gopher_filter(text):
283
+ return not any(keyword in text.lower() for keyword in UNWANTED_KEYWORDS)
284
+
285
+ # MinHash Deduplication
286
+ def is_duplicate(text):
287
+ m = MinHash(num_perm=128)
288
+ for word in text.split():
289
+ m.update(word.encode(""utf8""))
290
+
291
+ if len(minhash_lsh.query(m)) > 0:
292
+ return True
293
+
294
+ key = len(minhash_store)
295
+ minhash_lsh.insert(str(key), m)
296
+ minhash_store[key] = text
297
+ return False
298
+
299
+ # C4 Filters - Removal of low-quality text
300
+ def c4_filters(text):
301
+ return len(text.split()) > 50 # Only meaningful content
302
+
303
+ # PII Removal - Emails, phone numbers, etc.
304
+ def remove_pii(text):
305
+ text = re.sub(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,7}\b', ""[EMAIL REDACTED]"", text)
306
+ text = re.sub(r'\b\d{10,13}\b', ""[PHONE REDACTED]"", text)
307
+ return text
308
+
309
+ # Pipeline Execution
310
+ def fineweb_pipeline(seed_urls):
311
+ """"""Crawl web & process text through FineWeb pipeline.""""""
312
+ raw_data = crawl_web(seed_urls)
313
+ clean_data = []
314
+
315
+ for entry in raw_data:
316
+ text = entry[""text""]
317
+ url = entry[""url""]
318
+
319
+ if not filter_language(text):
320
+ continue
321
+ if not gopher_filter(text):
322
+ continue
323
+ if is_duplicate(text):
324
+ continue
325
+ if not c4_filters(text):
326
+ continue
327
+
328
+ clean_text = remove_pii(text)
329
+ clean_data.append({""url"": url, ""text"": clean_text})
330
+
331
+ return clean_data
332
+
333
+
334
+ def save_dataset(data, filename=""fineweb_dataset""):
335
+ """"""Save data in CSV and JSON formats.""""""
336
+ df = pd.DataFrame(data)
337
+ df.to_csv(f""{filename}.csv"", index=False, encoding=""utf-8"")
338
+ df.to_json(f""{filename}.json"", orient=""records"", indent=4)
339
+
340
+ print(f""\nDataset saved as {filename}.csv and {filename}.json"")
341
+
342
+ # Use Case
343
+ clean_data = fineweb_pipeline(SEED_URLS)
344
+
345
+
346
+ save_dataset(clean_data)
347
+
348
+
349
+ for i, entry in enumerate(clean_data[:3]):
350
+ print(f""\n--- Processed Text {i+1} from {entry['url']} ---\n{text[:500]}"") # First 500 chars"
351
+ How many tokens does ChatGPT generate per day?
352
+ Aichi, ordinary towns other than Nagoya, outdoors, on a rainy day: think of things to do
353
+ What is a report (レポ) technical doujinshi (self-published tech zine)?
354
+ Starting from a side length of 1 centimeter
355
+ Up to what size of language model can you deploy for free on Streamlit?
356
+ About wandb's team plan: can a team plan be set up with a student license or account?
357
+ Recreation that costs no money
358
+ Look up the number of AI researchers at microsoft
359
+ Search the web for how students can use the University of Tokyo's MIYABI GPU computer and what it costs
360
+ I'm on a drive trip today, but I was so excited the night before that I stayed awake and only slept about 1.5 hours..
361
+ "# ModelConfigクラスを辞書に変換
362
+ config_dict = {k: v for k, v in config.__class__.__dict__.items() if not k.startswith(""__"")}
363
+ print(config_dict)"
364
+ "import pandas as pd
365
+ # Convert to a pandas DataFrame
366
+ df = pd.DataFrame(results)"
367
+ "以下の文章を、具体的な意味が伝わるようにしつつも、もっと簡潔にしてほしい
368
+
369
+ `train`関数がどんどん長くなってますね。
370
+
371
+ 特に、条件分岐`if`文が増えて読みづらくなっています。
372
+
373
+ コードの可読性を高めるために、リファクタリングを行います。
374
+
375
+ 条件分岐`if`文を減らすために、最後のステップと評価ステップを揃えたいと思います。
376
+
377
+ `for step in range(self.config.total_training_steps):`
378
+
379
+ だったので、stepは`0`から`self.config.total_training_steps-1`の範囲でした。
380
+
381
+ 例えば`total_training_steps`が1,000なら、最後のstepは999となり、`evaluation_frequency`が100であれば、評価対象である100の倍数になりません。
382
+
383
+ ここで、`for step in range(self.config.total_training_step+1):`にすることで、最後のステップと評価ステップを揃えます。
384
+
385
+ なお、この方法は`total_training_step`が`evaluation_frequency`の倍数でない場合は成立しないのですが、(例えば`total_training_step`が1050)で`evaluation_frequency`が100とか、通常はそういうことは起きないので気にしなくて大丈夫です。"
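Editor's note: a tiny sketch of the off-by-one described above, using the example numbers from the text (1,000 steps, evaluation every 100 steps).

total_training_steps = 1000
evaluation_frequency = 100

# Original loop: the last step is 999, not a multiple of 100,
# so the final state is never evaluated.
last_step = list(range(total_training_steps))[-1]
print(last_step, last_step % evaluation_frequency == 0)      # 999 False

# Adjusted loop: the last step is 1000, a multiple of 100,
# so the final step and the evaluation step coincide.
last_step = list(range(total_training_steps + 1))[-1]
print(last_step, last_step % evaluation_frequency == 0)      # 1000 True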
386
+ "50分くらいで見られるコンテンツとかなんか調べて教えて
387
+ 世界中の人にとって意味があるやつで"
388
+ "import time
389
+
390
+ class Trainer:
391
+ def __init__(self, model, optimizer, data_loader, config):
392
+ self.model = model
393
+ self.optimizer = optimizer
394
+ self.data_loader = data_loader
395
+ self.config = config
396
+
397
+ self.steps = []
398
+ self.train_losses = []
399
+ self.val_losses = []
400
+ ########## NEW ##########
401
+ self.total_seen_tokens_list = []
402
+ self.total_train_time_list = []
403
+ ########## NEW ##########
404
+
405
+ def train_step(self):
406
+ # Fetch a training batch.
407
+ input_batch, target_batch = self.data_loader.get_batch('train')
408
+ self.optimizer.zero_grad()
409
+
410
+ # Forward pass and loss computation
411
+ logits, loss = self.model(input_batch, target_batch)
412
+ loss.backward() # Backpropagation
413
+ self.optimizer.step() # Update parameters
414
+
415
+ return loss.item() # Return the loss value
416
+
417
+ def evaluate(self):
418
+ self.model.eval() # Switch to evaluation mode
419
+ losses = {""train"": [], ""val"": []} # Accumulate losses for both training and validation data
420
+ with torch.no_grad():
421
+ for split in ['train', 'val']:
422
+ for _ in range(self.config.evaluation_loops):
423
+ input_batch, target_batch = self.data_loader.get_batch(split)
424
+ _, loss = self.model(input_batch, target_batch)
425
+ losses[split].append(loss.item())
426
+ self.model.train() # Switch back to training mode
427
+
428
+ # Compute and return the mean loss for each split (train, val)
429
+ return {split: sum(values) / len(values) for split, values in losses.items()}
430
+
431
+ def train(self):
432
+ # Run train_step as many times as specified in config.
433
+ for step in range(self.config.total_training_steps):
434
+ # Evaluate only every 100 steps or on the final step.
435
+ if step % self.config.evaluation_frequency == 0 or step == self.config.total_training_steps - 1:
436
+ # Exclude step==0 because last_eval_end_time is not yet defined; exclude the final step because its timing measurement may be partial.
437
+ if step == 0 or step == self.config.total_training_steps - 1:
438
+ tokens_per_second = None
439
+ ########## NEW ##########
440
+ # First step
441
+ if step == 0:
442
+ total_train_time = 0
443
+ # Final step
444
+ else:
445
+ current_time = time.time()
446
+ interval_from_last_eval = current_time - last_eval_end_time
447
+ total_train_time += interval_from_last_eval
448
+ ########## NEW ##########
449
+ else:
450
+ current_eval_start_time = time.time()
451
+ evaluation_interval = current_eval_start_time - last_eval_end_time
452
+ ########## NEW ##########
453
+ total_train_time += evaluation_interval
454
+ ########## NEW ##########
455
+ tokens_per_evaluation_interval = self.config.batch_size * self.config.input_sequence_length * self.config.evaluation_frequency
456
+ tokens_per_second = tokens_per_evaluation_interval / evaluation_interval
457
+
458
+ eval_loss = self.evaluate()
459
+
460
+ ########## NEW ##########
461
+ total_seen_tokens = self.config.batch_size * self.config.input_sequence_length * step
462
+ ########## NEW ##########
463
+
464
+ ########## NEW ##########
465
+ print(
466
+ f""step {step:05d} | ""
467
+ f""train loss {eval_loss['train']:.4f} | ""
468
+ f""val loss {eval_loss['val']:.4f} | ""
469
+ f""tok/s {int(tokens_per_second) if tokens_per_second is not None else 'None'} | ""
470
+ f""tokens {total_seen_tokens:,} | ""
471
+ f""time {total_train_time:.2f}s""
472
+ )
473
+ ########## NEW ##########
474
+
475
+ self.steps.append(step)
476
+ self.train_losses.append(eval_loss['train'])
477
+ self.val_losses.append(eval_loss['val'])
478
+ ########## NEW ##########
479
+ self.total_seen_tokens_list.append(total_seen_tokens)
480
+ self.total_train_time_list.append(total_train_time)
481
+ ########## NEW ##########
482
+
483
+ # Record the time this evaluation finished; the gap to the start of the next evaluation becomes `evaluation_interval`.
484
+ last_eval_end_time = time.time()
485
+
486
+ # One training step (the main work performed every iteration)
487
+ train_loss = self.train_step()
488
+
489
+
490
+ def new_train(self):
491
+ # Run train_step (the number specified in config + 1) times.
492
+ ########## NEW ##########
493
+ for step in range(self.config.total_training_steps+1):
494
+ ########## NEW ##########
495
+ # Evaluate every 100 steps.
496
+ ########## NEW ##########
497
+ if step % self.config.evaluation_frequency == 0:
498
+ ########## NEW ##########
499
+
500
+ ########## NEW ##########
501
+ if step == 0:
502
+ tokens_per_second = None
503
+ total_train_time = 0
504
+ ########## NEW ##########
505
+ else:
506
+ current_eval_start_time = time.time()
507
+ evaluation_interval = current_eval_start_time - last_eval_end_time
508
+ total_train_time += evaluation_interval
509
+ tokens_per_evaluation_interval = self.config.batch_size * self.config.input_sequence_length * self.config.evaluation_frequency
510
+ tokens_per_second = tokens_per_evaluation_interval / evaluation_interval
511
+
512
+ eval_loss = self.evaluate()
513
+ total_seen_tokens = self.config.batch_size * self.config.input_sequence_length * step
514
+
515
+ print(
516
+ f""step {step:05d} | ""
517
+ f""train loss {eval_loss['train']:.4f} | ""
518
+ f""val loss {eval_loss['val']:.4f} | ""
519
+ f""tok/s {int(tokens_per_second) if tokens_per_second is not None else 'None'} | ""
520
+ f""tokens {total_seen_tokens:,} | ""
521
+ f""time {total_train_time:.2f}s""
522
+ )
523
+
524
+ self.steps.append(step)
525
+ self.train_losses.append(eval_loss['train'])
526
+ self.val_losses.append(eval_loss['val'])
527
+ self.total_seen_tokens_list.append(total_seen_tokens)
528
+ self.total_train_time_list.append(total_train_time)
529
+
530
+ # Record the time this evaluation finished; the gap to the start of the next evaluation becomes `evaluation_interval`.
531
+ last_eval_end_time = time.time()
532
+
533
+ # One training step (the main work performed every iteration)
534
+ train_loss = self.train_step()"
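Editor's note: a quick arithmetic check of the tokens-per-second bookkeeping used in the Trainer above, with made-up config values (batch_size 8, input_sequence_length 256, evaluation_frequency 100) and an invented 20-second interval between evaluations.

batch_size = 8
input_sequence_length = 256
evaluation_frequency = 100
evaluation_interval = 20.0  # seconds between two evaluations (invented for illustration)

# Tokens processed between two evaluations: one batch per step, for 100 steps.
tokens_per_evaluation_interval = batch_size * input_sequence_length * evaluation_frequency
print(tokens_per_evaluation_interval)                        # 204800
print(tokens_per_evaluation_interval / evaluation_interval)  # 10240.0 tokens per second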
535
+ Explain the Adam optimizer with its equations
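Editor's note: for reference, the standard textbook form of the Adam update (well known, not taken from this log), with gradient g_t, learning rate \alpha, decay rates \beta_1, \beta_2, and a small constant \epsilon:

m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t
v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2
\hat{m}_t = \frac{m_t}{1 - \beta_1^t}, \quad \hat{v}_t = \frac{v_t}{1 - \beta_2^t}
\theta_t = \theta_{t-1} - \alpha \, \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}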