daiweinan.thu committed on
Commit 967796e · 1 Parent(s): 7575216
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. data0805/compute_ce_fsdp_recom.py +12 -12
  2. data0805/compute_ce_fsdp_recom_test.py +14 -14
  3. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h1.json +0 -0
  4. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h10.json +0 -0
  5. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h11.json +0 -0
  6. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h12.json +0 -0
  7. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h13.json +0 -0
  8. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h14.json +0 -0
  9. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h15.json +0 -0
  10. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h16.json +0 -0
  11. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h17.json +0 -0
  12. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h18.json +0 -0
  13. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h19.json +0 -0
  14. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h2.json +0 -0
  15. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h20.json +0 -0
  16. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h21.json +0 -0
  17. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h22.json +0 -0
  18. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h23.json +0 -0
  19. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h24.json +0 -0
  20. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h25.json +0 -0
  21. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h26.json +0 -0
  22. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h27.json +0 -0
  23. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h28.json +0 -0
  24. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h29.json +0 -0
  25. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h3.json +0 -0
  26. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h4.json +0 -0
  27. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h5.json +0 -0
  28. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h6.json +0 -0
  29. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h7.json +0 -0
  30. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h8.json +0 -0
  31. data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h9.json +0 -0
  32. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h1.json +0 -0
  33. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h10.json +0 -0
  34. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h11.json +0 -0
  35. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h12.json +0 -0
  36. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h13.json +0 -0
  37. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h14.json +0 -0
  38. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h15.json +0 -0
  39. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h16.json +0 -0
  40. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h17.json +0 -0
  41. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h18.json +0 -0
  42. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h19.json +0 -0
  43. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h2.json +0 -0
  44. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h20.json +0 -0
  45. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h21.json +0 -0
  46. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h22.json +0 -0
  47. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h23.json +0 -0
  48. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h24.json +0 -0
  49. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h25.json +0 -0
  50. data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h26.json +0 -0
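Most of the 50 files above are the per-round loss dumps h1.json through h29.json written under round30-l.English/sub100 for each evaluated model. A minimal sketch of how such a path can be assembled from the script's arguments follows; the helper name output_path_for is hypothetical and not taken from this commit, it only mirrors the directory layout visible in the listing.

import os

def output_path_for(output_filefolder: str, model_path: str,
                    round_threshold: int, language: str, i: int) -> str:
    # e.g. data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h1.json
    return os.path.join(output_filefolder, model_path,
                        f"round{round_threshold}-l.{language}", "sub100",
                        f"h{i}.json")

print(output_path_for("data/netflix/100", "Qwen/Qwen2.5-0.5B", 30, "English", 1))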
data0805/compute_ce_fsdp_recom.py CHANGED
@@ -112,21 +112,21 @@ class NetflixRatingDataset(Dataset):
 @torch.inference_mode()
 def run(underlying_model, lm_head, dataset, output_path):
     dataloader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, drop_last=False, num_workers=0)
-    print(dataloader.dataset[0])
-    print("----hi---- start run")
+    # print(dataloader.dataset[0])
+    # print("----hi---- start run")
     results = []
     for idx, batch in enumerate(tqdm(dataloader, desc="Inference")):
-        print(f"----hi---- {idx} id")
+        # print(f"----hi---- {idx} id")
         if "Error" in batch:
             # Skip invalid items
-            print("----hi---- skip invalid items")
+            # print("----hi---- skip invalid items")
             continue
         # batch["input_ids"] = batch["input_ids"].to(model.device).to(torch.float16)
-        print("----hi-----id "+ str(idx))
+        # print("----hi-----id "+ str(idx))
         hidden_states = underlying_model(
             input_ids=batch["input_ids"].to(underlying_model.device),
         ).last_hidden_state
-        print("----hi----- calculate hiddenstate")
+        # print("----hi----- calculate hiddenstate")

         for cur_input_ids, cur_hidden_state, cur_prefix_length in zip(batch['input_ids'], hidden_states, batch["prefix_length"]):
             cur_input_ids = cur_input_ids.to(underlying_model.device)
@@ -159,9 +159,9 @@ def run(underlying_model, lm_head, dataset, output_path):
                "losses": losses.cpu().float().numpy().tolist(),
                "history_round_len": dataset.history_round_len,
            })
-           print("!!!!!!!!!!!!!!!!!!!-------------------------")
-           print(results[0])
-           break
+           # print("!!!!!!!!!!!!!!!!!!!-------------------------")
+           # print(results[0])
+           # break
     # Save results to JSON file
     os.makedirs(os.path.dirname(output_path), exist_ok=True)
     with open(output_path, 'w') as f:
@@ -184,7 +184,7 @@ def main():
     args.language = None
     # setup_distributed(rank, world_size)

-    model_path = f"/data-share/lijiaqi/model/{args.model_path}"
+    model_path = args.model_path
     tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

     with init_empty_weights():
@@ -250,13 +250,13 @@ def main():
     language = args.language
     round_threshold = args.round_threshold
     if args.data_name == "netflix":
-        dataset = NetflixRatingDataset("/data-share/lijiaqi/personlaw/data/netflix/ratings2000.csv","/data-share/lijiaqi/personlaw/data/netflix/movie_titles.csv",
+        dataset = NetflixRatingDataset("ratings2000.csv","movie_titles.csv",
                                        round_threshold=round_threshold,
                                        tokenizer=tokenizer,
                                        max_seq_len=args.max_seq_len,
                                        max_item_per_user=args.max_item_per_user,
                                        max_history_len=args.max_history_len,)
-        output_filefolder = "/data-share/lijiaqi/personlaw/data/netflix/2000"
+        output_filefolder = "netflix/2000"
     else:
         raise NotImplementedError
     # for i in tqdm(range(round_threshold-1, 0, -1), desc="Main"):
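The hunks above only comment out debug prints and replace hard-coded /data-share paths with relative ones; the loss computation itself lies outside this diff. For orientation, a minimal sketch of how per-token cross-entropy can be obtained from the base model's last_hidden_state and a separately applied lm_head, the split that run() uses, is shown below. The function compute_token_losses and its exact slicing are assumptions, not code from this commit.

import torch
import torch.nn.functional as F

@torch.inference_mode()
def compute_token_losses(hidden_state, lm_head, input_ids, prefix_length):
    # hidden_state: (seq_len, hidden_dim) for one sample; input_ids: (seq_len,)
    logits = lm_head(hidden_state)            # (seq_len, vocab_size)
    # Next-token objective: the logit at position t scores the token at t+1,
    # so drop the last logit row and the first target token.
    shift_logits = logits[:-1, :]
    shift_labels = input_ids[1:]
    losses = F.cross_entropy(shift_logits, shift_labels, reduction="none")
    # Keep only the losses for tokens after the prompt prefix.
    return losses[prefix_length - 1:]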
data0805/compute_ce_fsdp_recom_test.py CHANGED
@@ -72,8 +72,8 @@ class NetflixRatingDataset(Dataset):
             # exit()
             if len(history) < history_round_len:
                 continue
-            if len(history) > self.max_history_len:
-                history = history.tail(self.max_history_len)
+            if len(history) > self.history_round_len:
+                history = history.tail(self.history_round_len)
             # Build the prompt
             prefix = "历史评分记录:\n"
             for _, row in history.iterrows():
@@ -112,21 +112,21 @@ class NetflixRatingDataset(Dataset):
 @torch.inference_mode()
 def run(underlying_model, lm_head, dataset, output_path):
     dataloader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, drop_last=False, num_workers=0)
-    print(dataloader.dataset[0])
-    print("----hi---- start run")
+    # print(dataloader.dataset[0])
+    # print("----hi---- start run")
     results = []
     for idx, batch in enumerate(tqdm(dataloader, desc="Inference")):
-        print(f"----hi---- {idx} id")
+        # print(f"----hi---- {idx} id")
         if "Error" in batch:
             # Skip invalid items
-            print("----hi---- skip invalid items")
+            # print("----hi---- skip invalid items")
             continue
         # batch["input_ids"] = batch["input_ids"].to(model.device).to(torch.float16)
-        print("----hi-----id "+ str(idx))
+        # print("----hi-----id "+ str(idx))
         hidden_states = underlying_model(
             input_ids=batch["input_ids"].to(underlying_model.device),
         ).last_hidden_state
-        print("----hi----- calculate hiddenstate")
+        # print("----hi----- calculate hiddenstate")

         for cur_input_ids, cur_hidden_state, cur_prefix_length in zip(batch['input_ids'], hidden_states, batch["prefix_length"]):
             cur_input_ids = cur_input_ids.to(underlying_model.device)
@@ -159,9 +159,9 @@ def run(underlying_model, lm_head, dataset, output_path):
                "losses": losses.cpu().float().numpy().tolist(),
                "history_round_len": dataset.history_round_len,
            })
-           print("!!!!!!!!!!!!!!!!!!!-------------------------")
-           print(results[0])
-           break
+           # print("!!!!!!!!!!!!!!!!!!!-------------------------")
+           # print(results[0])
+           # break
     # Save results to JSON file
     os.makedirs(os.path.dirname(output_path), exist_ok=True)
     with open(output_path, 'w') as f:
@@ -184,7 +184,7 @@ def main():
     args.language = None
     # setup_distributed(rank, world_size)

-    model_path = f"/data-share/lijiaqi/model/{args.model_path}"
+    model_path = args.model_path
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

     with init_empty_weights():
@@ -250,13 +250,13 @@ def main():
     language = args.language
     round_threshold = args.round_threshold
     if args.data_name == "netflix":
-        dataset = NetflixRatingDataset("/data-share/lijiaqi/personlaw/data/netflix/ratings100.csv","/data-share/lijiaqi/personlaw/data/netflix/movie_titles.csv",
+        dataset = NetflixRatingDataset("ratings100.csv","movie_titles.csv",
                                        round_threshold=round_threshold,
                                        tokenizer=tokenizer,
                                        max_seq_len=args.max_seq_len,
                                        max_item_per_user=args.max_item_per_user,
                                        max_history_len=args.max_history_len,)
-        output_filefolder = "/data-share/lijiaqi/personlaw/data/netflix/100"
+        output_filefolder = "data/netflix/100"
     else:
         raise NotImplementedError
     # for i in tqdm(range(round_threshold-1, 0, -1), desc="Main"):
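Besides the same debugging and path cleanups, the test script also changes the history truncation in NetflixRatingDataset from max_history_len to history_round_len, so each example keeps only the most recent history_round_len ratings. A small self-contained sketch of that behaviour with toy data (not the Netflix ratings file):

import pandas as pd

history = pd.DataFrame({
    "title": ["Movie A", "Movie B", "Movie C", "Movie D"],
    "rating": [3, 5, 4, 2],
})
history_round_len = 2

if len(history) > history_round_len:
    # pandas .tail keeps the last N rows, i.e. the most recent ratings.
    history = history.tail(history_round_len)

print(history)  # only the rows for "Movie C" and "Movie D" remain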
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h1.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h10.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h11.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h12.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h13.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h14.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h15.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h16.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h17.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h18.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h19.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h2.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h20.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h21.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h22.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h23.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h24.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h25.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h26.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h27.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h28.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h29.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h3.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h4.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h5.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h6.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h7.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h8.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-0.5B/round30-l.English/sub100/h9.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h1.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h10.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h11.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h12.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h13.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h14.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h15.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h16.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h17.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h18.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h19.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h2.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h20.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h21.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h22.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h23.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h24.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h25.json ADDED
The diff for this file is too large to render. See raw diff
 
data0805/data/netflix/100/Qwen/Qwen2.5-1.5B/round30-l.English/sub100/h26.json ADDED
The diff for this file is too large to render. See raw diff