MurathanKurfali committed on
Commit
0c22e02
·
1 Parent(s): 57d170c

welcome scidcc

Browse files
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ *.csv filter=lfs diff=lfs merge=lfs -text
SciDCC/prepare_scidcc.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import pandas as pd
from sklearn.model_selection import train_test_split

# Load the raw SciDCC dump from the working directory.
df = pd.read_csv("SciDCC.csv")

# Strip metadata columns that are not useful for classification.
df = df.drop(columns=["Date", "Link", "Year"])

# One stratified 80/10/10 split on the category label. The fixed seed makes
# the index partition reproducible, so every dataset variant built below
# shares the exact same row assignment.
train_idx, temp_idx = train_test_split(
    df.index, test_size=0.2, stratify=df["Category"], random_state=42
)
dev_idx, test_idx = train_test_split(
    temp_idx, test_size=0.5, stratify=df.loc[temp_idx, "Category"], random_state=42
)
14
+
15
+
16
# Summarize the spread of word counts observed in one text column.
def compute_word_stats(df, column):
    """Return {"min", "max", "median"} whitespace-token counts for *column*.

    NaN entries are skipped; every remaining value is stringified before
    splitting, so non-string cells are counted too.
    """
    counts = df[column].dropna().map(lambda text: len(str(text).split()))
    return {"min": counts.min(), "max": counts.max(), "median": counts.median()}
24
+
25
+
26
# Persist one dataset variant as train/dev/test CSVs and report statistics.
def save_splits(dataset_df, dataset_name):
    """Write train/dev/test CSVs for *dataset_df* into folder *dataset_name*.

    Relies on the module-level ``train_idx``/``dev_idx``/``test_idx`` index
    partitions, so every variant keeps the identical row-to-split assignment.
    Prints the per-split category distribution, and — when both "Summary" and
    "Body" columns are present — word-count statistics for each split.
    """
    output_folder = f"{dataset_name}"
    os.makedirs(output_folder, exist_ok=True)

    # Materialize the three splits once; dict preserves insertion order.
    splits = {
        "train": dataset_df.loc[train_idx],
        "dev": dataset_df.loc[dev_idx],
        "test": dataset_df.loc[test_idx],
    }
    for split_file, split_df in splits.items():
        split_df.to_csv(os.path.join(output_folder, f"{split_file}.csv"), index=False)

    print(f"\nCategory Distribution for {output_folder}:")
    print("Train Distribution:\n", splits["train"]["Category"].value_counts())
    print("Dev Distribution:\n", splits["dev"]["Category"].value_counts())
    print("Test Distribution:\n", splits["test"]["Category"].value_counts())

    # Word-count stats only make sense for the variants carrying long text.
    if "Body" in dataset_df.columns and "Summary" in dataset_df.columns:
        print("\nWord Count Statistics:")
        for label, part in [("Train", splits["train"]), ("Dev", splits["dev"]), ("Test", splits["test"])]:
            summary_stats = compute_word_stats(part, "Summary")
            body_stats = compute_word_stats(part, "Body")
            print(f" {label} Split:")
            print(
                f" Summary - Min: {summary_stats['min']}, Max: {summary_stats['max']}, Median: {summary_stats['median']}")
            print(f" Body - Min: {body_stats['min']}, Max: {body_stats['max']}, Median: {body_stats['median']}")
            print("-" * 50)
54
+
55
+
56
# Build three column variants of the dataset; all reuse the same module-level
# index partition, so row ordering and split membership stay consistent.
# (Fixed: the original inline comments mislabeled the first two variants as
# "Title, Summary, and Body".)
save_splits(df[["Title", "Category"]], "title")  # Title only
save_splits(df[["Title", "Summary", "Category"]], "title_summary")  # Title and Summary
save_splits(df[["Title", "Summary", "Body", "Category"]], "title_summary_body")  # Title, Summary, and Body

print("All datasets successfully created with consistent ordering and word count stats printed!")
SciDCC/title/dev.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b70de844866432064130d840bf736646da4983fa14d079ef76271af5dd5cb78
3
+ size 87913
SciDCC/title/test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7211947208566fe98ae9b5452592d15a52b80a347f0d2b68ab6f948a19306f16
3
+ size 88195
SciDCC/title/train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df8a49cd22adc381007f93f89b30ba6ba319986244e4ea2941c6f271ef5b4c38
3
+ size 714109
SciDCC/title_summary/dev.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d41c264d29a68cfc444dd023f5c3af50cdec7bfcfa0d4fc034a9406ab18d94a
3
+ size 437912
SciDCC/title_summary/test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ab456ace21b16ec91cdfbb821352568266541580a2aaef598a14f91b0814ad1
3
+ size 446086
SciDCC/title_summary/train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e976017b5a314426abd4a7ba6d682756f8736ab0bc7411a5fa7754f8774d81a
3
+ size 3514876
SciDCC/title_summary_body/dev.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b72dab8241ef6f6f5ec74678bedca300486b56ae158c4c4784aef27b2ac25390
3
+ size 4304615
SciDCC/title_summary_body/test.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d52bd4c0eae2f0266bce7e16a88e258271b9384f210b984305c3762884ff669
3
+ size 4323147
SciDCC/title_summary_body/train.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:894f8b0472c1aca2bb5a7e7c05cae667fc93cc5ff3361da0237c90807ac70a29
3
+ size 34950560