Vu Anh committed on
Commit
0a474d0
·
1 Parent(s): 855f9d1

Add ruff linting and formatting

Browse files

- Added ruff as development dependency
- Added ruff.toml configuration
- Fixed all linting issues automatically
- Reformatted code with consistent style

Files changed (5) hide show
  1. dataset_statistics.py +67 -57
  2. preprocess_data.py +37 -35
  3. pyproject.toml +5 -0
  4. ruff.toml +23 -0
  5. uv.lock +35 -1
dataset_statistics.py CHANGED
@@ -1,13 +1,13 @@
1
  import json
2
- from pathlib import Path
3
- from collections import Counter
4
  import statistics as stats
 
 
5
 
6
 
7
  def load_jsonl(file_path):
8
  """Load JSONL file and return list of items."""
9
  items = []
10
- with open(file_path, 'r', encoding='utf-8') as f:
11
  for line in f:
12
  items.append(json.loads(line.strip()))
13
  return items
@@ -15,36 +15,36 @@ def load_jsonl(file_path):
15
 
16
  def calculate_text_statistics(items):
17
  """Calculate statistics for text fields."""
18
- text_lengths = [len(item['text'].split()) for item in items]
19
- char_lengths = [len(item['text']) for item in items]
20
 
21
  return {
22
- 'avg_words': stats.mean(text_lengths),
23
- 'min_words': min(text_lengths),
24
- 'max_words': max(text_lengths),
25
- 'median_words': stats.median(text_lengths),
26
- 'avg_chars': stats.mean(char_lengths),
27
- 'min_chars': min(char_lengths),
28
- 'max_chars': max(char_lengths),
29
- 'median_chars': stats.median(char_lengths),
30
  }
31
 
32
 
33
  def analyze_classification_subset():
34
  """Analyze classification subset statistics."""
35
- print("\n" + "="*60)
36
  print("CLASSIFICATION SUBSET ANALYSIS")
37
- print("="*60)
38
 
39
- for split in ['train', 'test']:
40
- file_path = Path(f'data/classification/{split}.jsonl')
41
  items = load_jsonl(file_path)
42
 
43
  print(f"\n{split.upper()} Split:")
44
  print(f" Total examples: {len(items)}")
45
 
46
  # Label distribution
47
- label_counter = Counter(item['label'] for item in items)
48
  print("\n Label Distribution:")
49
  for label, count in label_counter.most_common():
50
  percentage = (count / len(items)) * 100
@@ -53,29 +53,33 @@ def analyze_classification_subset():
53
  # Text statistics
54
  text_stats = calculate_text_statistics(items)
55
  print("\n Text Statistics:")
56
- print(f" Words per text - Avg: {text_stats['avg_words']:.1f}, "
57
- f"Min: {text_stats['min_words']}, Max: {text_stats['max_words']}, "
58
- f"Median: {text_stats['median_words']:.1f}")
59
- print(f" Chars per text - Avg: {text_stats['avg_chars']:.1f}, "
60
- f"Min: {text_stats['min_chars']}, Max: {text_stats['max_chars']}, "
61
- f"Median: {text_stats['median_chars']:.1f}")
 
 
 
 
62
 
63
 
64
  def analyze_sentiment_subset():
65
  """Analyze sentiment subset statistics."""
66
- print("\n" + "="*60)
67
  print("SENTIMENT SUBSET ANALYSIS")
68
- print("="*60)
69
 
70
- for split in ['train', 'test']:
71
- file_path = Path(f'data/sentiment/{split}.jsonl')
72
  items = load_jsonl(file_path)
73
 
74
  print(f"\n{split.upper()} Split:")
75
  print(f" Total examples: {len(items)}")
76
 
77
  # Sentiment distribution
78
- sentiment_counter = Counter(item['sentiment'] for item in items)
79
  print("\n Sentiment Distribution:")
80
  for sentiment, count in sentiment_counter.most_common():
81
  percentage = (count / len(items)) * 100
@@ -84,44 +88,50 @@ def analyze_sentiment_subset():
84
  # Text statistics
85
  text_stats = calculate_text_statistics(items)
86
  print("\n Text Statistics:")
87
- print(f" Words per text - Avg: {text_stats['avg_words']:.1f}, "
88
- f"Min: {text_stats['min_words']}, Max: {text_stats['max_words']}, "
89
- f"Median: {text_stats['median_words']:.1f}")
 
 
90
 
91
 
92
  def analyze_aspect_sentiment_subset():
93
  """Analyze aspect-sentiment subset statistics."""
94
- print("\n" + "="*60)
95
  print("ASPECT-SENTIMENT SUBSET ANALYSIS")
96
- print("="*60)
97
 
98
- for split in ['train', 'test']:
99
- file_path = Path(f'data/aspect_sentiment/{split}.jsonl')
100
  items = load_jsonl(file_path)
101
 
102
  print(f"\n{split.upper()} Split:")
103
  print(f" Total examples: {len(items)}")
104
 
105
  # Multi-aspect analysis
106
- single_aspect = sum(1 for item in items if len(item['aspects']) == 1)
107
- multi_aspect = sum(1 for item in items if len(item['aspects']) > 1)
108
- max_aspects = max(len(item['aspects']) for item in items)
109
-
110
- print(f"\n Aspect Coverage:")
111
- print(f" Single aspect: {single_aspect} ({(single_aspect/len(items))*100:.1f}%)")
112
- print(f" Multi-aspect: {multi_aspect} ({(multi_aspect/len(items))*100:.1f}%)")
 
 
 
 
113
  print(f" Max aspects per example: {max_aspects}")
114
 
115
  # Aspect-sentiment pair distribution
116
  aspect_sentiment_pairs = []
117
  for item in items:
118
- for asp in item['aspects']:
119
  aspect_sentiment_pairs.append(f"{asp['aspect']}#{asp['sentiment']}")
120
 
121
  pair_counter = Counter(aspect_sentiment_pairs)
122
  print("\n Top 10 Aspect-Sentiment Pairs:")
123
  for pair, count in pair_counter.most_common(10):
124
- aspect, sentiment = pair.split('#')
125
  percentage = (count / len(aspect_sentiment_pairs)) * 100
126
  print(f" {aspect:20s} + {sentiment:8s}: {count:4d} ({percentage:5.1f}%)")
127
 
@@ -130,9 +140,9 @@ def analyze_aspect_sentiment_subset():
130
  sentiment_by_aspect = {}
131
 
132
  for item in items:
133
- for asp in item['aspects']:
134
- aspect = asp['aspect']
135
- sentiment = asp['sentiment']
136
  aspect_counter[aspect] += 1
137
 
138
  if aspect not in sentiment_by_aspect:
@@ -147,7 +157,7 @@ def analyze_aspect_sentiment_subset():
147
  # Sentiment breakdown for this aspect
148
  sentiments = sentiment_by_aspect[aspect]
149
  total_aspect = sum(sentiments.values())
150
- for sentiment in ['positive', 'negative', 'neutral']:
151
  if sentiment in sentiments:
152
  sent_count = sentiments[sentiment]
153
  sent_pct = (sent_count / total_aspect) * 100
@@ -156,18 +166,18 @@ def analyze_aspect_sentiment_subset():
156
 
157
  def generate_summary_statistics():
158
  """Generate overall summary statistics."""
159
- print("\n" + "="*60)
160
  print("DATASET SUMMARY")
161
- print("="*60)
162
 
163
- total_train = len(load_jsonl('data/classification/train.jsonl'))
164
- total_test = len(load_jsonl('data/classification/test.jsonl'))
165
 
166
  print("\nTotal Dataset Size:")
167
  print(f" Train: {total_train} examples")
168
  print(f" Test: {total_test} examples")
169
  print(f" Total: {total_train + total_test} examples")
170
- print(f" Train/Test Ratio: {total_train/total_test:.2f}:1")
171
 
172
  # Available subsets
173
  print("\nAvailable Subsets:")
@@ -208,7 +218,7 @@ def save_statistics_report():
208
  sys.stdout = old_stdout
209
 
210
  # Save to file
211
- with open('statistics_report.md', 'w', encoding='utf-8') as f:
212
  f.write("# UTS2017_Bank Dataset Statistics Report\n\n")
213
  f.write("```\n")
214
  f.write(output)
@@ -223,5 +233,5 @@ if __name__ == "__main__":
223
  analyze_sentiment_subset()
224
  analyze_aspect_sentiment_subset()
225
 
226
- print("\n" + "="*60)
227
- save_statistics_report()
 
1
  import json
 
 
2
  import statistics as stats
3
+ from collections import Counter
4
+ from pathlib import Path
5
 
6
 
7
  def load_jsonl(file_path):
8
  """Load JSONL file and return list of items."""
9
  items = []
10
+ with open(file_path, encoding="utf-8") as f:
11
  for line in f:
12
  items.append(json.loads(line.strip()))
13
  return items
 
15
 
16
  def calculate_text_statistics(items):
17
  """Calculate statistics for text fields."""
18
+ text_lengths = [len(item["text"].split()) for item in items]
19
+ char_lengths = [len(item["text"]) for item in items]
20
 
21
  return {
22
+ "avg_words": stats.mean(text_lengths),
23
+ "min_words": min(text_lengths),
24
+ "max_words": max(text_lengths),
25
+ "median_words": stats.median(text_lengths),
26
+ "avg_chars": stats.mean(char_lengths),
27
+ "min_chars": min(char_lengths),
28
+ "max_chars": max(char_lengths),
29
+ "median_chars": stats.median(char_lengths),
30
  }
31
 
32
 
33
  def analyze_classification_subset():
34
  """Analyze classification subset statistics."""
35
+ print("\n" + "=" * 60)
36
  print("CLASSIFICATION SUBSET ANALYSIS")
37
+ print("=" * 60)
38
 
39
+ for split in ["train", "test"]:
40
+ file_path = Path(f"data/classification/{split}.jsonl")
41
  items = load_jsonl(file_path)
42
 
43
  print(f"\n{split.upper()} Split:")
44
  print(f" Total examples: {len(items)}")
45
 
46
  # Label distribution
47
+ label_counter = Counter(item["label"] for item in items)
48
  print("\n Label Distribution:")
49
  for label, count in label_counter.most_common():
50
  percentage = (count / len(items)) * 100
 
53
  # Text statistics
54
  text_stats = calculate_text_statistics(items)
55
  print("\n Text Statistics:")
56
+ print(
57
+ f" Words per text - Avg: {text_stats['avg_words']:.1f}, "
58
+ f"Min: {text_stats['min_words']}, Max: {text_stats['max_words']}, "
59
+ f"Median: {text_stats['median_words']:.1f}"
60
+ )
61
+ print(
62
+ f" Chars per text - Avg: {text_stats['avg_chars']:.1f}, "
63
+ f"Min: {text_stats['min_chars']}, Max: {text_stats['max_chars']}, "
64
+ f"Median: {text_stats['median_chars']:.1f}"
65
+ )
66
 
67
 
68
  def analyze_sentiment_subset():
69
  """Analyze sentiment subset statistics."""
70
+ print("\n" + "=" * 60)
71
  print("SENTIMENT SUBSET ANALYSIS")
72
+ print("=" * 60)
73
 
74
+ for split in ["train", "test"]:
75
+ file_path = Path(f"data/sentiment/{split}.jsonl")
76
  items = load_jsonl(file_path)
77
 
78
  print(f"\n{split.upper()} Split:")
79
  print(f" Total examples: {len(items)}")
80
 
81
  # Sentiment distribution
82
+ sentiment_counter = Counter(item["sentiment"] for item in items)
83
  print("\n Sentiment Distribution:")
84
  for sentiment, count in sentiment_counter.most_common():
85
  percentage = (count / len(items)) * 100
 
88
  # Text statistics
89
  text_stats = calculate_text_statistics(items)
90
  print("\n Text Statistics:")
91
+ print(
92
+ f" Words per text - Avg: {text_stats['avg_words']:.1f}, "
93
+ f"Min: {text_stats['min_words']}, Max: {text_stats['max_words']}, "
94
+ f"Median: {text_stats['median_words']:.1f}"
95
+ )
96
 
97
 
98
  def analyze_aspect_sentiment_subset():
99
  """Analyze aspect-sentiment subset statistics."""
100
+ print("\n" + "=" * 60)
101
  print("ASPECT-SENTIMENT SUBSET ANALYSIS")
102
+ print("=" * 60)
103
 
104
+ for split in ["train", "test"]:
105
+ file_path = Path(f"data/aspect_sentiment/{split}.jsonl")
106
  items = load_jsonl(file_path)
107
 
108
  print(f"\n{split.upper()} Split:")
109
  print(f" Total examples: {len(items)}")
110
 
111
  # Multi-aspect analysis
112
+ single_aspect = sum(1 for item in items if len(item["aspects"]) == 1)
113
+ multi_aspect = sum(1 for item in items if len(item["aspects"]) > 1)
114
+ max_aspects = max(len(item["aspects"]) for item in items)
115
+
116
+ print("\n Aspect Coverage:")
117
+ print(
118
+ f" Single aspect: {single_aspect} ({(single_aspect / len(items)) * 100:.1f}%)"
119
+ )
120
+ print(
121
+ f" Multi-aspect: {multi_aspect} ({(multi_aspect / len(items)) * 100:.1f}%)"
122
+ )
123
  print(f" Max aspects per example: {max_aspects}")
124
 
125
  # Aspect-sentiment pair distribution
126
  aspect_sentiment_pairs = []
127
  for item in items:
128
+ for asp in item["aspects"]:
129
  aspect_sentiment_pairs.append(f"{asp['aspect']}#{asp['sentiment']}")
130
 
131
  pair_counter = Counter(aspect_sentiment_pairs)
132
  print("\n Top 10 Aspect-Sentiment Pairs:")
133
  for pair, count in pair_counter.most_common(10):
134
+ aspect, sentiment = pair.split("#")
135
  percentage = (count / len(aspect_sentiment_pairs)) * 100
136
  print(f" {aspect:20s} + {sentiment:8s}: {count:4d} ({percentage:5.1f}%)")
137
 
 
140
  sentiment_by_aspect = {}
141
 
142
  for item in items:
143
+ for asp in item["aspects"]:
144
+ aspect = asp["aspect"]
145
+ sentiment = asp["sentiment"]
146
  aspect_counter[aspect] += 1
147
 
148
  if aspect not in sentiment_by_aspect:
 
157
  # Sentiment breakdown for this aspect
158
  sentiments = sentiment_by_aspect[aspect]
159
  total_aspect = sum(sentiments.values())
160
+ for sentiment in ["positive", "negative", "neutral"]:
161
  if sentiment in sentiments:
162
  sent_count = sentiments[sentiment]
163
  sent_pct = (sent_count / total_aspect) * 100
 
166
 
167
  def generate_summary_statistics():
168
  """Generate overall summary statistics."""
169
+ print("\n" + "=" * 60)
170
  print("DATASET SUMMARY")
171
+ print("=" * 60)
172
 
173
+ total_train = len(load_jsonl("data/classification/train.jsonl"))
174
+ total_test = len(load_jsonl("data/classification/test.jsonl"))
175
 
176
  print("\nTotal Dataset Size:")
177
  print(f" Train: {total_train} examples")
178
  print(f" Test: {total_test} examples")
179
  print(f" Total: {total_train + total_test} examples")
180
+ print(f" Train/Test Ratio: {total_train / total_test:.2f}:1")
181
 
182
  # Available subsets
183
  print("\nAvailable Subsets:")
 
218
  sys.stdout = old_stdout
219
 
220
  # Save to file
221
+ with open("statistics_report.md", "w", encoding="utf-8") as f:
222
  f.write("# UTS2017_Bank Dataset Statistics Report\n\n")
223
  f.write("```\n")
224
  f.write(output)
 
233
  analyze_sentiment_subset()
234
  analyze_aspect_sentiment_subset()
235
 
236
+ print("\n" + "=" * 60)
237
+ save_statistics_report()
preprocess_data.py CHANGED
@@ -1,5 +1,5 @@
1
- import re
2
  import json
 
3
  from pathlib import Path
4
 
5
 
@@ -15,18 +15,20 @@ def process_banking_data(input_file, output_dir):
15
  sentiment_data = []
16
  aspect_sentiment_data = []
17
 
18
- with open(input_file, 'r', encoding='utf-8') as f:
19
  for line_num, line in enumerate(f, 1):
20
  line = line.strip()
21
  if not line:
22
  continue
23
 
24
  # Extract labels and sentiments
25
- label_pattern = r'__label__([A-Z_]+)#(positive|negative|neutral)'
26
  matches = re.findall(label_pattern, line)
27
 
28
  # Remove labels from text
29
- text = re.sub(r'__label__[A-Z_]+#(positive|negative|neutral)\s*', '', line).strip()
 
 
30
 
31
  if not text or not matches:
32
  print(f"Skipping line {line_num}: No valid data found")
@@ -37,10 +39,7 @@ def process_banking_data(input_file, output_dir):
37
  sentiments = [m[1] for m in matches]
38
 
39
  # 1. Classification subset (first aspect as main label)
40
- classification_data.append({
41
- "text": text,
42
- "label": aspects[0]
43
- })
44
 
45
  # 2. Sentiment-only subset (overall sentiment)
46
  # If multiple sentiments, use the first one or most frequent
@@ -54,23 +53,18 @@ def process_banking_data(input_file, output_dir):
54
  sentiment_counts[s] = sentiment_counts.get(s, 0) + 1
55
  overall_sentiment = max(sentiment_counts, key=sentiment_counts.get)
56
 
57
- sentiment_data.append({
58
- "text": text,
59
- "sentiment": overall_sentiment
60
- })
61
 
62
  # 3. Aspect-Sentiment subset
63
  aspect_sentiment_pairs = []
64
- for aspect, sentiment in zip(aspects, sentiments):
65
- aspect_sentiment_pairs.append({
66
- "aspect": aspect,
67
- "sentiment": sentiment
68
- })
69
 
70
- aspect_sentiment_data.append({
71
- "text": text,
72
- "aspects": aspect_sentiment_pairs
73
- })
74
 
75
  # Save the three subsets
76
  output_dir = Path(output_dir)
@@ -82,26 +76,30 @@ def process_banking_data(input_file, output_dir):
82
  # Save classification subset
83
  classification_file = output_dir / "classification" / f"{split}.jsonl"
84
  classification_file.parent.mkdir(parents=True, exist_ok=True)
85
- with open(classification_file, 'w', encoding='utf-8') as f:
86
  for item in classification_data:
87
- f.write(json.dumps(item, ensure_ascii=False) + '\n')
88
- print(f"Saved {len(classification_data)} classification examples to {classification_file}")
 
 
89
 
90
  # Save sentiment subset
91
  sentiment_file = output_dir / "sentiment" / f"{split}.jsonl"
92
  sentiment_file.parent.mkdir(parents=True, exist_ok=True)
93
- with open(sentiment_file, 'w', encoding='utf-8') as f:
94
  for item in sentiment_data:
95
- f.write(json.dumps(item, ensure_ascii=False) + '\n')
96
  print(f"Saved {len(sentiment_data)} sentiment examples to {sentiment_file}")
97
 
98
  # Save aspect-sentiment subset
99
  aspect_sentiment_file = output_dir / "aspect_sentiment" / f"{split}.jsonl"
100
  aspect_sentiment_file.parent.mkdir(parents=True, exist_ok=True)
101
- with open(aspect_sentiment_file, 'w', encoding='utf-8') as f:
102
  for item in aspect_sentiment_data:
103
- f.write(json.dumps(item, ensure_ascii=False) + '\n')
104
- print(f"Saved {len(aspect_sentiment_data)} aspect-sentiment examples to {aspect_sentiment_file}")
 
 
105
 
106
  # Print statistics
107
  print("\n=== Statistics ===")
@@ -110,7 +108,7 @@ def process_banking_data(input_file, output_dir):
110
  # Label distribution
111
  label_counts = {}
112
  for item in classification_data:
113
- label = item['label']
114
  label_counts[label] = label_counts.get(label, 0) + 1
115
 
116
  print("\nLabel distribution:")
@@ -120,15 +118,19 @@ def process_banking_data(input_file, output_dir):
120
  # Sentiment distribution
121
  sentiment_counts = {}
122
  for item in sentiment_data:
123
- sentiment = item['sentiment']
124
  sentiment_counts[sentiment] = sentiment_counts.get(sentiment, 0) + 1
125
 
126
  print("\nSentiment distribution:")
127
- for sentiment, count in sorted(sentiment_counts.items(), key=lambda x: x[1], reverse=True):
 
 
128
  print(f" {sentiment}: {count}")
129
 
130
  # Multi-aspect examples
131
- multi_aspect_count = sum(1 for item in aspect_sentiment_data if len(item['aspects']) > 1)
 
 
132
  print(f"\nExamples with multiple aspects: {multi_aspect_count}")
133
 
134
 
@@ -138,6 +140,6 @@ if __name__ == "__main__":
138
  process_banking_data("raw_data/train.txt", "data")
139
 
140
  # Process test data
141
- print("\n" + "="*50)
142
  print("Processing test data...")
143
- process_banking_data("raw_data/test.txt", "data")
 
 
1
  import json
2
+ import re
3
  from pathlib import Path
4
 
5
 
 
15
  sentiment_data = []
16
  aspect_sentiment_data = []
17
 
18
+ with open(input_file, encoding="utf-8") as f:
19
  for line_num, line in enumerate(f, 1):
20
  line = line.strip()
21
  if not line:
22
  continue
23
 
24
  # Extract labels and sentiments
25
+ label_pattern = r"__label__([A-Z_]+)#(positive|negative|neutral)"
26
  matches = re.findall(label_pattern, line)
27
 
28
  # Remove labels from text
29
+ text = re.sub(
30
+ r"__label__[A-Z_]+#(positive|negative|neutral)\s*", "", line
31
+ ).strip()
32
 
33
  if not text or not matches:
34
  print(f"Skipping line {line_num}: No valid data found")
 
39
  sentiments = [m[1] for m in matches]
40
 
41
  # 1. Classification subset (first aspect as main label)
42
+ classification_data.append({"text": text, "label": aspects[0]})
 
 
 
43
 
44
  # 2. Sentiment-only subset (overall sentiment)
45
  # If multiple sentiments, use the first one or most frequent
 
53
  sentiment_counts[s] = sentiment_counts.get(s, 0) + 1
54
  overall_sentiment = max(sentiment_counts, key=sentiment_counts.get)
55
 
56
+ sentiment_data.append({"text": text, "sentiment": overall_sentiment})
 
 
 
57
 
58
  # 3. Aspect-Sentiment subset
59
  aspect_sentiment_pairs = []
60
+ for aspect, sentiment in zip(aspects, sentiments, strict=False):
61
+ aspect_sentiment_pairs.append(
62
+ {"aspect": aspect, "sentiment": sentiment}
63
+ )
 
64
 
65
+ aspect_sentiment_data.append(
66
+ {"text": text, "aspects": aspect_sentiment_pairs}
67
+ )
 
68
 
69
  # Save the three subsets
70
  output_dir = Path(output_dir)
 
76
  # Save classification subset
77
  classification_file = output_dir / "classification" / f"{split}.jsonl"
78
  classification_file.parent.mkdir(parents=True, exist_ok=True)
79
+ with open(classification_file, "w", encoding="utf-8") as f:
80
  for item in classification_data:
81
+ f.write(json.dumps(item, ensure_ascii=False) + "\n")
82
+ print(
83
+ f"Saved {len(classification_data)} classification examples to {classification_file}"
84
+ )
85
 
86
  # Save sentiment subset
87
  sentiment_file = output_dir / "sentiment" / f"{split}.jsonl"
88
  sentiment_file.parent.mkdir(parents=True, exist_ok=True)
89
+ with open(sentiment_file, "w", encoding="utf-8") as f:
90
  for item in sentiment_data:
91
+ f.write(json.dumps(item, ensure_ascii=False) + "\n")
92
  print(f"Saved {len(sentiment_data)} sentiment examples to {sentiment_file}")
93
 
94
  # Save aspect-sentiment subset
95
  aspect_sentiment_file = output_dir / "aspect_sentiment" / f"{split}.jsonl"
96
  aspect_sentiment_file.parent.mkdir(parents=True, exist_ok=True)
97
+ with open(aspect_sentiment_file, "w", encoding="utf-8") as f:
98
  for item in aspect_sentiment_data:
99
+ f.write(json.dumps(item, ensure_ascii=False) + "\n")
100
+ print(
101
+ f"Saved {len(aspect_sentiment_data)} aspect-sentiment examples to {aspect_sentiment_file}"
102
+ )
103
 
104
  # Print statistics
105
  print("\n=== Statistics ===")
 
108
  # Label distribution
109
  label_counts = {}
110
  for item in classification_data:
111
+ label = item["label"]
112
  label_counts[label] = label_counts.get(label, 0) + 1
113
 
114
  print("\nLabel distribution:")
 
118
  # Sentiment distribution
119
  sentiment_counts = {}
120
  for item in sentiment_data:
121
+ sentiment = item["sentiment"]
122
  sentiment_counts[sentiment] = sentiment_counts.get(sentiment, 0) + 1
123
 
124
  print("\nSentiment distribution:")
125
+ for sentiment, count in sorted(
126
+ sentiment_counts.items(), key=lambda x: x[1], reverse=True
127
+ ):
128
  print(f" {sentiment}: {count}")
129
 
130
  # Multi-aspect examples
131
+ multi_aspect_count = sum(
132
+ 1 for item in aspect_sentiment_data if len(item["aspects"]) > 1
133
+ )
134
  print(f"\nExamples with multiple aspects: {multi_aspect_count}")
135
 
136
 
 
140
  process_banking_data("raw_data/train.txt", "data")
141
 
142
  # Process test data
143
+ print("\n" + "=" * 50)
144
  print("Processing test data...")
145
+ process_banking_data("raw_data/test.txt", "data")
pyproject.toml CHANGED
@@ -6,3 +6,8 @@ requires-python = ">=3.13"
6
  dependencies = [
7
  "datasets>=4.1.1",
8
  ]
 
 
 
 
 
 
6
  dependencies = [
7
  "datasets>=4.1.1",
8
  ]
9
+
10
+ [dependency-groups]
11
+ dev = [
12
+ "ruff>=0.13.1",
13
+ ]
ruff.toml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [lint]
2
+ select = [
3
+ "E", # pycodestyle errors
4
+ "W", # pycodestyle warnings
5
+ "F", # pyflakes
6
+ "I", # isort
7
+ "B", # flake8-bugbear
8
+ "C4", # flake8-comprehensions
9
+ "UP", # pyupgrade
10
+ ]
11
+
12
+ ignore = [
13
+ "E501", # line too long, handled by formatter
14
+ ]
15
+
16
+ [format]
17
+ quote-style = "double"
18
+ indent-style = "space"
19
+ skip-magic-trailing-comma = false
20
+ line-ending = "auto"
21
+
22
+ [lint.isort]
23
+ known-first-party = []
uv.lock CHANGED
@@ -522,6 +522,32 @@ wheels = [
522
  { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
523
  ]
524
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
525
  [[package]]
526
  name = "six"
527
  version = "1.17.0"
@@ -572,15 +598,23 @@ wheels = [
572
 
573
  [[package]]
574
  name = "uts2017-bank"
575
- version = "0.1.0"
576
  source = { virtual = "." }
577
  dependencies = [
578
  { name = "datasets" },
579
  ]
580
 
 
 
 
 
 
581
  [package.metadata]
582
  requires-dist = [{ name = "datasets", specifier = ">=4.1.1" }]
583
 
 
 
 
584
  [[package]]
585
  name = "xxhash"
586
  version = "3.5.0"
 
522
  { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
523
  ]
524
 
525
+ [[package]]
526
+ name = "ruff"
527
+ version = "0.13.1"
528
+ source = { registry = "https://pypi.org/simple" }
529
+ sdist = { url = "https://files.pythonhosted.org/packages/ab/33/c8e89216845615d14d2d42ba2bee404e7206a8db782f33400754f3799f05/ruff-0.13.1.tar.gz", hash = "sha256:88074c3849087f153d4bb22e92243ad4c1b366d7055f98726bc19aa08dc12d51", size = 5397987, upload-time = "2025-09-18T19:52:44.33Z" }
530
+ wheels = [
531
+ { url = "https://files.pythonhosted.org/packages/f3/41/ca37e340938f45cfb8557a97a5c347e718ef34702546b174e5300dbb1f28/ruff-0.13.1-py3-none-linux_armv6l.whl", hash = "sha256:b2abff595cc3cbfa55e509d89439b5a09a6ee3c252d92020bd2de240836cf45b", size = 12304308, upload-time = "2025-09-18T19:51:56.253Z" },
532
+ { url = "https://files.pythonhosted.org/packages/ff/84/ba378ef4129415066c3e1c80d84e539a0d52feb250685091f874804f28af/ruff-0.13.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:4ee9f4249bf7f8bb3984c41bfaf6a658162cdb1b22e3103eabc7dd1dc5579334", size = 12937258, upload-time = "2025-09-18T19:52:00.184Z" },
533
+ { url = "https://files.pythonhosted.org/packages/8d/b6/ec5e4559ae0ad955515c176910d6d7c93edcbc0ed1a3195a41179c58431d/ruff-0.13.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5c5da4af5f6418c07d75e6f3224e08147441f5d1eac2e6ce10dcce5e616a3bae", size = 12214554, upload-time = "2025-09-18T19:52:02.753Z" },
534
+ { url = "https://files.pythonhosted.org/packages/70/d6/cb3e3b4f03b9b0c4d4d8f06126d34b3394f6b4d764912fe80a1300696ef6/ruff-0.13.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80524f84a01355a59a93cef98d804e2137639823bcee2931f5028e71134a954e", size = 12448181, upload-time = "2025-09-18T19:52:05.279Z" },
535
+ { url = "https://files.pythonhosted.org/packages/d2/ea/bf60cb46d7ade706a246cd3fb99e4cfe854efa3dfbe530d049c684da24ff/ruff-0.13.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff7f5ce8d7988767dd46a148192a14d0f48d1baea733f055d9064875c7d50389", size = 12104599, upload-time = "2025-09-18T19:52:07.497Z" },
536
+ { url = "https://files.pythonhosted.org/packages/2d/3e/05f72f4c3d3a69e65d55a13e1dd1ade76c106d8546e7e54501d31f1dc54a/ruff-0.13.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c55d84715061f8b05469cdc9a446aa6c7294cd4bd55e86a89e572dba14374f8c", size = 13791178, upload-time = "2025-09-18T19:52:10.189Z" },
537
+ { url = "https://files.pythonhosted.org/packages/81/e7/01b1fc403dd45d6cfe600725270ecc6a8f8a48a55bc6521ad820ed3ceaf8/ruff-0.13.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ac57fed932d90fa1624c946dc67a0a3388d65a7edc7d2d8e4ca7bddaa789b3b0", size = 14814474, upload-time = "2025-09-18T19:52:12.866Z" },
538
+ { url = "https://files.pythonhosted.org/packages/fa/92/d9e183d4ed6185a8df2ce9faa3f22e80e95b5f88d9cc3d86a6d94331da3f/ruff-0.13.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c366a71d5b4f41f86a008694f7a0d75fe409ec298685ff72dc882f882d532e36", size = 14217531, upload-time = "2025-09-18T19:52:15.245Z" },
539
+ { url = "https://files.pythonhosted.org/packages/3b/4a/6ddb1b11d60888be224d721e01bdd2d81faaf1720592858ab8bac3600466/ruff-0.13.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4ea9d1b5ad3e7a83ee8ebb1229c33e5fe771e833d6d3dcfca7b77d95b060d38", size = 13265267, upload-time = "2025-09-18T19:52:17.649Z" },
540
+ { url = "https://files.pythonhosted.org/packages/81/98/3f1d18a8d9ea33ef2ad508f0417fcb182c99b23258ec5e53d15db8289809/ruff-0.13.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f70202996055b555d3d74b626406476cc692f37b13bac8828acff058c9966a", size = 13243120, upload-time = "2025-09-18T19:52:20.332Z" },
541
+ { url = "https://files.pythonhosted.org/packages/8d/86/b6ce62ce9c12765fa6c65078d1938d2490b2b1d9273d0de384952b43c490/ruff-0.13.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:f8cff7a105dad631085d9505b491db33848007d6b487c3c1979dd8d9b2963783", size = 13443084, upload-time = "2025-09-18T19:52:23.032Z" },
542
+ { url = "https://files.pythonhosted.org/packages/a1/6e/af7943466a41338d04503fb5a81b2fd07251bd272f546622e5b1599a7976/ruff-0.13.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:9761e84255443316a258dd7dfbd9bfb59c756e52237ed42494917b2577697c6a", size = 12295105, upload-time = "2025-09-18T19:52:25.263Z" },
543
+ { url = "https://files.pythonhosted.org/packages/3f/97/0249b9a24f0f3ebd12f007e81c87cec6d311de566885e9309fcbac5b24cc/ruff-0.13.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3d376a88c3102ef228b102211ef4a6d13df330cb0f5ca56fdac04ccec2a99700", size = 12072284, upload-time = "2025-09-18T19:52:27.478Z" },
544
+ { url = "https://files.pythonhosted.org/packages/f6/85/0b64693b2c99d62ae65236ef74508ba39c3febd01466ef7f354885e5050c/ruff-0.13.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cbefd60082b517a82c6ec8836989775ac05f8991715d228b3c1d86ccc7df7dae", size = 12970314, upload-time = "2025-09-18T19:52:30.212Z" },
545
+ { url = "https://files.pythonhosted.org/packages/96/fc/342e9f28179915d28b3747b7654f932ca472afbf7090fc0c4011e802f494/ruff-0.13.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:dd16b9a5a499fe73f3c2ef09a7885cb1d97058614d601809d37c422ed1525317", size = 13422360, upload-time = "2025-09-18T19:52:32.676Z" },
546
+ { url = "https://files.pythonhosted.org/packages/37/54/6177a0dc10bce6f43e392a2192e6018755473283d0cf43cc7e6afc182aea/ruff-0.13.1-py3-none-win32.whl", hash = "sha256:55e9efa692d7cb18580279f1fbb525146adc401f40735edf0aaeabd93099f9a0", size = 12178448, upload-time = "2025-09-18T19:52:35.545Z" },
547
+ { url = "https://files.pythonhosted.org/packages/64/51/c6a3a33d9938007b8bdc8ca852ecc8d810a407fb513ab08e34af12dc7c24/ruff-0.13.1-py3-none-win_amd64.whl", hash = "sha256:3a3fb595287ee556de947183489f636b9f76a72f0fa9c028bdcabf5bab2cc5e5", size = 13286458, upload-time = "2025-09-18T19:52:38.198Z" },
548
+ { url = "https://files.pythonhosted.org/packages/fd/04/afc078a12cf68592345b1e2d6ecdff837d286bac023d7a22c54c7a698c5b/ruff-0.13.1-py3-none-win_arm64.whl", hash = "sha256:c0bae9ffd92d54e03c2bf266f466da0a65e145f298ee5b5846ed435f6a00518a", size = 12437893, upload-time = "2025-09-18T19:52:41.283Z" },
549
+ ]
550
+
551
  [[package]]
552
  name = "six"
553
  version = "1.17.0"
 
598
 
599
  [[package]]
600
  name = "uts2017-bank"
601
+ version = "1.0.0"
602
  source = { virtual = "." }
603
  dependencies = [
604
  { name = "datasets" },
605
  ]
606
 
607
+ [package.dev-dependencies]
608
+ dev = [
609
+ { name = "ruff" },
610
+ ]
611
+
612
  [package.metadata]
613
  requires-dist = [{ name = "datasets", specifier = ">=4.1.1" }]
614
 
615
+ [package.metadata.requires-dev]
616
+ dev = [{ name = "ruff", specifier = ">=0.13.1" }]
617
+
618
  [[package]]
619
  name = "xxhash"
620
  version = "3.5.0"