Berom0227 committed
Commit 8036525 · verified · 1 Parent(s): b74db29

Upload scripts/sample_atomic_commites.py with huggingface_hub

Files changed (1)
  1. scripts/sample_atomic_commites.py +54 -58
scripts/sample_atomic_commites.py CHANGED
@@ -1,16 +1,17 @@
 #!/usr/bin/env python3
 """
-Script to sample atomic commits from CCS dataset for concern extraction.
-Applies atomic sampling strategy with token filtering and SHA deduplication.
+Sample atomic commits from CCS dataset for concern extraction.
+Implements atomic sampling with token limits and SHA deduplication.
 """

 import pandas as pd
+
 import tiktoken
 from typing import Dict, List, Set

 # Processing configuration
-CONVENTIONAL_COMMIT_TYPES: List[str] = ["cicd", "refactor", "fix", "test"]
-SAMPLES_PER_TYPE: int = 2
+CONVENTIONAL_COMMIT_TYPES: List[str] = ["feat", "fix", "refactor", "test", "docs", "build", "cicd"]
+SAMPLES_PER_TYPE: int = 50
 TARGET_TOKEN_LIMIT: int = 12288 # 16384 - 4096
 ENCODING_MODEL: str = "cl100k_base" # GPT-4 encoding

@@ -30,68 +31,58 @@ OUTPUT_COLUMNS: List[str] = [
 CI_TO_CICD_REPLACEMENT: str = "cicd"

 # File paths
-CCS_SOURCE_PATH: str = "data/CCS Dataset Training Data.csv"
+CCS_SOURCE_PATH: str = "data/CCS Dataset.csv"
 SAMPLED_CSV_PATH: str = "data/sampled_ccs_dataset.csv"
+EXCLUDED_COMMITS_PATH: str = "data/excluded_commits.csv"
 DIFF_OUTPUT_DIR: str = "data/types"


 def normalize_dataset(df: pd.DataFrame) -> pd.DataFrame:
-    """Apply CI to CICD label normalization using pandas vectorized operations."""
-    # Use pandas replace for vectorized string replacement
+    """Normalize CI labels to CICD for consistent categorization."""
     df[COLUMN_ANNOTATED_TYPE] = (
         df[COLUMN_ANNOTATED_TYPE]
         .str.lower()
         .str.strip()
         .replace("ci", CI_TO_CICD_REPLACEMENT)
     )
-    print("Applied CI -> CICD normalization using pandas replace()")
+    print("Applied CI -> CICD normalization")
     return df


 def apply_token_filtering(df: pd.DataFrame) -> pd.DataFrame:
-    """Apply token-based filtering using GPT-4 tokenizer with pandas operations."""
+    """Filter commits exceeding token limit to prevent context overflow."""
     encoding = tiktoken.get_encoding(ENCODING_MODEL)

-    # Create combined text column for token counting using pandas string operations
     combined_text = (
         df[COLUMN_GIT_DIFF].astype(str)
         + " "
         + df[COLUMN_MASKED_COMMIT_MESSAGE].astype(str)
     )

-    # Apply token counting function and create boolean mask using pandas apply()
     token_counts = combined_text.apply(lambda x: len(encoding.encode(x)))
-    token_mask = token_counts <= TARGET_TOKEN_LIMIT
-
-    # Filter using pandas boolean indexing
-    filtered_df = df[token_mask].copy()
+    filtered_df = df[token_counts <= TARGET_TOKEN_LIMIT].copy()

     removed_count = len(df) - len(filtered_df)
     if removed_count > 0:
-        print(
-            f"Token filtering: removed {removed_count} commits exceeding {TARGET_TOKEN_LIMIT} tokens using pandas boolean indexing"
-        )
+        print(f"Token filtering: removed {removed_count} commits exceeding {TARGET_TOKEN_LIMIT} tokens")

     print(f"Token filtering: kept {len(filtered_df)} commits")
     return filtered_df


 def apply_sha_deduplication(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.DataFrame:
-    """Apply SHA deduplication using pandas isin() for efficient filtering."""
+    """Remove previously sampled commits to avoid training data contamination."""
     original_count = len(df)
-
-    # Use pandas isin() for vectorized membership testing
+
     sha_mask = ~df[COLUMN_SHA].astype(str).isin(excluded_shas)
     filtered_df = df[sha_mask].copy()

     removed_count = original_count - len(filtered_df)
-    print(
-        f"SHA deduplication: removed {removed_count} duplicate commits using pandas isin()"
-    )
+    print(f"SHA deduplication: removed {removed_count} duplicate commits")
     return filtered_df


-def load_existing_shas(file_path: str) -> Set[str]:
+def load_shas(file_path: str) -> Set[str]:
     """Load existing SHAs from sampled dataset to exclude duplicates."""
     try:
         df = pd.read_csv(file_path)
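
The hunk above rewrites the three row-level filters (label normalization, token filtering, SHA deduplication). As a quick illustration of how they compose, the following minimal sketch (not part of the commit) applies the same logic to a toy DataFrame. The literal column names are assumptions, since the COLUMN_* constants are defined outside the hunks shown here, and the rows, SHAs, and exclusion set are invented.

    import pandas as pd
    import tiktoken

    TARGET_TOKEN_LIMIT = 12288
    encoding = tiktoken.get_encoding("cl100k_base")

    # Toy frame; "sha", "annotated_type", "git_diff", "masked_commit_message"
    # are assumed values for the script's COLUMN_* constants.
    df = pd.DataFrame({
        "sha": ["abc123", "def456", "789aaa"],
        "annotated_type": ["CI ", "fix", "test"],
        "git_diff": ["diff --git a/x b/x", "diff --git a/y b/y", "diff --git a/z b/z"],
        "masked_commit_message": ["update pipeline", "fix bug", "add test"],
    })

    # normalize_dataset(): lowercase, strip, and map exact "ci" labels to "cicd".
    df["annotated_type"] = df["annotated_type"].str.lower().str.strip().replace("ci", "cicd")

    # apply_token_filtering(): count tokens over diff + message, keep rows under the limit.
    combined = df["git_diff"].astype(str) + " " + df["masked_commit_message"].astype(str)
    token_counts = combined.apply(lambda text: len(encoding.encode(text)))
    df = df[token_counts <= TARGET_TOKEN_LIMIT].copy()

    # apply_sha_deduplication(): drop SHAs that were already sampled in earlier runs.
    already_sampled = {"def456"}
    df = df[~df["sha"].astype(str).isin(already_sampled)].copy()

    print(df[["sha", "annotated_type"]])
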
@@ -107,22 +98,17 @@ def load_existing_shas(file_path: str) -> Set[str]:


 def load_ccs_dataset(file_path: str) -> pd.DataFrame:
-    """Load CCS dataset CSV file as pandas DataFrame."""
+    """Load and validate CCS dataset structure."""
     try:
         df = pd.read_csv(file_path)
         if df.empty:
             raise ValueError("Dataset is empty")

-        required_columns = set(OUTPUT_COLUMNS)
-        available_columns = set(df.columns)
-
-        missing_columns = required_columns - available_columns
+        missing_columns = set(OUTPUT_COLUMNS) - set(df.columns)
         if missing_columns:
             raise ValueError(f"Missing required columns: {missing_columns}")

-        print(f"Dataset validation passed: {len(df)} records with required columns")
-
-        print(f"Loaded {len(df)} records from CCS dataset as DataFrame")
+        print(f"Loaded {len(df)} records from CCS dataset")
         return df
     except Exception as e:
         print(f"Error loading dataset: {e}")
@@ -132,7 +118,7 @@ def load_ccs_dataset(file_path: str) -> pd.DataFrame:
 def save_to_csv(
     data: List[Dict[str, str]], output_path: str, columns: List[str]
 ) -> None:
-    """Save processed data to CSV file."""
+    """Append new samples to existing dataset or create new file."""
     import os

     os.makedirs(os.path.dirname(output_path), exist_ok=True)
@@ -154,17 +140,13 @@
 def group_commits_by_type(
     df: pd.DataFrame, valid_types: List[str]
 ) -> Dict[str, pd.DataFrame]:
-    """Group commits by their concern type using pandas groupby."""
-    # Filter valid types using pandas isin() for vectorized filtering
+    """Group commits by concern type for balanced sampling."""
     type_mask = df[COLUMN_ANNOTATED_TYPE].isin(valid_types)
     valid_df = df[type_mask].copy()

     excluded_count = len(df) - len(valid_df)
-    print(
-        f"Type filtering: excluded {excluded_count} records (invalid types) using pandas isin()"
-    )
+    print(f"Type filtering: excluded {excluded_count} records (invalid types)")

-    # Use pandas groupby for efficient grouping
     commits_by_type = {}
     for commit_type, group_df in valid_df.groupby(COLUMN_ANNOTATED_TYPE):
         commits_by_type[commit_type] = group_df
@@ -176,13 +158,9 @@ def group_commits_by_type(
 def sample_commits_for_type(
     df: pd.DataFrame, count: int, output_columns: List[str]
 ) -> List[Dict[str, str]]:
-    """Randomly sample specified number of commits using pandas sample()."""
-    # Use pandas sample() for efficient random sampling
+    """Sample fixed number of commits per type for balanced dataset."""
     sampled_df = df.sample(n=count, random_state=None)
-
-    # Convert only the final result to dict list for compatibility
-    sampled_data = sampled_df[output_columns].to_dict("records")
-    return sampled_data
+    return sampled_df[output_columns].to_dict("records")


 def extract_diffs(sampled_data: List[Dict[str, str]], output_dir: str) -> None:
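
Grouping and per-type sampling in the hunks above reduce to a pandas groupby followed by sample. The rough sketch below (again not part of the commit) mirrors group_commits_by_type() and sample_commits_for_type() on toy data; the column name is assumed, SAMPLES_PER_TYPE is shrunk so the example runs, and a min() guard is added that the script itself does not use.

    import pandas as pd

    SAMPLES_PER_TYPE = 1  # the script uses 50; reduced here so a tiny frame suffices
    valid_types = ["feat", "fix", "refactor", "test", "docs", "build", "cicd"]

    df = pd.DataFrame({
        "sha": ["a1", "b2", "c3", "d4"],
        "annotated_type": ["fix", "fix", "test", "perf"],  # "perf" is filtered out
    })

    # group_commits_by_type(): keep only valid types, then group by type.
    valid_df = df[df["annotated_type"].isin(valid_types)]

    # sample_commits_for_type(): draw a fixed number of rows per group.
    sampled = []
    for commit_type, group in valid_df.groupby("annotated_type"):
        n = min(SAMPLES_PER_TYPE, len(group))  # guard against groups smaller than the quota
        sampled.extend(group.sample(n=n)[["sha", "annotated_type"]].to_dict("records"))

    print(sampled)
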
@@ -223,6 +201,19 @@ def extract_diffs(sampled_data: List[Dict[str, str]], output_dir: str) -> None:

     print(f"Extracted {len(sampled_data)} diff files to {output_dir}")

+def remove_excluded_commits(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.DataFrame:
+    """Remove manually excluded commits based on quality issues."""
+    before_count = len(df)
+    print(f"Initial commit count: {before_count}")
+
+    mask = ~df[COLUMN_SHA].astype(str).isin(excluded_shas)
+    excluded_count = before_count - mask.sum()
+    print(f"Excluded {excluded_count} commits by SHA")
+
+    filtered_df = df[mask].copy()
+    print(f"Remaining commit count: {len(filtered_df)}")
+    return filtered_df
+

 def main() -> None:
     """
@@ -239,23 +230,28 @@ def main() -> None:

     # Step 1: Load dataset and backup SHAs
     print("Step 1: Loading dataset and backup SHAs")
-    excluded_shas = load_existing_shas(SAMPLED_CSV_PATH)
+    existing_shas = load_shas(SAMPLED_CSV_PATH)
+    excluded_shas = load_shas(EXCLUDED_COMMITS_PATH)
     ccs_df = load_ccs_dataset(CCS_SOURCE_PATH)

-    # Step 2: Apply CI->CICD normalization
-    print("\nStep 2: Applying CI->CICD normalization")
+    # Step 2: Remove excluded commits
+    print("\nStep 2: Removing excluded commits")
+    ccs_df = remove_excluded_commits(ccs_df, excluded_shas)
+
+    # Step 3: Apply CI->CICD normalization
+    print("\nStep 3: Applying CI->CICD normalization")
     ccs_df = normalize_dataset(ccs_df)

-    # Step 3: Apply token-based filtering
-    print("\nStep 3: Applying token-based filtering")
+    # Step 4: Apply token-based filtering
+    print("\nStep 4: Applying token-based filtering")
     ccs_df = apply_token_filtering(ccs_df)

-    # Step 4: Apply SHA deduplication
-    print("\nStep 4: Applying SHA deduplication")
-    ccs_df = apply_sha_deduplication(ccs_df, excluded_shas)
+    # Step 5: Apply SHA deduplication
+    print("\nStep 5: Applying SHA deduplication")
+    ccs_df = apply_sha_deduplication(ccs_df, existing_shas)

-    # Step 5: Group by type and randomly sample
-    print("\nStep 5: Grouping by type and random sampling")
+    # Step 6: Group by type and randomly sample
+    print("\nStep 6: Grouping by type and random sampling")
     commits_by_type = group_commits_by_type(ccs_df, CONVENTIONAL_COMMIT_TYPES)

     all_sampled_data = []
@@ -267,8 +263,8 @@ def main() -> None:

     print(f"Random sampling: generated {len(all_sampled_data)} samples total")

-    # Step 6: Save results and extract diffs
-    print("\nStep 6: Saving results and extracting diffs")
+    # Step 7: Save results and extract diffs
+    print("\nStep 7: Saving results and extracting diffs")
     save_to_csv(all_sampled_data, SAMPLED_CSV_PATH, OUTPUT_COLUMNS)
     extract_diffs(all_sampled_data, DIFF_OUTPUT_DIR)

 