Berom0227 committed on
Commit f8534bb · verified · 1 Parent(s): 73839cf

Upload scripts/sample_atomic_commites.py with huggingface_hub

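For context, an upload like this is typically pushed with the huggingface_hub client. A minimal sketch of such a call is shown below; the repo id and token handling are placeholders, not details taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # assumes `huggingface-cli login` has been run or HF_TOKEN is set
api.upload_file(
    path_or_fileobj="scripts/sample_atomic_commites.py",
    path_in_repo="scripts/sample_atomic_commites.py",
    repo_id="Berom0227/<dataset-repo>",  # placeholder repo id
    repo_type="dataset",
    commit_message="Upload scripts/sample_atomic_commites.py with huggingface_hub",
)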
Files changed (1)
  1. scripts/sample_atomic_commites.py +290 -0
scripts/sample_atomic_commites.py ADDED
@@ -0,0 +1,290 @@
+ #!/usr/bin/env python3
+ """
+ Script to sample atomic commits from the CCS dataset for concern extraction.
+ Applies an atomic sampling strategy with token filtering and SHA deduplication.
+ """
+
+ import os
+ from typing import Dict, List, Set
+
+ import pandas as pd
+ import tiktoken
+
+ # Processing configuration
+ CONVENTIONAL_COMMIT_TYPES: List[str] = ["cicd", "refactor", "fix", "test"]
+ SAMPLES_PER_TYPE: int = 2
+ TARGET_TOKEN_LIMIT: int = 12288  # 16384 - 4096
+ ENCODING_MODEL: str = "cl100k_base"  # GPT-4 encoding
+
+ # Column name constants
+ COLUMN_SHA: str = "sha"
+ COLUMN_ANNOTATED_TYPE: str = "annotated_type"
+ COLUMN_GIT_DIFF: str = "git_diff"
+ COLUMN_MASKED_COMMIT_MESSAGE: str = "masked_commit_message"
+ OUTPUT_COLUMNS: List[str] = [
+     COLUMN_ANNOTATED_TYPE,
+     COLUMN_MASKED_COMMIT_MESSAGE,
+     COLUMN_GIT_DIFF,
+     COLUMN_SHA,
+ ]
+
+ # Data transformation constants
+ CI_TO_CICD_REPLACEMENT: str = "cicd"
+
+ # File paths
+ CCS_SOURCE_PATH: str = "data/CCS Dataset Training Data.csv"
+ SAMPLED_CSV_PATH: str = "data/sampled_ccs_dataset.csv"
+ DIFF_OUTPUT_DIR: str = "data/types"
+
+
+ def normalize_dataset(df: pd.DataFrame) -> pd.DataFrame:
+     """Apply CI to CICD label normalization using pandas vectorized operations."""
+     # Use pandas replace for vectorized label replacement
+     df[COLUMN_ANNOTATED_TYPE] = (
+         df[COLUMN_ANNOTATED_TYPE]
+         .str.lower()
+         .str.strip()
+         .replace("ci", CI_TO_CICD_REPLACEMENT)
+     )
+     print("Applied CI -> CICD normalization using pandas replace()")
+     return df
+
+
+ def apply_token_filtering(df: pd.DataFrame) -> pd.DataFrame:
+     """Apply token-based filtering using GPT-4 tokenizer with pandas operations."""
+     encoding = tiktoken.get_encoding(ENCODING_MODEL)
+
+     # Create combined text column for token counting using pandas string operations
+     combined_text = (
+         df[COLUMN_GIT_DIFF].astype(str)
+         + " "
+         + df[COLUMN_MASKED_COMMIT_MESSAGE].astype(str)
+     )
+
+     # Apply token counting function and create boolean mask using pandas apply()
+     token_counts = combined_text.apply(lambda x: len(encoding.encode(x)))
+     token_mask = token_counts <= TARGET_TOKEN_LIMIT
+
+     # Filter using pandas boolean indexing
+     filtered_df = df[token_mask].copy()
+
+     removed_count = len(df) - len(filtered_df)
+     if removed_count > 0:
+         print(
+             f"Token filtering: removed {removed_count} commits exceeding {TARGET_TOKEN_LIMIT} tokens"
+         )
+
+     print(f"Token filtering: kept {len(filtered_df)} commits")
+     return filtered_df
+
+
+ def apply_sha_deduplication(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.DataFrame:
+     """Apply SHA deduplication using pandas isin() for efficient filtering."""
+     original_count = len(df)
+
+     # Use pandas isin() for vectorized membership testing
+     sha_mask = ~df[COLUMN_SHA].astype(str).isin(excluded_shas)
+     filtered_df = df[sha_mask].copy()
+
+     removed_count = original_count - len(filtered_df)
+     print(
+         f"SHA deduplication: removed {removed_count} duplicate commits using pandas isin()"
+     )
+     return filtered_df
+
+
+ def load_existing_shas(file_path: str) -> Set[str]:
+     """Load existing SHAs from the sampled dataset to exclude duplicates."""
+     try:
+         df = pd.read_csv(file_path)
+         sha_set = set(df[COLUMN_SHA].astype(str))
+         print(f"Loaded {len(sha_set)} SHAs for deduplication")
+         return sha_set
+     except FileNotFoundError:
+         print(f"No existing samples found at {file_path}")
+         return set()
+     except Exception as e:
+         print(f"Error loading existing SHAs: {e}")
+         return set()
+
+
+ def load_ccs_dataset(file_path: str) -> pd.DataFrame:
+     """Load the CCS dataset CSV file as a pandas DataFrame and validate its columns."""
+     try:
+         df = pd.read_csv(file_path)
+         if df.empty:
+             raise ValueError("Dataset is empty")
+
+         required_columns = set(OUTPUT_COLUMNS)
+         available_columns = set(df.columns)
+
+         missing_columns = required_columns - available_columns
+         if missing_columns:
+             raise ValueError(f"Missing required columns: {missing_columns}")
+
+         print(f"Loaded {len(df)} records from CCS dataset with all required columns")
+         return df
+     except Exception as e:
+         print(f"Error loading dataset: {e}")
+         raise
+
+
+ def save_to_csv(
+     data: List[Dict[str, str]], output_path: str, columns: List[str]
+ ) -> None:
+     """Save processed data to a CSV file, appending if the file already exists."""
+     os.makedirs(os.path.dirname(output_path), exist_ok=True)
+
+     if data:
+         df = pd.DataFrame(data, columns=columns)
+         file_exists = os.path.exists(output_path)
+
+         df.to_csv(
+             output_path,
+             mode="a" if file_exists else "w",
+             header=not file_exists,
+             index=False,
+         )
+
+     print(f"Saved {len(data)} records to {output_path}")
+
+
+ def group_commits_by_type(
+     df: pd.DataFrame, valid_types: List[str]
+ ) -> Dict[str, pd.DataFrame]:
+     """Group commits by their concern type using pandas groupby."""
+     # Filter valid types using pandas isin() for vectorized filtering
+     type_mask = df[COLUMN_ANNOTATED_TYPE].isin(valid_types)
+     valid_df = df[type_mask].copy()
+
+     excluded_count = len(df) - len(valid_df)
+     print(
+         f"Type filtering: excluded {excluded_count} records (invalid types)"
+     )
+
+     # Use pandas groupby for efficient grouping
+     commits_by_type = {}
+     for commit_type, group_df in valid_df.groupby(COLUMN_ANNOTATED_TYPE):
+         commits_by_type[commit_type] = group_df
+         print(f"  {commit_type}: {len(group_df)} commits")
+
+     return commits_by_type
+
+
+ def sample_commits_for_type(
+     df: pd.DataFrame, count: int, output_columns: List[str]
+ ) -> List[Dict[str, str]]:
+     """Randomly sample the specified number of commits using pandas sample()."""
+     # Use pandas sample() for random sampling; cap at the group size so that
+     # types with fewer commits than requested do not raise a ValueError
+     sampled_df = df.sample(n=min(count, len(df)), random_state=None)
+
+     # Convert only the final result to a dict list for compatibility
+     sampled_data = sampled_df[output_columns].to_dict("records")
+     return sampled_data
+
+
+ def extract_diffs(sampled_data: List[Dict[str, str]], output_dir: str) -> None:
+     """Extract git diff files organized by type into separate directories."""
+     type_counts = {}
+
+     for record in sampled_data:
+         commit_type = record[COLUMN_ANNOTATED_TYPE]
+
+         # Create type directory if needed
+         type_dir = os.path.join(output_dir, commit_type)
+         os.makedirs(type_dir, exist_ok=True)
+
+         # Count entries for this type
+         if commit_type not in type_counts:
+             type_counts[commit_type] = 0
+         type_counts[commit_type] += 1
+
+         # Generate filename
+         filename = f"{commit_type}_{type_counts[commit_type]}_{record[COLUMN_SHA]}.diff"
+         filepath = os.path.join(type_dir, filename)
+
+         # Create file content with metadata
+         content_lines = [
+             f"# Type: {commit_type}",
+             f"# Commit Message: {record[COLUMN_MASKED_COMMIT_MESSAGE]}",
+             f"# SHA: {record[COLUMN_SHA]}",
+             "",
+             "# === Git Diff Content ===",
+             "",
+             record[COLUMN_GIT_DIFF],
+         ]
+
+         with open(filepath, "w", encoding="utf-8") as f:
+             f.write("\n".join(content_lines))
+
+     print(f"Extracted {len(sampled_data)} diff files to {output_dir}")
+
+
+ def main() -> None:
+     """
+     Main function implementing the atomic sampling strategy:
+     1. Load dataset and backup SHAs
+     2. Apply CI->CICD normalization
+     3. Apply token-based filtering
+     4. Apply SHA deduplication
+     5. Group by type and randomly sample
+     6. Save results and extract diffs
+     """
+     print("Starting atomic sampling strategy for CCS dataset")
+     print("=" * 50)
+
+     # Step 1: Load dataset and backup SHAs
+     print("Step 1: Loading dataset and backup SHAs")
+     excluded_shas = load_existing_shas(SAMPLED_CSV_PATH)
+     ccs_df = load_ccs_dataset(CCS_SOURCE_PATH)
+
+     # Step 2: Apply CI->CICD normalization
+     print("\nStep 2: Applying CI->CICD normalization")
+     ccs_df = normalize_dataset(ccs_df)
+
+     # Step 3: Apply token-based filtering
+     print("\nStep 3: Applying token-based filtering")
+     ccs_df = apply_token_filtering(ccs_df)
+
+     # Step 4: Apply SHA deduplication
+     print("\nStep 4: Applying SHA deduplication")
+     ccs_df = apply_sha_deduplication(ccs_df, excluded_shas)
+
+     # Step 5: Group by type and randomly sample
+     print("\nStep 5: Grouping by type and random sampling")
+     commits_by_type = group_commits_by_type(ccs_df, CONVENTIONAL_COMMIT_TYPES)
+
+     all_sampled_data = []
+     for commits_df in commits_by_type.values():
+         sampled_data = sample_commits_for_type(
+             commits_df, SAMPLES_PER_TYPE, OUTPUT_COLUMNS
+         )
+         all_sampled_data.extend(sampled_data)
+
+     print(f"Random sampling: generated {len(all_sampled_data)} samples total")
+
+     # Step 6: Save results and extract diffs
+     print("\nStep 6: Saving results and extracting diffs")
+     save_to_csv(all_sampled_data, SAMPLED_CSV_PATH, OUTPUT_COLUMNS)
+     extract_diffs(all_sampled_data, DIFF_OUTPUT_DIR)
+
+     # Final summary
+     print("\n" + "=" * 50)
+     print("Atomic sampling completed successfully!")
+
+     type_counts = {}
+     for record in all_sampled_data:
+         commit_type = record.get(COLUMN_ANNOTATED_TYPE, "")
+         type_counts[commit_type] = type_counts.get(commit_type, 0) + 1
+
+     print("Final sample distribution:")
+     for commit_type in sorted(type_counts.keys()):
+         print(f"  {commit_type}: {type_counts[commit_type]} samples")
+
+
+ if __name__ == "__main__":
+     main()
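For reference, the script is run directly (python scripts/sample_atomic_commites.py) and needs pandas and tiktoken installed. A quick sanity check of the output after a run might look like the sketch below, using the paths and columns defined above; it is not part of the committed script:

import pandas as pd

sampled = pd.read_csv("data/sampled_ccs_dataset.csv")
# Roughly SAMPLES_PER_TYPE rows per type are expected for a fresh run;
# counts accumulate across runs because the CSV is appended to.
print(sampled["annotated_type"].value_counts())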