formatted code

Author: Karma Riuk
Date:   2025-03-26 13:05:05 +01:00
Parent: fa3b7f82a1
Commit: 0d8b81054d
2 changed files with 135 additions and 41 deletions

View File

@@ -2,11 +2,14 @@ from dataclasses import dataclass, field
 from typing import Dict, List
 import json
 
+# fmt: off
 @dataclass
 class FileData:
     path: str
     content: str = ""  # Not sure about this, maybe we should just keep the path and extract the contents dynamically (boh)
 
 @dataclass
 class Metadata:
     repo: str  # the name of the repo, with style XXX/YYY
@@ -19,6 +22,7 @@ class Metadata:
     reason_for_failure: str = ""
     last_cmd_error_msg: str = ""
 
 @dataclass
 class DatasetEntry:
     metadata: Metadata
@@ -27,6 +31,8 @@ class DatasetEntry:
     comments: List[str]
     diffs_after: Dict[str, str]  # filename -> diff, changes after the comment
+# fmt: on
 
 @dataclass
 class Dataset:
     entries: List[DatasetEntry] = field(default_factory=list)
@@ -48,19 +54,20 @@ class Dataset:
         for entry_data in data["entries"]:
             metadata_data = entry_data["metadata"]
             metadata = Metadata(**metadata_data)
-            if not keep_still_in_progress and metadata.reason_for_failure == "Was still being processed":
+            if (
+                not keep_still_in_progress
+                and metadata.reason_for_failure == "Was still being processed"
+            ):
                 continue
-            files = {
-                fname: FileData(**fdata) for fname, fdata in entry_data["files"].items()
-            }
+            files = {fname: FileData(**fdata) for fname, fdata in entry_data["files"].items()}
             entry = DatasetEntry(
                 metadata=metadata,
                 files=files,
                 diffs_before=entry_data["diffs_before"],
                 comments=entry_data["comments"],
-                diffs_after=entry_data["diffs_after"]
+                diffs_after=entry_data["diffs_after"],
            )
             entries.append(entry)
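
Note on the "# fmt: off" / "# fmt: on" pair added above: these are Black formatter directives; everything between the two comments is left exactly as written, which protects the hand-aligned dataclass fields and their trailing comments from being rewrapped. A minimal sketch of the effect, with a hypothetical "identity" example not taken from this commit:

# fmt: off
identity = [
    1, 0,
    0, 1,
]  # hand-laid-out matrix; Black keeps this alignment instead of reflowing it
# fmt: on
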

View File

@@ -27,13 +27,17 @@ def get_good_projects(csv_file: str) -> pd.DataFrame:
     df = pd.read_csv(csv_file)
     return df.loc[(df['good_repo_for_crab'] == True) & (df['n_tests'] > 0)]
 
 def is_pull_good(pull: PullRequest, verbose: bool = False):
     return (
         has_only_1_comment(pull.get_commits(), pull.get_review_comments(), verbose=verbose)
         and pull.user.type != "Bot"
     )
 
-def get_good_prs(repo: Repository, cache: dict[str, dict[int, DatasetEntry]] = {}) -> list[PullRequest]:
+def get_good_prs(
+    repo: Repository, cache: dict[str, dict[int, DatasetEntry]] = {}
+) -> list[PullRequest]:
     good_prs = []
     potenially_good_prs = repo.get_pulls(state="closed")
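
A caveat on these signatures: cache: dict[str, dict[int, DatasetEntry]] = {} is a mutable default argument, which Python evaluates once at definition time and shares across every call that omits it. That is only safe while callers always pass the cache explicitly. A hedged sketch of the conventional None-sentinel alternative (not what this commit does):

def get_good_prs_safe(repo, cache=None):
    # A literal {} default would be created once and shared across calls;
    # the None sentinel gives each call its own fresh dict instead.
    if cache is None:
        cache = {}
    ...
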
@@ -42,7 +46,11 @@ def get_good_prs(repo: Repository, cache: dict[str, dict[int, DatasetEntry]] = {
     if number_of_prs == 0:
         return []
 
-    with tqdm(total=number_of_prs, desc=f"Extracting good PRs from {repo.full_name}", leave=False) as pbar:
+    with tqdm(
+        total=number_of_prs,
+        desc=f"Extracting good PRs from {repo.full_name}",
+        leave=False,
+    ) as pbar:
         for pr in potenially_good_prs:
             pbar.set_postfix({"new good found": len(good_prs), "pr_number": pr.number})
             if pr.merged_at is None or pr.number in cache.get(repo.full_name, set()):
@@ -54,8 +62,15 @@ def get_good_prs(repo: Repository, cache: dict[str, dict[int, DatasetEntry]] = {
     return good_prs
 
 def run_git_cmd(cmd: list[str], repo_path: str) -> subprocess.CompletedProcess:
-    return subprocess.run(["git", "-C", repo_path] + cmd, check=True, capture_output=True, text=True)
+    return subprocess.run(
+        ["git", "-C", repo_path] + cmd,
+        check=True,
+        capture_output=True,
+        text=True,
+    )
 
 def ensure_full_history(repo_path: str) -> None:
     result = run_git_cmd(["rev-parse", "--is-shallow-repository"], repo_path)
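
run_git_cmd is a thin wrapper over "git -C <repo_path> <cmd>": check=True turns a non-zero git exit status into subprocess.CalledProcessError (the checkout fallback further down relies on exactly that), while capture_output=True with text=True exposes stdout/stderr as strings. A small usage sketch, assuming a local clone at the hypothetical path /tmp/somerepo:

import subprocess

head = run_git_cmd(["rev-parse", "HEAD"], "/tmp/somerepo")
print(head.stdout.strip())  # .stdout is a str thanks to text=True

try:
    run_git_cmd(["checkout", "nonexistent-ref"], "/tmp/somerepo")
except subprocess.CalledProcessError as e:
    print(e.stderr)  # git's error message, captured by capture_output=True
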
@@ -63,11 +78,19 @@ def ensure_full_history(repo_path: str) -> None:
     if result.stdout.strip() == "true":
         run_git_cmd(["fetch", "--unshallow"], repo_path)
 
 def reset_repo_to_latest_commit(repo_path: str) -> None:
     current_branch = run_git_cmd(["rev-parse", "--abbrev-ref", "HEAD"], repo_path).stdout.strip()
     run_git_cmd(["reset", "--hard", current_branch], repo_path)
 
-def process_pull(repo: Repository, pr: PullRequest, dataset: Dataset, repos_dir: str, cache: dict[str, dict[int, DatasetEntry]] = {}):
+def process_pull(
+    repo: Repository,
+    pr: PullRequest,
+    dataset: Dataset,
+    repos_dir: str,
+    cache: dict[str, dict[int, DatasetEntry]] = {},
+):
     if pr.number in cache.get(repo.full_name, set()):
         dataset.entries.append(cache[repo.full_name][pr.number])
         return
@@ -80,7 +103,9 @@ def process_pull(repo: Repository, pr: PullRequest, dataset: Dataset, repos_dir:
     last_commit = commits[-1]
 
     try:
-        diffs_before = {file.filename: file.patch for file in repo.compare(pr.base.sha, first_commit.sha).files}
+        diffs_before = {
+            file.filename: file.patch for file in repo.compare(pr.base.sha, first_commit.sha).files
+        }
     except GithubException as e:
         return
@@ -91,12 +116,21 @@ def process_pull(repo: Repository, pr: PullRequest, dataset: Dataset, repos_dir:
     commented_file_path = comment.path
 
     try:
-        diffs_after = {file.filename: file.patch for file in repo.compare(first_commit.sha, last_commit.sha).files}
+        diffs_after = {
+            file.filename: file.patch
+            for file in repo.compare(first_commit.sha, last_commit.sha).files
+        }
     except GithubException as e:
         return
 
     entry = DatasetEntry(
-        metadata=Metadata(repo.full_name, pr.number, pr.merge_commit_sha, {comment_text: commented_file_path}, reason_for_failure="Was still being processed"),
+        metadata=Metadata(
+            repo.full_name,
+            pr.number,
+            pr.merge_commit_sha,
+            {comment_text: commented_file_path},
+            reason_for_failure="Was still being processed",
+        ),
         files={file.filename: FileData(file.filename) for file in pr.get_files()},
         diffs_before=diffs_before,
         comments=[comment_text],
@ -126,16 +160,25 @@ def process_pull(repo: Repository, pr: PullRequest, dataset: Dataset, repos_dir:
# raise e # raise e
return entry.metadata.successful return entry.metadata.successful
if not _try_cmd(lambda: ensure_full_history(repo_path), "Couldn't ensure the full history of the repo (fetch --unshallow)"): if not _try_cmd(
lambda: ensure_full_history(repo_path),
"Couldn't ensure the full history of the repo (fetch --unshallow)",
):
return return
try: try:
run_git_cmd(["checkout", pr.merge_commit_sha], repo_path) run_git_cmd(["checkout", pr.merge_commit_sha], repo_path)
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
if not _try_cmd(lambda: run_git_cmd(["fetch", "origin", f"pull/{pr.number}/merge"], repo_path), "Couldn't fetch the PR's merge commit"): if not _try_cmd(
lambda: run_git_cmd(["fetch", "origin", f"pull/{pr.number}/merge"], repo_path),
"Couldn't fetch the PR's merge commit",
):
return return
if not _try_cmd(lambda: run_git_cmd(["checkout", pr.merge_commit_sha], repo_path), "Coudln't checkout the PR's merge commit (even after fetching the pull/<pr_number>/merge)"): if not _try_cmd(
lambda: run_git_cmd(["checkout", pr.merge_commit_sha], repo_path),
"Coudln't checkout the PR's merge commit (even after fetching the pull/<pr_number>/merge)",
):
return return
build_handler = get_build_handler(repos_dir, repo.full_name, updates) build_handler = get_build_handler(repos_dir, repo.full_name, updates)
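
The fallback above leans on GitHub publishing a synthetic merge commit for every PR under the ref pull/<pr_number>/merge, which a plain clone does not fetch. A hedged sketch of the same recovery flow in isolation, reusing run_git_cmd (repo_path, pr_number, and merge_sha are placeholders):

import subprocess

def checkout_merge_commit(repo_path: str, pr_number: int, merge_sha: str) -> None:
    try:
        run_git_cmd(["checkout", merge_sha], repo_path)
    except subprocess.CalledProcessError:
        # The merge ref only exists server-side; fetch it, then retry.
        run_git_cmd(["fetch", "origin", f"pull/{pr_number}/merge"], repo_path)
        run_git_cmd(["checkout", merge_sha], repo_path)
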
@@ -162,7 +205,12 @@ def process_pull(repo: Repository, pr: PullRequest, dataset: Dataset, repos_dir:
     with build_handler, tqdm(total=len(steps), desc="Processing PR", leave=False) as pbar:
         try:
             for message, action in steps:
-                pbar.set_postfix({"doing": message, "started at": datetime.now().strftime("%d/%m, %H:%M:%S")})
+                pbar.set_postfix(
+                    {
+                        "doing": message,
+                        "started at": datetime.now().strftime("%d/%m, %H:%M:%S"),
+                    }
+                )
                 action()
                 pbar.update(1)
         except HandlerException as e:
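
tqdm's set_postfix takes a dict and renders it as key=value pairs to the right of the bar, so the current step and its start time stay visible while the bar advances. A self-contained sketch with made-up sleep-based steps:

import time
from datetime import datetime
from tqdm import tqdm

steps = [("build", lambda: time.sleep(0.2)), ("test", lambda: time.sleep(0.2))]
with tqdm(total=len(steps), desc="Processing PR", leave=False) as pbar:
    for message, action in steps:
        pbar.set_postfix({"doing": message, "started at": datetime.now().strftime("%H:%M:%S")})
        action()
        pbar.update(1)
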
@@ -177,7 +225,13 @@ def process_pull(repo: Repository, pr: PullRequest, dataset: Dataset, repos_dir:
     entry.metadata.reason_for_failure = ""  # was set to 'still processing', since it's done being processed and was successful, there are no reasons for failure
     dataset.to_json(args.output)
 
-def process_repo(repo_name: str, dataset: Dataset, repos_dir: str, cache: dict[str, dict[int, DatasetEntry]] = {}):
+def process_repo(
+    repo_name: str,
+    dataset: Dataset,
+    repos_dir: str,
+    cache: dict[str, dict[int, DatasetEntry]] = {},
+):
     repo = g.get_repo(repo_name)
     if repo.full_name in cache:
         dataset.entries.extend(cache[repo.full_name].values())
@@ -191,7 +245,13 @@ def process_repo(repo_name: str, dataset: Dataset, repos_dir: str, cache: dict[s
         pbar.set_postfix({"pr": pr.number})
         process_pull(repo, pr, dataset, repos_dir, cache)
 
-def process_repos(df: pd.DataFrame, dataset: Dataset, repos_dir: str, cache: dict[str, dict[int, DatasetEntry]] = {}):
+def process_repos(
+    df: pd.DataFrame,
+    dataset: Dataset,
+    repos_dir: str,
+    cache: dict[str, dict[int, DatasetEntry]] = {},
+):
     """
     Processes the repos in the given csv file, extracting the good ones and
     creating the "triplets" for the dataset.
@@ -206,23 +266,50 @@ def process_repos(df: pd.DataFrame, dataset: Dataset, repos_dir: str, cache: dic
         for _, row in df.iterrows():
             repo_name = row["name"]
             assert isinstance(repo_name, str)
-            pbar.set_postfix({
+            pbar.set_postfix(
+                {
                     "repo": repo_name,
                     "started at": datetime.now().strftime("%d/%m, %H:%M:%S"),
-                    "# triplets": f"{len(dataset)}/{len(dataset.entries)} ({len(dataset)/len(dataset.entries) if len(dataset.entries) > 0 else 0:.2%})"
+                    "# triplets": f"{len(dataset)}/{len(dataset.entries)} ({len(dataset)/len(dataset.entries) if len(dataset.entries) > 0 else 0:.2%})",
-            })
+                }
+            )
             process_repo(repo_name, dataset, repos_dir, cache)
             pbar.update(1)
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Creates the triplets for the CRAB dataset.')
-    parser.add_argument('csv_file', type=str, help='The csv file containing the projects (the results from clone_repos.py).')
-    parser.add_argument('-o', '--output', type=str, default="./dataset.json", help='The file in which the dataset will be contained. Default is "./dataset.json"')
-    parser.add_argument('-r', '--repos', type=str, default="./results/", help='The directory in which the repos were cloned (will be cloned if they aren\'t there already). Default: "./results/"')
-    parser.add_argument('-c', '--cache', type=str, help="The name of the output file from another run of this script. This is for when the script unexpectedly got interrupted and you want to resume from where you left off.")
+    parser.add_argument(
+        'csv_file',
+        type=str,
+        help='The csv file containing the projects (the results from clone_repos.py).',
+    )
+    parser.add_argument(
+        '-o',
+        '--output',
+        type=str,
+        default="./dataset.json",
+        help='The file in which the dataset will be contained. Default is "./dataset.json"',
+    )
+    parser.add_argument(
+        '-r',
+        '--repos',
+        type=str,
+        default="./results/",
+        help='The directory in which the repos were cloned (will be cloned if they aren\'t there already). Default: "./results/"',
+    )
+    parser.add_argument(
+        '-c',
+        '--cache',
+        type=str,
+        help="The name of the output file from another run of this script. This is for when the script unexpectedly got interrupted and you want to resume from where you left off.",
+    )
     # parser.add_argument('-v', '--verbose', action='store_true', help='Prints the number of good projects.')
-    parser.add_argument("--only-repo", type=str, help="If this argument is not provided, all the repos in the '--repos' csv will be processed. If instead you want to run the script on a single repo (for testing purposes mainly) provide a string of form 'XXX/YYY' to this argument, where XXX is the owner of the repo and YYY is the name of the repo")
+    parser.add_argument(
+        "--only-repo",
+        type=str,
+        help="If this argument is not provided, all the repos in the '--repos' csv will be processed. If instead you want to run the script on a single repo (for testing purposes mainly) provide a string of form 'XXX/YYY' to this argument, where XXX is the owner of the repo and YYY is the name of the repo",
+    )
 
     args = parser.parse_args()
 
     g = Github(os.environ["GITHUB_AUTH_TOKEN_CRAB"])
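
The -c/--cache flag is what ties the two changed files together: it points at the JSON written by an interrupted run, and Dataset.from_json with keep_still_in_progress=False (first file) drops the entries that were mid-flight when the run died. A hedged sketch of how resuming could be wired up; the exact from_json signature and the metadata field names (pr_number in particular) are assumptions, not confirmed by this diff:

# Load the previous run, discarding entries that never finished processing.
previous = Dataset.from_json("dataset.json", keep_still_in_progress=False)

# Rebuild the repo -> {pr_number: entry} cache the process_* functions expect.
cache: dict[str, dict[int, DatasetEntry]] = {}
for entry in previous.entries:
    # entry.metadata.pr_number is an assumed field name.
    cache.setdefault(entry.metadata.repo, {})[entry.metadata.pr_number] = entry
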