Commit

refactor: remove unused code
UniverseFly committed Aug 22, 2023
1 parent 995f4c8 commit 1db6f0b
Showing 6 changed files with 4 additions and 279 deletions.
5 changes: 2 additions & 3 deletions src/rectify/analyzer.py
@@ -1,14 +1,13 @@
import itertools
import pickle
import subprocess
from multiprocessing import Process
from multiprocessing.connection import Connection
from os import PathLike
from pathlib import Path
-from typing import Any, Dict, Optional, cast
+from typing import Any, Optional, cast

from rectify import utils
-from rectify.generation_defs import GenerationContext, Memorization
+from rectify.generation_defs import GenerationContext
from rectify.lsp import LSPClient, TextFile, spec
from rectify.model import ModelType

1 change: 0 additions & 1 deletion src/rectify/d4j.py
@@ -15,7 +15,6 @@
from unidiff.patch import Line

from rectify import utils
-from rectify.utils import chunked

Metadata = Dict[str, list[Dict[str, str]]]

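For context on the typing cleanup: analyzer.py above drops typing.Dict in favor of the built-in generics available since Python 3.9, while this file's Metadata alias still mixes typing.Dict with the lowercase list generic. A minimal sketch of the fully modernized alias (illustrative only, not a change made by this commit):

# Equivalent alias using only built-in generics (Python 3.9+).
Metadata = dict[str, list[dict[str, str]]]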
136 changes: 2 additions & 134 deletions src/rectify/repair.py
@@ -1,7 +1,6 @@
import json
import logging
import os
-import pickle
import random
import shutil
import time
@@ -17,145 +16,14 @@
from . import generation as gen
from . import utils
from .analyzer import JdtLspAnalyzer, Message
-from .config import MetaConfig, RepairConfig
-from .d4j import D4J1_HUNK_SPECIAL, Bug, Change, Defects4J
+from .config import MetaConfig
+from .d4j import Bug, Change, Defects4J
from .lsp import TextFile, spec
from .model import CodeT5Large, Incoder, ModelType
from .report import Report
from .results import HunkRepairResult, RepairResult
from .template import generate_templates

DATA_DIR = Path(os.getenv("LSP", ".lsp_data"))
-# DIAGNOSTICS = {}
-# BUG_IDS = []
-SKIP = [
-"Cli-8",
-"JacksonDatabind-1",
-"Jsoup-46",
-"Codec-4",
-"Jsoup-41",
-"Jsoup-77",
-"Jsoup-40",
-"Csv-11",
-"Cli-11",
-"Codec-17",
-"Cli-28",
-"Cli-17",
-"JacksonCore-11",
-"Jsoup-62",
-"JacksonDatabind-17",
-"Jsoup-55",
-"JacksonDatabind-16",
-"Gson-11",
-"Jsoup-39",
-"Codec-7",
-"Compress-1",
-"JacksonDatabind-99",
-"Jsoup-86",
-"Jsoup-43",
-"Jsoup-88",
-"JacksonXml-5",
-"Jsoup-26",
-"Cli-25",
-"Cli-40",
-"Compress-19",
-"JacksonCore-25",
-"Jsoup-57",
-"JacksonCore-5",
-]
-Done = [
-"Jsoup-25",
-"Collections-26",
-"JacksonDatabind-71",
-"Jsoup-15",
-"Csv-4",
-"JacksonDatabind-70",
-"Jsoup-37",
-"JacksonDatabind-34",
-"Compress-23",
-"JacksonDatabind-57",
-"Jsoup-45",
-"JacksonDatabind-96",
-"Jsoup-61",
-"Jsoup-47",
-"Gson-13",
-"Codec-3",
-"Codec-2",
-"JacksonDatabind-97",
-"Jsoup-9",
-"Codec-10",
-"Jsoup-17",
-"JacksonCore-8",
-"JacksonDatabind-27",
-"JacksonDatabind-37",
-"JacksonDatabind-46",
-"Jsoup-51",
-"Jsoup-32",
-"Compress-38",
-"JacksonDatabind-107",
-"JacksonDatabind-82",
-"Jsoup-24",
-"Jsoup-34",
-]
-BUGS_TO_DO = {
-"JacksonDatabind-71",
-"JacksonDatabind-34",
-"JacksonCore-26",
-"Jsoup-37",
-"JacksonDatabind-70",
-"Collections-26",
-"Csv-4",
-"Jsoup-15",
-"Compress-23",
-"Jsoup-25",
-"Gson-5",
-"Jsoup-45",
-"Jsoup-61",
-"Codec-18",
-"JacksonDatabind-96",
-"Codec-3",
-"Jsoup-9",
-"Jsoup-47",
-"Codec-2",
-"Codec-10",
-"Csv-14",
-"Jsoup-54",
-"JacksonDatabind-97",
-"Jsoup-17",
-"Jsoup-76",
-"JxPath-10",
-"JacksonDatabind-57",
-"Jsoup-93",
-"Gson-13",
-"Closure-168",
-"Jsoup-34",
-"Codec-16",
-"Jsoup-24",
-"Csv-12",
-"JacksonDatabind-37",
-"Jsoup-32",
-"Compress-38",
-"Gson-15",
-"JacksonDatabind-27",
-"Jsoup-33",
-"JacksonDatabind-82",
-"JacksonDatabind-46",
-"JacksonDatabind-107",
-"Csv-1",
-"Codec-9",
-"Jsoup-51",
-"Compress-25",
-"JacksonCore-8",
-"Jsoup-35",
-"Jsoup-2",
-} - set(Done)
-# print(len(BUGS_TO_DO), BUGS_TO_DO)
-# needs_re_gen: dict[str, list[tuple[int, int]]] = json.loads(
-# Path("d4j1_multi_hunk_comment.json").read_text()
-# )
-# needs_re_gen = {
-# proj: idx for proj, idx in needs_re_gen.items() if proj not in D4J1_HUNK_SPECIAL
-# }
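The deleted block above hard-coded Defects4J progress tracking: BUGS_TO_DO was a set literal of candidate bug IDs minus the Done list. A minimal runnable sketch of that set-difference pattern, using hypothetical sample IDs in place of the hard-coded lists:

# Hypothetical sample IDs standing in for the removed Defects4J lists.
done = ["Jsoup-25", "Codec-3"]
candidates = {"Jsoup-25", "Codec-3", "Csv-4", "Gson-5"}
bugs_to_do = candidates - set(done)  # same set difference as the removed code
print(sorted(bugs_to_do))  # ['Csv-4', 'Gson-5']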

if utils.INCODER:
INCODER_PREFIX_SUFFIX: dict = json.loads(
49 changes: 0 additions & 49 deletions src/rectify/results/repair_result.py
@@ -237,52 +237,3 @@ def dump(self, path: Path):
continue
tagged_result.is_dumpped = True
tagged_result.save_json(hunk_path / f"{idx}.json")


-# for result in result_batch.results:
-#     idx += 1
-#     if isinstance(result, Unfinished):
-#         (save_dir / str(Unfinished)).touch(exist_ok=True)
-#     elif isinstance(result, PrunedHalfway):
-#         (save_dir / str(PrunedHalfway)).touch(exist_ok=True)
-#     else:
-#         assert isinstance(result, SynthesisSuccessful)
-#         debug_dir = save_dir / "debug"
-#         debug_dir.mkdir(exist_ok=True, parents=False)
-#         buggy_file_path = tagged_result.buggy_file_path
-#         with open(buggy_file_path) as f:
-#             buggy_file_lines = f.readlines()
-#         assert isinstance(result, SynthesisSuccessful)
-#         unified_diff = difflib.unified_diff(
-#             buggy_file_lines,
-#             result.patch.content.splitlines(keepends=True),
-#             fromfile="bug",
-#             tofile="patch",
-#         )
-#         with open(
-#             save_dir / result.patch.path.with_suffix(".json").name,
-#             "w",
-#         ) as f:
-#             json.dump(
-#                 {
-#                     "path": str(result.patch.path.absolute()),
-#                     "content": result.patch.content,
-#                     "time": avg_time,
-#                     "hunk": hunk_idx,
-#                     "synthesis_config": len(self.repair_configs)
-#                     - 1,
-#                 },
-#                 f,
-#                 indent=2,
-#             )
-#         with open(
-#             debug_dir / buggy_file_path.with_suffix(".hunk").name,
-#             "w",
-#         ) as f:
-#             f.write(result.hunk)
-#         with open(
-#             debug_dir / buggy_file_path.with_suffix(".diff").name,
-#             "w",
-#         ) as f:
-#             f.writelines(unified_diff)
-#         tagged_result.is_dumpped = True
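The commented-out code deleted above produced human-readable patches with difflib.unified_diff. A self-contained sketch of that call shape, using hypothetical file contents in place of the real buggy and patched sources:

import difflib

buggy_file_lines = ["int x = 0;\n", "return x;\n"]  # hypothetical buggy source
patch_lines = ["int x = 1;\n", "return x;\n"]       # hypothetical patched source

# Same argument shape as the removed code: two line lists plus from/to labels.
unified_diff = difflib.unified_diff(
    buggy_file_lines, patch_lines, fromfile="bug", tofile="patch"
)
print("".join(unified_diff))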
16 changes: 0 additions & 16 deletions src/rectify/results/repair_transformation_result.py
@@ -165,19 +165,3 @@ def concat_hunks(file_patches: list[AvgFilePatch], delim: str = "") -> str:
for file_patch in file_patches
for hunk_patch in file_patch.hunks
)


-# @dataclass(frozen=True)
-# class RepairAnalysisResults(JsonSpecificDirectoryDumpable):
-#     results: list[RepairAnalysisResult]

-#     @classmethod
-#     def name(cls):
-#         return ANALYSIS_FNAME

-#     def to_json(self) -> Any:
-#         return [result.to_json() for result in self.results]

-#     @classmethod
-#     def from_json(cls, d: list) -> "RepairAnalysisResults":
-#         return RepairAnalysisResults([RepairAnalysisResult.from_json(r) for r in d])
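The RepairAnalysisResults class deleted above followed the codebase's to_json/from_json round-trip convention. A stand-alone sketch of that convention with a hypothetical result type (the real base class JsonSpecificDirectoryDumpable and the ANALYSIS_FNAME constant are internal to rectify and omitted here):

from dataclasses import dataclass
from typing import Any

@dataclass(frozen=True)
class AnalysisResult:  # hypothetical element type
    bug_id: str
    plausible: bool

    def to_json(self) -> Any:
        return {"bug_id": self.bug_id, "plausible": self.plausible}

    @classmethod
    def from_json(cls, d: Any) -> "AnalysisResult":
        return cls(d["bug_id"], d["plausible"])

@dataclass(frozen=True)
class AnalysisResults:  # mirrors the removed wrapper's shape
    results: list[AnalysisResult]

    def to_json(self) -> Any:
        return [result.to_json() for result in self.results]

    @classmethod
    def from_json(cls, d: list) -> "AnalysisResults":
        return cls([AnalysisResult.from_json(r) for r in d])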
76 changes: 0 additions & 76 deletions src/rectify/runner.py
@@ -452,82 +452,6 @@ def evaluate_generation_summary(self) -> GenerationDatapoint:
GenerationDatapoint.zero(),
)

-# def evaluate_validation_first_one_grouped(
-#     self, get_number: Callable[[ValidationDatapoint], int]
-# ) -> list[tuple[str, dict[str, ValidationDatapoint]]]:
-#     return Defects4J.group_by_project(
-#         self.evaluate_validation_first_one(get_number)
-#     )

-# def evaluate_validation_first_one(
-#     self, get_number: Callable[[ValidationDatapoint], int]
-# ) -> dict[str, ValidationDatapoint]:
-#     assert self.report.validation_result is not None

-#     def reduce_fn(
-#         lhs: ValidationDatapoint, rhs: ValidationDatapoint
-#     ) -> ValidationDatapoint:
-#         if get_number(lhs) == 1:
-#             return lhs
-#         return lhs + rhs

-#     return {
-#         bug_id: functools.reduce(
-#             reduce_fn,
-#             map(map_to_validation_datapoint, patches),
-#             ValidationDatapoint.zero(),
-#         )
-#         for bug_id, patches in self.get_validation_items()
-#     }

-# def evaluate(
-#     self,
-#     patch_infos: "Callable[[Runner], Iterable[tuple[str, Iterable[PatchInfo]]]]",
-#     map_to_datapoint: Callable[[PatchInfo], Datapoint],
-#     add_reduce: Callable[[EvaluationResult, Datapoint], EvaluationResult],
-#     initial_result: EvaluationResult,
-# ) -> dict[str, EvaluationResult]:
-#     # assert self.report.repair_result is not None
-#     # self.transform_with_message()
-#     # transformed_result = self.report.transformed_result
-#     # assert transformed_result is not None
-#     # transformed_result_dict = transformed_result.result_dict
-#     result: dict[str, EvaluationResult] = {}
-#     for bug_id, patches in patch_infos(self):
-#         mapped_generation_datapoints = map(map_to_datapoint, patches)
-#         bug_id_result = functools.reduce(
-#             add_reduce, mapped_generation_datapoints, initial_result
-#         )
-#         result[bug_id] = bug_id_result
-#     return result

-# def evaluate_validation(self) -> dict[str, list[ValidationDatapoint]]:
-#     generation_result = self.evaluate_generation()
-#     assert self.report.repair_result is not None
-#     validation_result = self.report.validation_result
-#     assert validation_result is not None
-#     validation_result_dict = validation_result.result_dict
-#     result: dict[str, list[ValidationResult]] = {}
-#     for bug_id, patches in list(validation_result_dict.items()):
-#         generation_datapoints = generation_result[bug_id]
-#         mapped_validation_datapoints = map(
-#             lambda patch: ValidationDatapoint(
-#                 n_parse_success=,
-#                 n_comp_success=,
-#                 n_test_success=,
-#                 total_time_consumed=
-#                 gen_datapoint=
-#             ),
-#             generation_datapoints,
-#         )
-#         datapoints = functools.reduce(
-#             lambda points, point: points + [points[-1] + point],
-#             mapped_validation_datapoints,
-#             [GenerationDatapoint.zero()],
-#         )
-#         result[bug_id] = datapoints
-#     return result


def validate_patch(
d4j: Defects4J, bug_id: str, bugs: list[TextFile], patch: AvgPatch
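The commented-out region deleted from runner.py sketched a map/reduce evaluation: map each patch to a datapoint, then fold the datapoints per bug with functools.reduce. A minimal runnable sketch of that shape, with a hypothetical Datapoint standing in for the project's GenerationDatapoint/ValidationDatapoint types:

import functools
from dataclasses import dataclass

@dataclass(frozen=True)
class Datapoint:  # hypothetical stand-in for GenerationDatapoint
    n_success: int

    def __add__(self, other: "Datapoint") -> "Datapoint":
        return Datapoint(self.n_success + other.n_success)

    @classmethod
    def zero(cls) -> "Datapoint":
        return cls(0)

# One reduced datapoint per bug, as the removed evaluate() computed.
patch_outcomes = {"Chart-1": [True, False, True], "Lang-3": [False]}
result = {
    bug_id: functools.reduce(
        lambda acc, ok: acc + Datapoint(1 if ok else 0), outcomes, Datapoint.zero()
    )
    for bug_id, outcomes in patch_outcomes.items()
}
print(result)  # {'Chart-1': Datapoint(n_success=2), 'Lang-3': Datapoint(n_success=0)}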
