Merge pull request #1157 from ATheorell/main
Small fixes to cli interface of gpte and bench applications
ATheorell authored May 22, 2024
2 parents e23e02f + 15b559e commit 6698656
Showing 2 changed files with 25 additions and 8 deletions.
8 changes: 5 additions & 3 deletions gpt_engineer/applications/cli/main.py
@@ -36,8 +36,8 @@
 import typer
 
 from dotenv import load_dotenv
-from langchain.cache import SQLiteCache
 from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
 from termcolor import colored
 
 from gpt_engineer.applications.cli.cli_agent import CliAgent
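Note: SQLiteCache is now imported from langchain_community.cache instead of langchain.cache; recent langchain releases moved the cache implementations into the community package and warn on the old import path. The same swap is applied to benchmark/__main__.py below.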
@@ -60,7 +60,9 @@
 from gpt_engineer.core.prompt import Prompt
 from gpt_engineer.tools.custom_steps import clarified_gen, lite_gen, self_heal
 
-app = typer.Typer()  # creates a CLI app
+app = typer.Typer(
+    context_settings={"help_option_names": ["-h", "--help"]}
+)  # creates a CLI app
 
 
 def load_env_if_needed():
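The context_settings dict is forwarded to the underlying Click context, so -h becomes an alias for --help on the command. A minimal self-contained sketch of the pattern (the hello command and its --name option are illustrative only, not part of gpt-engineer):

import typer

# Forwarded to Click: both -h and --help now trigger the help screen.
app = typer.Typer(context_settings={"help_option_names": ["-h", "--help"]})


@app.command()
def hello(name: str = typer.Option("world", "--name", "-n", help="who to greet")):
    print(f"Hello, {name}!")


if __name__ == "__main__":
    app()  # `python hello.py -h` prints the same usage text as `--help`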
@@ -247,7 +249,7 @@ def prompt_yesno() -> bool:
 )
 def main(
     project_path: str = typer.Argument(".", help="path"),
-    model: str = typer.Argument("gpt-4o", help="model id string"),
+    model: str = typer.Option("gpt-4o", "--model", "-m", help="model id string"),
     temperature: float = typer.Option(
         0.1,
         "--temperature",
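With model switched from typer.Argument to typer.Option, the model id is no longer the second positional argument; it is passed as --model or -m and still defaults to gpt-4o, e.g. gpte projects/example -m gpt-4o instead of gpte projects/example gpt-4o (assuming the gpte entry point named in the commit title). One way to confirm the new flag without running a generation, assuming gpt-engineer is installed in the current environment:

from typer.testing import CliRunner

from gpt_engineer.applications.cli.main import app

# Render the command's help text only; no project is created and no LLM call
# is made. The new --model / -m option should appear in the usage output.
runner = CliRunner()
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert "--model" in result.output and "-m" in result.output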
25 changes: 20 additions & 5 deletions gpt_engineer/benchmark/__main__.py
@@ -21,20 +21,23 @@
 """
 import importlib
 import os.path
+import sys
 
 from typing import Annotated, Optional
 
 import typer
 
-from langchain.cache import SQLiteCache
 from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
 
 from gpt_engineer.applications.cli.main import load_env_if_needed
 from gpt_engineer.benchmark.bench_config import BenchConfig
 from gpt_engineer.benchmark.benchmarks.load import get_benchmark
 from gpt_engineer.benchmark.run import export_yaml_results, print_results, run
 
-app = typer.Typer()  # creates a CLI app
+app = typer.Typer(
+    context_settings={"help_option_names": ["-h", "--help"]}
+)  # creates a CLI app
 
 
 def get_agent(path):
@@ -52,6 +55,7 @@ def get_agent(path):
         An instance of the imported default configuration agent.
     """
     # Dynamically import the python module at path
+    sys.path.append(os.path.dirname(path))
     agent_module = importlib.import_module(path.replace("/", ".").replace(".py", ""))
     return agent_module.default_config_agent()
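The appended sys.path entry is what lets importlib resolve an agent module that lives outside the current working tree. A simplified, self-contained variant of the pattern (the examples/my_agent.py path and the default_config_agent factory are assumptions for illustration):

import importlib
import os.path
import sys

# Hypothetical location of a user-supplied agent module.
path = "examples/my_agent.py"

# Make the containing directory importable; without this, import_module would
# only search the existing sys.path entries.
sys.path.append(os.path.dirname(path))

# Import by module name and call its factory, as benchmark/__main__.py does
# with the dotted form of the path.
agent_module = importlib.import_module(os.path.basename(path).replace(".py", ""))
agent = agent_module.default_config_agent()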

@@ -79,8 +83,16 @@ def main(
         typer.Option(help="print results for each task", show_default=False),
     ] = None,
     verbose: Annotated[
-        bool, typer.Option(help="print results for each task", show_default=False)
+        Optional[bool],
+        typer.Option(help="print results for each task", show_default=False),
     ] = False,
+    use_cache: Annotated[
+        Optional[bool],
+        typer.Option(
+            help="Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.",
+            show_default=False,
+        ),
+    ] = True,
 ):
     """
     The main function that runs the specified benchmarks with the given agent and outputs the results to the console.
@@ -93,13 +105,16 @@ def main(
         Configuration file for choosing which benchmark problems to run. See default config for more details.
     yaml_output: Optional[str], default=None
         Pass a path to a yaml file to have results written to file.
-    verbose : bool, default=False
+    verbose : Optional[bool], default=False
         A flag to indicate whether to print results for each task.
+    use_cache : Optional[bool], default=True
+        Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.
     Returns
     -------
     None
     """
-    set_llm_cache(SQLiteCache(database_path=".langchain.db"))
+    if use_cache:
+        set_llm_cache(SQLiteCache(database_path=".langchain.db"))
     load_env_if_needed()
     config = BenchConfig.from_toml(bench_config)
     print("using config file: " + bench_config)
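The cache install is now gated on the new flag, which typer exposes for a boolean option as --use-cache / --no-use-cache. A small sketch of what the gate does (configure_llm_cache is a hypothetical helper name; the commit inlines the same two lines directly in main()):

from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache


def configure_llm_cache(use_cache: bool = True, database_path: str = ".langchain.db") -> None:
    # Installing the cache is process-global: every langchain LLM call made
    # afterwards first looks up the (prompt, model parameters) pair in the
    # SQLite file and only contacts the provider on a miss.
    if use_cache:
        set_llm_cache(SQLiteCache(database_path=database_path))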
