Skip to content

Commit

Permalink
Handle -p, -e, and -m in r2ai.py
Browse files Browse the repository at this point in the history
  • Loading branch information
radare committed Sep 16, 2024
1 parent f12dd71 commit 768c7ff
Show file tree
Hide file tree
Showing 4 changed files with 44 additions and 11 deletions.
33 changes: 32 additions & 1 deletion main.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,15 +17,46 @@ def main():
parser.add_argument("bin", nargs="?", type=str)
parser.add_argument("-w", "--webserver", action="store_true",
help="Start the r2ai webserver. Same as r2ai -c=-w")
parser.add_argument("-p", "--port", type=str, nargs="?", const="default",
help="Change listen port number")
parser.add_argument("-e", "--eval", type=str, nargs="?", const="default",
help="Change configuration variable")
parser.add_argument("-m", "--model", type=str, nargs="?", const="default",
help="Select model name")
parser.add_argument("-c", "--command", action="append",
help="Command to be executed. Can be passed multiple times.")
args = parser.parse_args()
runrepl = True
if args.webserver:
if args.command is None:
args.command = []
args.command.append("-w")
if args.eval:
if args.command is None:
args.command = []
if args.eval == "default":
args.command.append("-e")
runrepl = False
else:
args.command.append(f"-e {args.eval}")
if args.port:
if args.command is None:
args.command = []
if args.port == "default":
print("8080")
return
else:
args.command.append(f"-e http.port={args.port}")
if args.model:
if args.command is None:
args.command = []
if args.model == "default":
args.command.append("-mm")
runrepl = False
else:
args.command.append(f"-m {args.model}")

r2ai_main(args, args.command)
r2ai_main(args, args.command, runrepl)

if __name__ == "__main__":
main()
15 changes: 8 additions & 7 deletions r2ai-server/r2ai-server
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@ main() {
llamafile --server --nobrowser -c 0 --port ${PORT} -m "${MODELPATH}"
;;
llamacpp)
llama-server -c 0 --port ${PORT} -m "${MODELPATH}"
llama-server -c 0 --port "${PORT}" -m "${MODELPATH}"
;;
koboldcpp)
koboldcpp -c 0 --port ${PORT} -m "${MODELPATH}"
koboldcpp -c 0 --port "${PORT}" -m "${MODELPATH}"
;;
r2ai)
${R2AI} -c="-e http.port=${PORT}" -m "${MODEL}" -w
${R2AI} --port "${PORT}" -m "${MODEL}" -w
;;
*)
echo "Invalid llama server selected."
Expand All @@ -55,6 +55,7 @@ models() {
}

llamas() {
# TODO check for those bins in path before suggesting them
echo r2ai
echo llamafile
echo llamacpp
Expand All @@ -65,16 +66,16 @@ while : ; do
case "$1" in
-l)
if [ -z "$2" ]; then
llamas
break
llamas
break
fi
LLAMA="$2"
shift
;;
-m)
if [ -z "$2" ]; then
models
break
models
break
fi
MODEL="$2"
shift
Expand Down
5 changes: 3 additions & 2 deletions r2ai/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def run_rcfile_once():
RCFILE_LOADED = True


def main(args, commands):
def main(args, commands, dorepl=True):
global within_r2

os.environ["TOKENIZERS_PARALLELISM"] = "false"
Expand Down Expand Up @@ -120,7 +120,8 @@ def main(args, commands):
runline(ai, "-" + c[1:])
else:
runline(ai, c)
r2ai_repl(ai)
if dorepl:
r2ai_repl(ai)
# elif HAVE_RLANG and HAVE_R2PIPE:
# r2ai_repl(ai)
# os.exit(0)
Expand Down
2 changes: 1 addition & 1 deletion r2ai/repl.py
Original file line number Diff line number Diff line change
Expand Up @@ -471,7 +471,7 @@ def runline(ai, usertext):
else:
print("{0:.0f}".format(LOGGER.level / 10))
elif usertext.startswith("-"):
print("Unknown flag. See 'r2ai -h' for help", file=sys.stderr)
print(f"Unknown flag '{usertext}'. See 'r2ai -h' for help", file=sys.stderr)
else:
ai.chat(usertext)

Expand Down

0 comments on commit 768c7ff

Please sign in to comment.