Fix CI
Replaces: #186

Use Ubuntu 22.04 for now; we can potentially move to 24.04 in another PR.

Signed-off-by: Eric Curtin <ecurtin@redhat.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
ericcurtin authored and rhatdan committed Sep 25, 2024
1 parent 209dc6c commit 4c82750
Showing 11 changed files with 53 additions and 50 deletions.
.github/workflows/ci.yml (9 changes: 4 additions & 5 deletions)

@@ -2,16 +2,15 @@ name: ci
 on: [push, pull_request]
 jobs:
   linux:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
     - uses: actions/checkout@v4
     - name: install bats
      shell: bash
      run: |
-        sudo apt update
-        sudo apt -y install bats
-        apt-get bash
-    - name: Run a one-line script
+        sudo apt-get update
+        sudo apt-get install bats bash
+    - name: run test
      run: make test

   macos:
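The old step mixed `apt` and `apt-get` and ended with a stray `apt-get bash` line that was not a valid command; the new step uses `apt-get` consistently and installs both packages in one call. A quick way to sanity-check the step locally (assuming an Ubuntu 22.04 host with sudo) is:

    sudo apt-get update
    sudo apt-get install bats bash
    bats --version   # confirm bats is on the PATH before trying `make test`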
Makefile (2 changes: 1 addition & 1 deletion)

@@ -92,7 +92,7 @@ ci:
 	test/ci.sh
 
 .PHONY: test
-test: validate bats ci
+test: validate bats ci codespell
 	make clean
 	hack/tree_status.sh
 
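Adding `codespell` as a prerequisite of `test` means a plain `make test` now runs the spell checker along with validation, the bats suite, and the CI checks. Assuming the Makefile defines a `codespell` target elsewhere, the checks can also be run individually:

    make test        # validate, bats, ci, and now codespell
    make codespell   # spell check alone (target assumed to exist in the Makefile)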
docs/ramalama-serve.1.md (6 changes: 3 additions & 3 deletions)

@@ -18,12 +18,12 @@ The default is TRUE. The --nocontainer option forces this option to False.
 
 Use the `ramalama stop` command to stop the container running the served ramalama Model.
 
-#### **--generate**=quadlet
-Generate specified configuration format for running the AI Model as a service
-
 #### **--help**, **-h**
 show this help message and exit
 
+#### **--generate** ['quadlet']
+Generate specified configuration format for running the AI Model as a service
+
 #### **--name**, **-n**
 Name of the container to run the Model in.
 
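The option itself is unchanged; the entry simply moves below `--help` so the man page lists options in the order the parser prints them. A typical invocation, matching the serve test later in this commit (the name and model values are illustrative):

    ramalama serve --name=mymodel --port 1234 --generate=quadlet tiny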
install.py (9 changes: 7 additions & 2 deletions)

@@ -57,7 +57,12 @@ def check_platform():
         print("This script is intended to run as non-root on macOS")
         return 1
     if not available("brew"):
-        print("Please install brew and add the directory containing brew to the PATH before continuing install on macOS")
+        print(
+            """
+RamaLama requires brew to complete installation. Install brew and add the
+directory containing brew to the PATH before continuing to install RamaLama
+"""
+        )
         return 2
     elif sys.platform == "linux":
         if os.geteuid() != 0:
@@ -71,7 +76,7 @@ def check_platform():
 
 
 def install_mac_dependencies():
-    subprocess.run(["pip3", "install", "huggingface_hub[cli]==0.25.1"], check=True)
+    subprocess.run(["pip3", "install", "huggingface_hub[cli]"], check=True)
     subprocess.run(["pip3", "install", "omlmd==0.1.4"], check=True)
     subprocess.run(["brew", "install", "llama.cpp"], check=True)
 
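Dropping the `==0.25.1` pin lets pip resolve the latest `huggingface_hub` release at install time rather than a fixed one. The two behaviors side by side:

    pip3 install "huggingface_hub[cli]"           # new: latest available release
    pip3 install "huggingface_hub[cli]==0.25.1"   # old: pinned release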
ramalama/cli.py (13 changes: 10 additions & 3 deletions)

@@ -344,7 +344,11 @@ def serve_parser(subparsers):
         "-n", "--name", dest="name", default=_name(), help="name of container in which the Model will be run"
     )
     parser.add_argument("-p", "--port", default="8080", help="port for AI Model server to listen on")
-    parser.add_argument("--generate", choices=["quadlet"], help="generate spectified configuration format for running the AI Model as a service")
+    parser.add_argument(
+        "--generate",
+        choices=["quadlet"],
+        help="generate specified configuration format for running the AI Model as a service",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=serve_cli)
 
@@ -434,7 +438,7 @@ def get_store():
 
 
 def run_container(args):
-    if hasattr(args, "generate") and args.generate != "":
+    if hasattr(args, "generate") and args.generate:
         return False
 
     if args.nocontainer:
@@ -462,7 +466,7 @@ def run_container(args):
         conman,
         "run",
         "--rm",
-        "-it",
+        "-i",
         "--label",
         "RAMALAMA container",
         "--security-opt=label=disable",
@@ -475,6 +479,9 @@ def run_container(args):
         f"-v{wd}:/usr/share/ramalama/ramalama:ro",
     ]
 
+    if sys.stdout.isatty():
+        conman_args += ["-t"]
+
     if hasattr(args, "detach") and args.detach is True:
         conman_args += ["-d"]
 
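Splitting `-it` into an unconditional `-i` plus a conditional `-t` matters when output is piped: allocating a pseudo-terminal for a non-interactive stdout inserts carriage returns and breaks downstream tooling. Roughly, the resulting behavior (illustrative commands, not the exact argument list ramalama builds):

    # stdout is a terminal: ramalama adds both -i and -t to the podman command
    podman run --rm -i -t IMAGE ...

    # stdout is piped or redirected: only -i is passed, so output stays clean
    ramalama run tiny | tee answer.txt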
ramalama/common.py (2 changes: 1 addition & 1 deletion)

@@ -40,7 +40,7 @@ def exec_cmd(args, stderr=True):
 
     if not stderr:
         # Redirecting stderr to /dev/null
-        with open(os.devnull, 'w') as devnull:
+        with open(os.devnull, "w") as devnull:
             os.dup2(devnull.fileno(), sys.stderr.fileno())
 
     try:
ramalama/model.py (32 changes: 18 additions & 14 deletions)

@@ -110,17 +110,17 @@ def serve(self, args):
 
         exec_cmd(exec_args)
 
-
     def quadlet(self, args, exec_args):
-        port_string=""
+        port_string = ""
         if hasattr(args, "port"):
-            port_string=f"PublishPort={args.port}"
+            port_string = f"PublishPort={args.port}"
 
-        name_string=""
+        name_string = ""
         if hasattr(args, "name") and args.name != "":
-            name_string=f"Name={args.name}"
+            name_string = f"Name={args.name}"
 
-        print("""
+        print(
+            """
 [Unit]
 Description=RamaLama %s AI Model Service
 After=local-fs.target
@@ -141,11 +141,15 @@ def quadlet(self, args, exec_args):
 [Install]
 # Start by default on boot
 WantedBy=multi-user.target default.target
-""" % (args.UNRESOLVED_MODEL,
-       self.type,
-       " ".join(exec_args),
-       default_image(),
-       name_string,
-       find_working_directory(),
-       sys.argv[0],
-       port_string))
+"""
+            % (
+                args.UNRESOLVED_MODEL,
+                self.type,
+                " ".join(exec_args),
+                default_image(),
+                name_string,
+                find_working_directory(),
+                sys.argv[0],
+                port_string,
+            )
+        )
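With `--generate=quadlet`, serve prints a quadlet unit to stdout instead of starting a container. A sketch of the output, assembled only from the template fragments visible in this diff and the assertions in the tests (the `[Container]` body is elided, and the name/port values are illustrative):

    $ ramalama serve --name=mymodel --port 1234 --generate=quadlet tiny
    [Unit]
    Description=RamaLama tiny AI Model Service
    After=local-fs.target
    ...
    PublishPort=1234
    Name=mymodel
    ...
    [Install]
    # Start by default on boot
    WantedBy=multi-user.target default.target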
test/system/010-list.bats (1 change: 1 addition & 0 deletions)

@@ -5,6 +5,7 @@ load helpers
 @test "ramalama list - basic output" {
     headings="NAME *MODIFIED *SIZE"
 
+    run_ramalama pull ollama://tinyllama
     run_ramalama list
     is "${lines[0]}" "$headings" "header line"
 
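Pulling first guarantees the store is not empty on a fresh CI runner, so list assertions beyond the header line presumably have a row to match. The equivalent manual steps:

    ramalama pull ollama://tinyllama   # ensure at least one model is present
    ramalama list                      # header line plus the tinyllama row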
test/system/030-run.bats (2 changes: 1 addition & 1 deletion)

@@ -6,7 +6,7 @@ load helpers
     model=m_$(safename)
     image=m_$(safename)
 
-    verify_begin="podman run --rm -it --label \"RAMALAMA container\" --security-opt=label=disable -e RAMALAMA_TRANSPORT --name"
+    verify_begin="podman run --rm -i --label \"RAMALAMA container\" --security-opt=label=disable -e RAMALAMA_TRANSPORT --name"
 
     run_ramalama --dryrun run ${model}
     is "$output" "${verify_begin} ramalama_.*" "dryrun correct"
test/system/040-serve.bats (8 changes: 6 additions & 2 deletions)

@@ -2,7 +2,7 @@
 
 load helpers
 
-verify_begin="podman run --rm -it --label \"RAMALAMA container\" --security-opt=label=disable -e RAMALAMA_TRANSPORT --name"
+verify_begin="podman run --rm -i --label \"RAMALAMA container\" --security-opt=label=disable -e RAMALAMA_TRANSPORT --name"
 
 @test "ramalama --dryrun serve basic output" {
     model=m_$(safename)
@@ -39,6 +39,7 @@ verify_begin="podman run --rm -i --label \"RAMALAMA container\" --security-opt=
 
     run_ramalama serve --name ${container1} --detach ${model}
     cid="$output"
+    run podman wait --condition=running $cid
 
     run_ramalama ps
     is "$output" ".*${container1}" "list correct"
@@ -49,6 +50,7 @@ verify_begin="podman run --rm -i --label \"RAMALAMA container\" --security-opt=
 
     run_ramalama serve --name ${container2} -d ${model}
     cid="$output"
+    run podman wait --condition=running $cid
     run_ramalama containers -n
     is "$output" ".*${cid:0:10}" "list correct with cid"
     run_ramalama ps --noheading
@@ -64,9 +66,11 @@ verify_begin="podman run --rm -i --label \"RAMALAMA container\" --security-opt=
 
     run_ramalama serve --detach ${model}
     cid="$output"
+    run podman wait --condition=running $cid
 
     run_ramalama serve -p 8081 --detach ${model}
     cid="$output"
+    run podman wait --condition=running $cid
 
     run_ramalama containers --noheading
     is ${#lines[@]} 2 "two containers should be running"
@@ -94,7 +98,7 @@ verify_begin="podman run --rm -i --label \"RAMALAMA container\" --security-opt=
 @test "ramalama serve --generate=quadlet" {
     model=tiny
     name=c_$(safename)
-
+    run_ramalama pull ${model}
     run_ramalama serve --name=${name} --port 1234 --generate=quadlet ${model}
     is "$output" ".*PublishPort=1234" "PublishPort should match"
     is "$output" ".*Name=${name}" "Quadlet should have name field"
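`serve --detach` returns as soon as the container is created, so a following `ramalama ps` can race against container startup; `podman wait --condition=running` blocks until the container actually reaches the running state. The same pattern outside bats (image name illustrative):

    cid=$(podman run -d quay.io/libpod/testimage:latest sleep 100)
    podman wait --condition=running "$cid"   # blocks until state is running
    podman ps --filter "id=$cid"             # now reliably lists the container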
test/system/helpers.bash (19 changes: 1 addition & 18 deletions)

@@ -252,8 +252,6 @@ function clean_setup() {
         "rm -t 0 --all --force --ignore"
     )
     for action in "${actions[@]}"; do
-        #FIXME _run_ramalama_quiet $action
-
         # The -f commands should never exit nonzero, but if they do we want
         # to know about it.
         # FIXME: someday: also test for [[ -n "$output" ]] - can't do this
@@ -275,14 +273,6 @@ function clean_setup() {
         fi
     done
 
-    # Clean up all models except those desired.
-    # 2023-06-26 REMINDER: it is tempting to think that this is clunky,
-    # wouldn't it be safer/cleaner to just 'rm -a' then '_prefetch $IMAGE'?
-    # Yes, but it's also tremendously slower: 29m for a CI run, to 39m.
-    # Image loads are slow.
-    found_needed_image=
-    _run_ramalama_quiet list
-
     for line in "${lines[@]}"; do
         set $line
         if [[ "$1" == "$RAMALAMA_TEST_IMAGE_FQN" ]]; then
@@ -826,14 +816,7 @@ function random_string() {
 # String is lower-case so it can be used as an image name
 #
 function safename() {
-    # FIXME: I don't think these can ever fail. Remove checks once I'm sure.
-    test -n "$BATS_SUITE_TMPDIR"
-    test -n "$BATS_SUITE_TEST_NUMBER"
-    safenamepath=$BATS_SUITE_TMPDIR/.safename.$BATS_SUITE_TEST_NUMBER
-    if [[ ! -e $safenamepath ]]; then
-        echo -n "t${BATS_SUITE_TEST_NUMBER}-$(random_string 8 | tr A-Z a-z)" >$safenamepath
-    fi
-    cat $safenamepath
+    echo -n "$(random_string 8 | tr A-Z a-z)"
 }
 
 #########################
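Since `safename` no longer caches its result under `$BATS_SUITE_TMPDIR`, each call now returns a fresh random name; a test that needs a stable name must capture it once (sketch, variable names illustrative):

    name=$(safename)   # capture once; repeated calls now yield different names
    run_ramalama serve --name ${name} --detach ${model}
    run_ramalama stop ${name}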
