From 0c98a44257fd8303ac9f68c4cca7013d80cb3af2 Mon Sep 17 00:00:00 2001
From: Zhao Shenyang
Date: Wed, 28 Aug 2024 18:07:55 +0800
Subject: [PATCH] init commit

---
 .gitignore                            | 161 ++++++++++++++++++++++++++
 llama3.1-8b-instruct/.bentoignore     |   5 +
 llama3.1-8b-instruct/bentofile.yaml   |  14 +++
 llama3.1-8b-instruct/requirements.txt |   4 +
 llama3.1-8b-instruct/service.py       |  77 ++++++++++++
 5 files changed, 261 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 llama3.1-8b-instruct/.bentoignore
 create mode 100644 llama3.1-8b-instruct/bentofile.yaml
 create mode 100644 llama3.1-8b-instruct/requirements.txt
 create mode 100644 llama3.1-8b-instruct/service.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7751b09
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,161 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+*.whl
diff --git a/llama3.1-8b-instruct/.bentoignore b/llama3.1-8b-instruct/.bentoignore
new file mode 100644
index 0000000..d9cf115
--- /dev/null
+++ b/llama3.1-8b-instruct/.bentoignore
@@ -0,0 +1,5 @@
+__pycache__/
+*.py[cod]
+*$py.class
+.ipynb_checkpoints
+venv/
diff --git a/llama3.1-8b-instruct/bentofile.yaml b/llama3.1-8b-instruct/bentofile.yaml
new file mode 100644
index 0000000..84b1a70
--- /dev/null
+++ b/llama3.1-8b-instruct/bentofile.yaml
@@ -0,0 +1,14 @@
+service: 'service:DeepSpeed'
+labels:
+  owner: bentoml-team
+  stage: demo
+include:
+  - '*.py'
+  - 'bentovllm_openai/*.py'
+python:
+  requirements_txt: './requirements.txt'
+  lock_packages: false
+envs:
+  - name: HF_TOKEN
+docker:
+  python_version: 3.11
diff --git a/llama3.1-8b-instruct/requirements.txt b/llama3.1-8b-instruct/requirements.txt
new file mode 100644
index 0000000..c590ddc
--- /dev/null
+++ b/llama3.1-8b-instruct/requirements.txt
@@ -0,0 +1,4 @@
+accelerate==0.33.0
+bentoml>=1.3.2
+deepspeed==0.15.0
+deepspeed-mii==0.3.0
diff --git a/llama3.1-8b-instruct/service.py b/llama3.1-8b-instruct/service.py
new file mode 100644
index 0000000..5c9f722
--- /dev/null
+++ b/llama3.1-8b-instruct/service.py
@@ -0,0 +1,77 @@
+import uuid
+from typing import AsyncGenerator, Optional
+
+import bentoml
+from annotated_types import Ge, Le
+from typing_extensions import Annotated
+
+# import nest_asyncio
+# nest_asyncio.apply()
+
+
+MAX_TOKENS = 1024
+SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
+
+PROMPT_TEMPLATE = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
+
+{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
+
+{user_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
+"""
+
+MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+
+@bentoml.service(
+    name="deepspeed-llama3.1-8b-instruct-service",
+    traffic={
+        "timeout": 300,
+        "concurrency": 256,  # Matches the default max_num_seqs in the VLLM engine
+    },
+    resources={
+        "gpu": 1,
+        "gpu_type": "nvidia-l4",
+    },
+)
+class DeepSpeed:
+
+    def __init__(self) -> None:
+        import mii
+        from transformers import AutoTokenizer
+
+        import asyncio
+        self.event_loop = asyncio.get_event_loop()
+        self.server = mii.serve(MODEL_ID)
+        self.client = mii.client(MODEL_ID)
+
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+        self.stop_token_ids = [
+            tokenizer.eos_token_id,
+            tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+        ]
+
+    @bentoml.api
+    async def generate(
+        self,
+        prompt: str = "Explain superconductors in plain English",
+        system_prompt: Optional[str] = SYSTEM_PROMPT,
+        max_tokens: Annotated[int, Ge(128), Le(MAX_TOKENS)] = MAX_TOKENS,
+    ) -> AsyncGenerator[str, None]:
+
+        if system_prompt is None:
+            system_prompt = SYSTEM_PROMPT
+        prompt = PROMPT_TEMPLATE.format(user_prompt=prompt, system_prompt=system_prompt)
+
+        # stream still WIP
+        stream = self.client._request_async_response_stream([prompt], max_length=max_tokens)
+        async for resp in stream:
+            yield resp[0].generated_text
+
+
+    @bentoml.on_shutdown
+    def shutdown(self):
+        print("shutting down!")
+        self.client.terminate_server()
+        print("shutdown finished!")
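
To smoke-test the service added in this patch, it can be served locally and the `generate` endpoint called over HTTP. The snippet below is a minimal client sketch and not part of the commit; it assumes BentoML 1.3+ is installed, HF_TOKEN is exported (the bentofile declares that env var, and the Llama 3.1 weights are gated), the server was started with `bentoml serve .` from the llama3.1-8b-instruct/ directory, and it listens on the default address http://localhost:3000.

# Minimal client sketch (not part of this commit).
# Assumed setup: cd llama3.1-8b-instruct && HF_TOKEN=<your token> bentoml serve .
import bentoml

with bentoml.SyncHTTPClient("http://localhost:3000") as client:
    # `generate` is declared as an AsyncGenerator API in service.py,
    # so the client call yields text chunks as they are produced.
    for chunk in client.generate(
        prompt="Explain superconductors in plain English",
        max_tokens=512,
    ):
        print(chunk, end="", flush=True)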