Commit b208924

Merge pull request #1 from Cinnamon/misc/config-model-from-file

Misc/config model from file

lone17 authored Mar 28, 2024
2 parents d22ae88 + e3498a4
Showing 18 changed files with 145 additions and 73 deletions.
18 changes: 18 additions & 0 deletions .env
@@ -0,0 +1,18 @@
# settings for OpenAI
OPENAI_API_BASE=https://api.openai.com/v1
OPENAI_API_KEY=
OPENAI_CHAT_MODEL=gpt-3.5-turbo
OPENAI_EMBEDDINGS_MODEL=text-embedding-ada-002

# settings for Azure OpenAI
AZURE_OPENAI_ENDPOINT=
AZURE_OPENAI_API_KEY=
OPENAI_API_VERSION=2024-02-15-preview
AZURE_OPENAI_CHAT_DEPLOYMENT=gpt-35-turbo
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=text-embedding-ada-002

# settings for Cohere
COHERE_API_KEY=

# settings for local models
LOCAL_MODEL=
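
These keys are read through python-decouple's config() helper, which this PR adopts elsewhere (see scripts/serve_local.py below): real environment variables take precedence, then the .env file, then the coded default. A minimal sketch of reading these settings:

# Sketch only: python-decouple checks os.environ first, then .env, then the default.
from decouple import config

openai_api_key = config("OPENAI_API_KEY", default="")
chat_model = config("OPENAI_CHAT_MODEL", default="gpt-3.5-turbo")

if not openai_api_key:
    print("OPENAI_API_KEY is empty; OpenAI-backed options will be skipped.")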
Binary file removed .env.secret
4 changes: 1 addition & 3 deletions .gitignore
@@ -332,7 +332,6 @@ celerybeat.pid
*.sage.py

# Environments
.env
.venv
env/
venv/
@@ -457,7 +456,6 @@
logs/
.gitsecret/keys/random_seed
!*.secret
.env
.envrc

S.gpg-agent*
@@ -467,4 +465,4 @@ storage/*

# Conda and env storages
*install_dir/
doc_env
doc_env/
Binary file removed .gitsecret/keys/pubring.kbx
Binary file removed .gitsecret/keys/trustdb.gpg
1 change: 0 additions & 1 deletion .gitsecret/paths/mapping.cfg

This file was deleted.

4 changes: 2 additions & 2 deletions libs/kotaemon/kotaemon/embeddings/langchain_based.py
@@ -137,14 +137,14 @@ def __init__(
azure_endpoint: Optional[str] = None,
deployment: Optional[str] = None,
openai_api_key: Optional[str] = None,
openai_api_version: Optional[str] = None,
api_version: Optional[str] = None,
request_timeout: Optional[float] = None,
**params,
):
super().__init__(
azure_endpoint=azure_endpoint,
deployment=deployment,
openai_api_version=openai_api_version,
api_version=api_version,
openai_api_key=openai_api_key,
request_timeout=request_timeout,
**params,
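
The rename matches the api_version keyword accepted by the newer langchain_openai classes (previously spelled openai_api_version). A usage sketch; the endpoint, key, and deployment values are placeholders, not taken from this PR:

# Sketch only; all values below are placeholders.
from kotaemon.embeddings import AzureOpenAIEmbeddings

embeddings = AzureOpenAIEmbeddings(
    azure_endpoint="https://<your-resource>.openai.azure.com/",
    openai_api_key="<your-key>",
    api_version="2024-02-15-preview",  # formerly openai_api_version
    deployment="text-embedding-ada-002",
)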
3 changes: 2 additions & 1 deletion libs/kotaemon/kotaemon/llms/__init__.py
@@ -2,7 +2,7 @@

from .base import BaseLLM
from .branching import GatedBranchingPipeline, SimpleBranchingPipeline
from .chats import AzureChatOpenAI, ChatLLM, EndpointChatLLM, LlamaCppChat
from .chats import AzureChatOpenAI, ChatLLM, ChatOpenAI, EndpointChatLLM, LlamaCppChat
from .completions import LLM, AzureOpenAI, LlamaCpp, OpenAI
from .cot import ManualSequentialChainOfThought, Thought
from .linear import GatedLinearPipeline, SimpleLinearPipeline
@@ -17,6 +17,7 @@
"HumanMessage",
"AIMessage",
"SystemMessage",
"ChatOpenAI",
"AzureChatOpenAI",
"LlamaCppChat",
# completion-specific components
3 changes: 2 additions & 1 deletion libs/kotaemon/kotaemon/llms/chats/__init__.py
@@ -1,11 +1,12 @@
from .base import ChatLLM
from .endpoint_based import EndpointChatLLM
from .langchain_based import AzureChatOpenAI, LCChatMixin
from .langchain_based import AzureChatOpenAI, ChatOpenAI, LCChatMixin
from .llamacpp import LlamaCppChat

__all__ = [
"ChatLLM",
"EndpointChatLLM",
"ChatOpenAI",
"AzureChatOpenAI",
"LCChatMixin",
"LlamaCppChat",
29 changes: 29 additions & 0 deletions libs/kotaemon/kotaemon/llms/chats/langchain_based.py
@@ -165,7 +165,36 @@ def specs(self, path: str):
raise ValueError(f"Invalid param {path}")


class ChatOpenAI(LCChatMixin, ChatLLM): # type: ignore
def __init__(
self,
openai_api_base: str | None = None,
openai_api_key: str | None = None,
model: str | None = None,
temperature: float = 0.7,
request_timeout: float | None = None,
**params,
):
super().__init__(
openai_api_base=openai_api_base,
openai_api_key=openai_api_key,
model=model,
temperature=temperature,
request_timeout=request_timeout,
**params,
)

def _get_lc_class(self):
try:
from langchain_openai import ChatOpenAI
except ImportError:
from langchain.chat_models import ChatOpenAI

return ChatOpenAI


class AzureChatOpenAI(LCChatMixin, ChatLLM): # type: ignore

def __init__(
self,
azure_endpoint: str | None = None,
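
The new ChatOpenAI wrapper mirrors AzureChatOpenAI below it, preferring langchain_openai.ChatOpenAI and falling back to the legacy langchain.chat_models import. A usage sketch, assuming kotaemon's callable LLM interface returns a response object with a .text attribute; the key is a placeholder:

# Sketch only; the API key is a placeholder.
from kotaemon.llms import ChatOpenAI

llm = ChatOpenAI(
    openai_api_key="<your-key>",
    model="gpt-3.5-turbo",
    temperature=0,
)
response = llm("Say hello in one word.")  # assumes LCChatMixin makes instances callable
print(response.text)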
144 changes: 86 additions & 58 deletions libs/ktem/flowsettings.py
@@ -31,70 +31,98 @@
"__type__": "kotaemon.storages.ChromaVectorStore",
"path": str(user_cache_dir / "vectorstore"),
}
KH_LLMS = {
# example for using Azure OpenAI, the config variables can set as environment
# variables or in the .env file
# "gpt4": {
# "def": {
# "__type__": "kotaemon.llms.AzureChatOpenAI",
# "temperature": 0,
# "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
# "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
# "openai_api_version": config("OPENAI_API_VERSION", default=""),
# "deployment_name": "<your deployment name>",
# "stream": True,
# },
# "accuracy": 10,
# "cost": 10,
# "default": False,
# },
# "gpt35": {
# "def": {
# "__type__": "kotaemon.llms.AzureChatOpenAI",
# "temperature": 0,
# "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
# "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
# "openai_api_version": config("OPENAI_API_VERSION", default=""),
# "deployment_name": "<your deployment name>",
# "request_timeout": 10,
# "stream": False,
# },
# "accuracy": 5,
# "cost": 5,
# "default": False,
# },
"local": {
KH_LLMS = {}
KH_EMBEDDINGS = {}

# populate options from config
if config("AZURE_OPENAI_API_KEY", default="") and config(
"AZURE_OPENAI_ENDPOINT", default=""
):
if config("AZURE_OPENAI_CHAT_DEPLOYMENT", default=""):
KH_LLMS["azure"] = {
"def": {
"__type__": "kotaemon.llms.AzureChatOpenAI",
"temperature": 0,
"azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
"openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
"api_version": config("OPENAI_API_VERSION", default="")
or "2024-02-15-preview",
"deployment_name": config("AZURE_OPENAI_CHAT_DEPLOYMENT", default=""),
"request_timeout": 10,
"stream": False,
},
"default": False,
"accuracy": 5,
"cost": 5,
}
if config("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT", default=""):
KH_EMBEDDINGS["azure"] = {
"def": {
"__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings",
"azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
"openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
"api_version": config("OPENAI_API_VERSION", default="")
or "2024-02-15-preview",
"deployment": config("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT", default=""),
"request_timeout": 10,
"chunk_size": 16,
},
"default": False,
"accuracy": 5,
"cost": 5,
}

if config("OPENAI_API_KEY", default=""):
KH_LLMS["openai"] = {
"def": {
"__type__": "kotaemon.llms.EndpointChatLLM",
"endpoint_url": "http://localhost:31415/v1/chat/completions",
"__type__": "kotaemon.llms.ChatOpenAI",
"temperature": 0,
"openai_api_base": config("OPENAI_API_BASE", default="")
or "https://api.openai.com/v1",
"openai_api_key": config("OPENAI_API_KEY", default=""),
"model": config("OPENAI_CHAT_MODEL", default="") or "gpt-3.5-turbo",
"request_timeout": 10,
"stream": False,
},
"default": False,
},
}
KH_EMBEDDINGS = {
# example for using Azure OpenAI, the config variables can set as environment
# variables or in the .env file
# "ada": {
# "def": {
# "__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings",
# "model": "text-embedding-ada-002",
# "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
# "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
# "deployment": "<your deployment name>",
# "chunk_size": 16,
# },
# "accuracy": 5,
# "cost": 5,
# "default": True,
# },
"local": {
}
if len(KH_EMBEDDINGS) < 1:
KH_EMBEDDINGS["openai"] = {
"def": {
"__type__": "kotaemon.embeddings.OpenAIEmbeddings",
"openai_api_base": config("OPENAI_API_BASE", default="")
or "https://api.openai.com/v1",
"openai_api_key": config("OPENAI_API_KEY", default=""),
"model": config(
"OPENAI_EMBEDDINGS_MODEL", default="text-embedding-ada-002"
)
or "text-embedding-ada-002",
"request_timeout": 10,
"chunk_size": 16,
},
"default": False,
}

if config("LOCAL_MODEL", default=""):
KH_LLMS["local"] = {
"def": {
"__type__": "kotaemon.embeddings.EndpointEmbeddings",
"endpoint_url": "http://localhost:31415/v1/embeddings",
"__type__": "kotaemon.llms.EndpointChatLLM",
"endpoint_url": "http://localhost:31415/v1/chat/completions",
},
"default": False,
},
}
"cost": 0,
}
if len(KH_EMBEDDINGS) < 1:
KH_EMBEDDINGS["local"] = {
"def": {
"__type__": "kotaemon.embeddings.EndpointEmbeddings",
"endpoint_url": "http://localhost:31415/v1/embeddings",
},
"default": False,
"cost": 0,
}


KH_REASONINGS = ["ktem.reasoning.simple.FullQAPipeline"]


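
Two details above are easy to miss: a backend is registered only when its credentials are present, and config(NAME, default="") or fallback treats a blank .env entry as unset. Azure also wins ties, since the openai and local embeddings are added only if len(KH_EMBEDDINGS) < 1. A minimal sketch of the fallback idiom:

# Sketch of the idiom used above: a blank .env value falls through to the fallback.
from decouple import config

api_version = config("OPENAI_API_VERSION", default="") or "2024-02-15-preview"
model = config("OPENAI_CHAT_MODEL", default="") or "gpt-3.5-turbo"
# If OPENAI_CHAT_MODEL= is present but empty, config() returns "" and the
# right-hand fallback applies; config(..., default=...) alone would keep "".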
File renamed without changes.
File renamed without changes.
6 changes: 2 additions & 4 deletions scripts/serve_local.py
@@ -3,9 +3,7 @@
from inspect import currentframe, getframeinfo
from pathlib import Path

import dotenv

configs = dotenv.dotenv_values(".env")
from decouple import config

system_name = platform.system()

@@ -53,7 +51,7 @@ def guess_chat_format(local_model_file):


def main():
local_model_file = configs.get("LOCAL_MODEL", "")
local_model_file = config("LOCAL_MODEL", default="")

if not local_model_file:
print("LOCAL_MODEL not set in the `.env` file.")
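
The move from dotenv.dotenv_values to decouple.config changes lookup behaviour, not just style: dotenv_values(".env") reads only the file, while config() also honours variables already exported in the shell. A before/after sketch:

# Before: reads the .env file only; exported environment variables are ignored.
import dotenv
local_model_file = dotenv.dotenv_values(".env").get("LOCAL_MODEL", "")

# After (this PR): os.environ takes precedence over the .env file.
from decouple import config
local_model_file = config("LOCAL_MODEL", default="")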
2 changes: 1 addition & 1 deletion scripts/server_llamacpp_linux.sh
@@ -87,7 +87,7 @@ activate_environment

# install dependencies
# ver 0.2.56 produces segment error for /embeddings on MacOS
python -m pip install llama-cpp-python[server]!=0.2.56
python -m pip install llama-cpp-python[server]==0.2.55

# start the server with passed params
python -m llama_cpp.server $@
2 changes: 1 addition & 1 deletion scripts/server_llamacpp_macos.sh
@@ -88,7 +88,7 @@ activate_environment

# install dependencies
# ver 0.2.56 produces segment error for /embeddings on MacOS
python -m pip install llama-cpp-python[server]!=0.2.56
python -m pip install llama-cpp-python[server]==0.2.55

# start the server with passed params
python -m llama_cpp.server $@
2 changes: 1 addition & 1 deletion scripts/server_llamacpp_windows.bat
@@ -28,7 +28,7 @@ call :activate_environment

@rem install dependencies
@rem ver 0.2.56 produces segment error for /embeddings on MacOS
call python -m pip install llama-cpp-python[server]!=0.2.56
call python -m pip install llama-cpp-python[server]==0.2.55

@REM @rem start the server with passed params
call python -m llama_cpp.server %*
