From da86fa463f9ea32aac81b450169e508809235220 Mon Sep 17 00:00:00 2001
From: ian
Date: Wed, 27 Mar 2024 18:56:06 +0700
Subject: [PATCH 1/5] rename test dir

---
 libs/ktem/{khapptests => tests}/__init__.py                     | 0
 libs/ktem/{khapptests => tests}/resources/embedding_openai.json | 0
 libs/ktem/{khapptests => tests}/test_qa.py                      | 0
 3 files changed, 0 insertions(+), 0 deletions(-)
 rename libs/ktem/{khapptests => tests}/__init__.py (100%)
 rename libs/ktem/{khapptests => tests}/resources/embedding_openai.json (100%)
 rename libs/ktem/{khapptests => tests}/test_qa.py (100%)

diff --git a/libs/ktem/khapptests/__init__.py b/libs/ktem/tests/__init__.py
similarity index 100%
rename from libs/ktem/khapptests/__init__.py
rename to libs/ktem/tests/__init__.py
diff --git a/libs/ktem/khapptests/resources/embedding_openai.json b/libs/ktem/tests/resources/embedding_openai.json
similarity index 100%
rename from libs/ktem/khapptests/resources/embedding_openai.json
rename to libs/ktem/tests/resources/embedding_openai.json
diff --git a/libs/ktem/khapptests/test_qa.py b/libs/ktem/tests/test_qa.py
similarity index 100%
rename from libs/ktem/khapptests/test_qa.py
rename to libs/ktem/tests/test_qa.py

From c6db7f5d01c97538dcc115e712b2b8ab3e41193b Mon Sep 17 00:00:00 2001
From: ian
Date: Wed, 27 Mar 2024 18:58:19 +0700
Subject: [PATCH 2/5] pin llama-cpp-python to 0.2.55 due to
 https://github.com/abetlen/llama-cpp-python/issues/1288

---
 scripts/serve_local.py              | 6 ++----
 scripts/server_llamacpp_linux.sh    | 2 +-
 scripts/server_llamacpp_macos.sh    | 2 +-
 scripts/server_llamacpp_windows.bat | 2 +-
 4 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/scripts/serve_local.py b/scripts/serve_local.py
index 61b8f778..f1f4831f 100644
--- a/scripts/serve_local.py
+++ b/scripts/serve_local.py
@@ -3,9 +3,7 @@
 from inspect import currentframe, getframeinfo
 from pathlib import Path
 
-import dotenv
-
-configs = dotenv.dotenv_values(".env")
+from decouple import config
 
 system_name = platform.system()
 
@@ -53,7 +51,7 @@ def guess_chat_format(local_model_file):
 
 
 def main():
-    local_model_file = configs.get("LOCAL_MODEL", "")
+    local_model_file = config("LOCAL_MODEL", default="")
 
     if not local_model_file:
         print("LOCAL_MODEL not set in the `.env` file.")
diff --git a/scripts/server_llamacpp_linux.sh b/scripts/server_llamacpp_linux.sh
index f72ccde5..a45e670a 100755
--- a/scripts/server_llamacpp_linux.sh
+++ b/scripts/server_llamacpp_linux.sh
@@ -87,7 +87,7 @@ activate_environment
 
 # install dependencies
 # ver 0.2.56 produces segment error for /embeddings on MacOS
-python -m pip install llama-cpp-python[server]!=0.2.56
+python -m pip install llama-cpp-python[server]==0.2.55
 
 # start the server with passed params
 python -m llama_cpp.server $@
diff --git a/scripts/server_llamacpp_macos.sh b/scripts/server_llamacpp_macos.sh
index 4ed9ac2a..13d0784e 100755
--- a/scripts/server_llamacpp_macos.sh
+++ b/scripts/server_llamacpp_macos.sh
@@ -88,7 +88,7 @@ activate_environment
 
 # install dependencies
 # ver 0.2.56 produces segment error for /embeddings on MacOS
-python -m pip install llama-cpp-python[server]!=0.2.56
+python -m pip install llama-cpp-python[server]==0.2.55
 
 # start the server with passed params
 python -m llama_cpp.server $@
diff --git a/scripts/server_llamacpp_windows.bat b/scripts/server_llamacpp_windows.bat
index 48779dbe..97c12924 100644
--- a/scripts/server_llamacpp_windows.bat
+++ b/scripts/server_llamacpp_windows.bat
@@ -28,7 +28,7 @@ call :activate_environment
 
 @rem install dependencies
 @rem ver 0.2.56 produces segment error for /embeddings on MacOS
-call python -m pip install llama-cpp-python[server]!=0.2.56
+call python -m pip install llama-cpp-python[server]==0.2.55
 
 @REM @rem start the server with passed params
 call python -m llama_cpp.server %*
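A note on the serve_local.py hunk above: python-decouple resolves a key from the process environment first and then from the `.env` file, so the script no longer parses `.env` by hand. A minimal sketch of the lookup it now performs (the printed messages below are illustrative, not part of the patch):

    # Sketch: how scripts/serve_local.py resolves LOCAL_MODEL after this patch.
    # Assumes python-decouple is installed and a .env file exists in the working dir.
    from decouple import config

    # config() checks os.environ first, then .env; default="" applies only when
    # the key is missing from both sources.
    local_model_file = config("LOCAL_MODEL", default="")

    if not local_model_file:
        print("LOCAL_MODEL not set in the `.env` file.")
    else:
        print(f"Serving local model: {local_model_file}")
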
From c1b1371a68216e8fc8c2e1f37a9dec989dd8a562 Mon Sep 17 00:00:00 2001
From: ian
Date: Wed, 27 Mar 2024 19:04:48 +0700
Subject: [PATCH 3/5] enable config through .env

---
 .env                                          |  18 +++
 .../kotaemon/embeddings/langchain_based.py    |   4 +-
 libs/kotaemon/kotaemon/llms/__init__.py       |   3 +-
 libs/kotaemon/kotaemon/llms/chats/__init__.py |   3 +-
 .../kotaemon/llms/chats/langchain_based.py    |  29 ++++
 libs/ktem/flowsettings.py                     | 144 +++++++++++-------
 6 files changed, 139 insertions(+), 62 deletions(-)
 create mode 100644 .env

diff --git a/.env b/.env
new file mode 100644
index 00000000..e033553a
--- /dev/null
+++ b/.env
@@ -0,0 +1,18 @@
+# settings for OpenAI
+OPENAI_API_BASE=https://api.openai.com/v1
+OPENAI_API_KEY=
+OPENAI_CHAT_MODEL=gpt-3.5-turbo
+OPENAI_EMBEDDINGS_MODEL=text-embedding-ada-002
+
+# settings for Azure OpenAI
+AZURE_OPENAI_ENDPOINT=
+AZURE_OPENAI_API_KEY=
+OPENAI_API_VERSION=2024-02-15-preview
+AZURE_OPENAI_CHAT_DEPLOYMENT=gpt-35-turbo
+AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=text-embedding-ada-002
+
+# settings for Cohere
+COHERE_API_KEY=
+
+# settings for local models
+LOCAL_MODEL=
diff --git a/libs/kotaemon/kotaemon/embeddings/langchain_based.py b/libs/kotaemon/kotaemon/embeddings/langchain_based.py
index 9bd0e7b5..14cb2a82 100644
--- a/libs/kotaemon/kotaemon/embeddings/langchain_based.py
+++ b/libs/kotaemon/kotaemon/embeddings/langchain_based.py
@@ -137,14 +137,14 @@ def __init__(
         azure_endpoint: Optional[str] = None,
         deployment: Optional[str] = None,
         openai_api_key: Optional[str] = None,
-        openai_api_version: Optional[str] = None,
+        api_version: Optional[str] = None,
         request_timeout: Optional[float] = None,
         **params,
     ):
         super().__init__(
             azure_endpoint=azure_endpoint,
             deployment=deployment,
-            openai_api_version=openai_api_version,
+            api_version=api_version,
             openai_api_key=openai_api_key,
             request_timeout=request_timeout,
             **params,
diff --git a/libs/kotaemon/kotaemon/llms/__init__.py b/libs/kotaemon/kotaemon/llms/__init__.py
index 4e81d21e..d7547a67 100644
--- a/libs/kotaemon/kotaemon/llms/__init__.py
+++ b/libs/kotaemon/kotaemon/llms/__init__.py
@@ -2,7 +2,7 @@
 
 from .base import BaseLLM
 from .branching import GatedBranchingPipeline, SimpleBranchingPipeline
-from .chats import AzureChatOpenAI, ChatLLM, EndpointChatLLM, LlamaCppChat
+from .chats import AzureChatOpenAI, ChatLLM, ChatOpenAI, EndpointChatLLM, LlamaCppChat
 from .completions import LLM, AzureOpenAI, LlamaCpp, OpenAI
 from .cot import ManualSequentialChainOfThought, Thought
 from .linear import GatedLinearPipeline, SimpleLinearPipeline
@@ -17,6 +17,7 @@
     "HumanMessage",
     "AIMessage",
     "SystemMessage",
+    "ChatOpenAI",
     "AzureChatOpenAI",
     "LlamaCppChat",
     # completion-specific components
diff --git a/libs/kotaemon/kotaemon/llms/chats/__init__.py b/libs/kotaemon/kotaemon/llms/chats/__init__.py
index 53d44b2b..5b503176 100644
--- a/libs/kotaemon/kotaemon/llms/chats/__init__.py
+++ b/libs/kotaemon/kotaemon/llms/chats/__init__.py
@@ -1,11 +1,12 @@
 from .base import ChatLLM
 from .endpoint_based import EndpointChatLLM
-from .langchain_based import AzureChatOpenAI, LCChatMixin
+from .langchain_based import AzureChatOpenAI, ChatOpenAI, LCChatMixin
 from .llamacpp import LlamaCppChat
 
 __all__ = [
     "ChatLLM",
     "EndpointChatLLM",
+    "ChatOpenAI",
     "AzureChatOpenAI",
     "LCChatMixin",
     "LlamaCppChat",
diff --git a/libs/kotaemon/kotaemon/llms/chats/langchain_based.py b/libs/kotaemon/kotaemon/llms/chats/langchain_based.py
index 14064bae..6c87c720 100644
--- a/libs/kotaemon/kotaemon/llms/chats/langchain_based.py
+++ b/libs/kotaemon/kotaemon/llms/chats/langchain_based.py
@@ -165,7 +165,36 @@ def specs(self, path: str):
         raise ValueError(f"Invalid param {path}")
 
 
+class ChatOpenAI(LCChatMixin, ChatLLM):  # type: ignore
+    def __init__(
+        self,
+        openai_api_base: str | None = None,
+        openai_api_key: str | None = None,
+        model: str | None = None,
+        temperature: float = 0.7,
+        request_timeout: float | None = None,
+        **params,
+    ):
+        super().__init__(
+            openai_api_base=openai_api_base,
+            openai_api_key=openai_api_key,
+            model=model,
+            temperature=temperature,
+            request_timeout=request_timeout,
+            **params,
+        )
+
+    def _get_lc_class(self):
+        try:
+            from langchain_openai import ChatOpenAI
+        except ImportError:
+            from langchain.chat_models import ChatOpenAI
+
+        return ChatOpenAI
+
+
 class AzureChatOpenAI(LCChatMixin, ChatLLM):  # type: ignore
+
     def __init__(
         self,
         azure_endpoint: str | None = None,
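The ChatOpenAI wrapper added above mirrors the existing AzureChatOpenAI: constructor keywords are forwarded through LCChatMixin to the LangChain class returned by _get_lc_class(), preferring langchain_openai and falling back to the legacy langchain.chat_models import. A rough construction sketch (key and model values are placeholders; how the resulting object is invoked depends on the LCChatMixin interface and is not shown):

    # Sketch: building the new kotaemon ChatOpenAI wrapper directly.
    # The keyword names are exactly those accepted by the new __init__ above.
    from kotaemon.llms import ChatOpenAI

    llm = ChatOpenAI(
        openai_api_base="https://api.openai.com/v1",
        openai_api_key="sk-placeholder",  # placeholder; flowsettings reads the real key from .env
        model="gpt-3.5-turbo",
        temperature=0,
        request_timeout=10,
    )
    # All kwargs pass straight to langchain_openai.ChatOpenAI, or to
    # langchain.chat_models.ChatOpenAI when langchain-openai is not installed.
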
"request_timeout": 10, + "chunk_size": 16, + }, + "default": False, + "accuracy": 5, + "cost": 5, + } + +if config("OPENAI_API_KEY", default=""): + KH_LLMS["openai"] = { "def": { - "__type__": "kotaemon.llms.EndpointChatLLM", - "endpoint_url": "http://localhost:31415/v1/chat/completions", + "__type__": "kotaemon.llms.ChatOpenAI", + "temperature": 0, + "openai_api_base": config("OPENAI_API_BASE", default="") + or "https://api.openai.com/v1", + "openai_api_key": config("OPENAI_API_KEY", default=""), + "model": config("OPENAI_CHAT_MODEL", default="") or "gpt-3.5-turbo", + "request_timeout": 10, + "stream": False, }, "default": False, - }, -} -KH_EMBEDDINGS = { - # example for using Azure OpenAI, the config variables can set as environment - # variables or in the .env file - # "ada": { - # "def": { - # "__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings", - # "model": "text-embedding-ada-002", - # "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""), - # "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""), - # "deployment": "", - # "chunk_size": 16, - # }, - # "accuracy": 5, - # "cost": 5, - # "default": True, - # }, - "local": { + } + if len(KH_EMBEDDINGS) < 1: + KH_EMBEDDINGS["openai"] = { + "def": { + "__type__": "kotaemon.embeddings.OpenAIEmbeddings", + "openai_api_base": config("OPENAI_API_BASE", default="") + or "https://api.openai.com/v1", + "openai_api_key": config("OPENAI_API_KEY", default=""), + "model": config( + "OPENAI_EMBEDDINGS_MODEL", default="text-embedding-ada-002" + ) + or "text-embedding-ada-002", + "request_timeout": 10, + "chunk_size": 16, + }, + "default": False, + } + +if config("LOCAL_MODEL", default=""): + KH_LLMS["local"] = { "def": { - "__type__": "kotaemon.embeddings.EndpointEmbeddings", - "endpoint_url": "http://localhost:31415/v1/embeddings", + "__type__": "kotaemon.llms.EndpointChatLLM", + "endpoint_url": "http://localhost:31415/v1/chat/completions", }, "default": False, - }, -} + "cost": 0, + } + if len(KH_EMBEDDINGS) < 1: + KH_EMBEDDINGS["local"] = { + "def": { + "__type__": "kotaemon.embeddings.EndpointEmbeddings", + "endpoint_url": "http://localhost:31415/v1/embeddings", + }, + "default": False, + "cost": 0, + } + + KH_REASONINGS = ["ktem.reasoning.simple.FullQAPipeline"] From b6ac35029fc73b12cc819c2631290e188dae8d62 Mon Sep 17 00:00:00 2001 From: ian Date: Thu, 28 Mar 2024 16:04:12 +0700 Subject: [PATCH 4/5] remove git secret --- .env.secret | Bin 2310 -> 0 bytes .gitignore | 4 +--- .gitsecret/keys/pubring.kbx | Bin 10879 -> 0 bytes .gitsecret/keys/trustdb.gpg | Bin 1200 -> 0 bytes .gitsecret/paths/mapping.cfg | 1 - 5 files changed, 1 insertion(+), 4 deletions(-) delete mode 100644 .env.secret delete mode 100644 .gitsecret/keys/pubring.kbx delete mode 100644 .gitsecret/keys/trustdb.gpg delete mode 100644 .gitsecret/paths/mapping.cfg diff --git a/.env.secret b/.env.secret deleted file mode 100644 index d4e172f8d947af0b554d06661c32b4f8e396a954..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2310 zcmV+h3HkPg0gMB9>i0uNB-FA23;rfnU%xy8IYlBnXSjH*HZ9TB+|z^v{rF6zxyoge z_-Gj`vyGI8dc8jLAyhC4@_xg-S*yDF@eQ$>bXdY8NHOfzO-7&=DbR0dZ3E#V5FKY| zc2k_n(aj(B!0hU4@>xJ77)w8G`tny{s13kf(hGO{s!4q0Gk-ldxA>GGOc7&%Pj<1odSzTJ?H?apbGd3 zZ0+41ff2zow4>2|A$xh2?iXDQVQl|Cw{Cda56OFDFiNh=2246ok{D4U2K>9^!`@o} z*5Tguly1@z4<=wzhL3B-wEOSvoMpd$3_{W!tc0t67=o2O(119kAwCsCEN%-;(%a<@ zOj|436xXb^y0A2S52EkEjcny!L_Joo1iq=#_Q$h|Z%B~Tz2O3n6;J`648$lsR- zJmfbv$3wN1+hfuDSpSc-vs|@^cE{Z1MLC7+7HOLrG|JzE3~F0&b|?AKX+Z`*`5%F1 
From b6ac35029fc73b12cc819c2631290e188dae8d62 Mon Sep 17 00:00:00 2001
From: ian
Date: Thu, 28 Mar 2024 16:04:12 +0700
Subject: [PATCH 4/5] remove git secret

---
 .env.secret                  | Bin 2310 -> 0 bytes
 .gitignore                   |   4 +---
 .gitsecret/keys/pubring.kbx  | Bin 10879 -> 0 bytes
 .gitsecret/keys/trustdb.gpg  | Bin 1200 -> 0 bytes
 .gitsecret/paths/mapping.cfg |   1 -
 5 files changed, 1 insertion(+), 4 deletions(-)
 delete mode 100644 .env.secret
 delete mode 100644 .gitsecret/keys/pubring.kbx
 delete mode 100644 .gitsecret/keys/trustdb.gpg
 delete mode 100644 .gitsecret/paths/mapping.cfg

diff --git a/.env.secret b/.env.secret
deleted file mode 100644
index d4e172f8d947af0b554d06661c32b4f8e396a954..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 2310
[GIT binary patch data omitted]
zRUW~&qNscotTvoB&+5@WUejq}xyVyT&`|`#Kc4Ug80V-~`72VQ6oZ~Fs*g%*k<&!^ z$UhqQLri+4(($NDCu+`Uro%gLF{$>gaqs;ioPSo`?iP`vLY%YbS!4I+> zq8kb~n8~$b-MJ1!*xO>2j*@l>qiHT9PDm_u_s%{H84mJK6Vjt`3GOmp>xYdi|F>56 zk_h~r-Tric_S;q;WZ|FfpV}N?aj`i`Wj2o61}+inLF;?52{+m>>aAUgIW|1}R8lE# zO6UXDudN1S5O?uC4F=N)n!_YHmsJI;`W_kDaNH7TE~uyBxQ`zq-V_KIyWFu~ITszd zP3J{Ac5)PpSmX9HIA(FXe5l^D5Q07;DAI7zZ`85=yt@7AL7(Z;6(d-x`KNzAvSEq$ zWps02udE*28nimvv;C>GtVyoC0c{x{J>~jmD)YU8OXux})-vUQE*y8pn(MAhtheps z*Zu}>horP+kQdEKnAWbCaQ-JDg>rjXv`RId`rQhGkI=B@KNQnMF)3Jk>58Qoq}`Y` z9nR{&=%%UgR@vX6((D{}TNQC?INqGBUzsN_y{j!bBp-XWFK?%ie_vm`>deQ6kh0`@ zRI2`%yH7QoXoTlyBf;vb!rBU9!H&0=fUq7r?@1#S!2$Cr@*!$xhO9*t9}yn3=-qrO zuIOu%JG_#|mSj77(_7dqwSKoMIWVCA$X`rB3mksdj%O2ynBeP|G5?8U{MXh04aX)^B3`f4uQ)cD;(dI>>&Y1sm0FI zx*X_Q1|S{dhK^=|*6MZ8cf?JOxAWl%3nT_o1h?-$e3E0a8Yxj$@~EuXO7814bTGRb zUKzA-E3EjKH|^!$fWPe3b+p-%Kq98udB1#MYYy>WcIyqo7J;nM~;&dUPw*uHYyBJ4X*-#dDE z$E3H&d@r%gS-Z>V*9O?GHQJ|U1m)Q+RHNDmopghE!I-%(S4s@>0B)vQ zx7Vf*^7{}TdPLafi|TUbRtIdZoLcoPPbmyEg`_;AdD0Swa456UG8;fSgy~D8_a7Ot zPjZ`Xu3C*UCmKMjOTxP! zx7*Q6VCkq%pD~ZPLS$u#*+o2n<_=)&5nd`){n|}QHCrdg?M-9%Z10qiE-`rcS}IL& zXX1UJ4^H?E7O8*!jvTK*{-;mp5^cTj=WxGt4GR6^cdQM=CT5W+JDU2UkDD45mo z3tBT%B|I#9kmbD;w^tsVlA6Ux0f`**Q=dH(#;U|7M#m=ud-0EmBy-Un01qI^&LDH% zo^@HfN!a@bDt(0scdp??=?)bjO&ZA@p?~m4EGz*WRZq&dxCW7*X;q!PZ2T`7)`5je%;sdac^Pr@~p9oC$z0&suRY<`2V*xeHJ?J(i-PiuHVUeo()o*&CAnV)Jm+ zpGzm$I=+^UR$psXlG$kvOncx2KKGAjwI1OBIYyGJ16d@IZ;s#2wRLL614~nk8!c6c zw(j-1Meb|{@%d*@TnM*f>4X>Qsz1>rwE3=SOE|Zd#dwyeoHV>B8_BShWcZ{ggWcEHGmT^vJnR2@c;fCWawWi(tsAuyT_C`$(br&|R$ diff --git a/.gitsecret/paths/mapping.cfg b/.gitsecret/paths/mapping.cfg deleted file mode 100644 index ae4cf293..00000000 --- a/.gitsecret/paths/mapping.cfg +++ /dev/null @@ -1 +0,0 @@ -.env:555d804179d7207ad6784a84afb88d2ec44f90ea3b7a061d0e38f9dd53fe7211 From e3498a4958b80a6d207d5e74a1af323b0b8e5b2b Mon Sep 17 00:00:00 2001 From: ian Date: Thu, 28 Mar 2024 16:27:05 +0700 Subject: [PATCH 5/5] rename ktem test dir --- libs/ktem/{tests => ktem_tests}/__init__.py | 0 libs/ktem/{tests => ktem_tests}/resources/embedding_openai.json | 0 libs/ktem/{tests => ktem_tests}/test_qa.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename libs/ktem/{tests => ktem_tests}/__init__.py (100%) rename libs/ktem/{tests => ktem_tests}/resources/embedding_openai.json (100%) rename libs/ktem/{tests => ktem_tests}/test_qa.py (100%) diff --git a/libs/ktem/tests/__init__.py b/libs/ktem/ktem_tests/__init__.py similarity index 100% rename from libs/ktem/tests/__init__.py rename to libs/ktem/ktem_tests/__init__.py diff --git a/libs/ktem/tests/resources/embedding_openai.json b/libs/ktem/ktem_tests/resources/embedding_openai.json similarity index 100% rename from libs/ktem/tests/resources/embedding_openai.json rename to libs/ktem/ktem_tests/resources/embedding_openai.json diff --git a/libs/ktem/tests/test_qa.py b/libs/ktem/ktem_tests/test_qa.py similarity index 100% rename from libs/ktem/tests/test_qa.py rename to libs/ktem/ktem_tests/test_qa.py