Skip to content

Commit

Permalink
Brings back old functionality
Browse files Browse the repository at this point in the history
  • Loading branch information
kgrofelnik committed Jun 26, 2024
1 parent aae1448 commit 017cc50
Show file tree
Hide file tree
Showing 6 changed files with 95 additions and 50 deletions.
81 changes: 49 additions & 32 deletions oracles/abi/ChatOracle.json

Large diffs are not rendered by default.

25 changes: 25 additions & 0 deletions oracles/src/domain/llm/basic_llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
from typing import List
from typing import Optional

import backoff
import openai
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion

import settings
from src.domain.llm.utils import TIMEOUT


@backoff.on_exception(
    backoff.expo, (openai.RateLimitError, openai.APITimeoutError), max_tries=3
)
async def execute(model: str, messages: List[dict]) -> Optional[str]:
    """Send *messages* to the OpenAI chat-completions API and return the reply text.

    Retries with exponential backoff (up to 3 attempts) when the OpenAI client
    raises a rate-limit or request-timeout error.

    :param model: OpenAI model identifier to query.
    :param messages: Chat history in the OpenAI ``{"role": ..., "content": ...}`` format.
    :return: The assistant message content of the first choice (may be ``None``).
    """
    api_client = AsyncOpenAI(api_key=settings.OPEN_AI_API_KEY, timeout=TIMEOUT)
    completion: ChatCompletion = await api_client.chat.completions.create(
        model=model,
        messages=messages,
    )
    first_choice = completion.choices[0]
    return first_choice.message.content
14 changes: 4 additions & 10 deletions oracles/src/domain/llm/generate_response_use_case.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,28 +4,22 @@

from src.entities import Chat
from src.entities import PromptType
from src.entities import GroqModelType
from src.entities import OpenAiModelType
from src.entities import AnthropicModelType
from src.domain.llm import basic_llm
from src.domain.llm import groq_llm
from src.domain.llm import openai_llm
from src.domain.llm import anthropic_llm
from src.domain.llm.entities import LLMResult
from src.domain.llm.utils import TIMEOUT


async def execute(chat: Chat) -> LLMResult:
async def execute(model: str, chat: Chat) -> LLMResult:
try:
if not chat.config or chat.prompt_type == PromptType.DEFAULT:
chat.prompt_type = PromptType.DEFAULT
if chat.config.model in get_args(OpenAiModelType):
response = await openai_llm.execute(chat)
elif chat.config.model in get_args(GroqModelType):
response = await groq_llm.execute(chat)
elif chat.config.model in get_args(AnthropicModelType):
if chat.config and chat.config.model in get_args(AnthropicModelType):
response = await anthropic_llm.execute(chat)
else:
response = "Invalid model"
response = await basic_llm.execute(model, messages=chat.messages)
elif chat.prompt_type == PromptType.OPENAI:
response = await openai_llm.execute(chat)
elif chat.prompt_type == PromptType.GROQ:
Expand Down
1 change: 0 additions & 1 deletion oracles/src/domain/llm/openai_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
import openai
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion
from openai.types.chat.chat_completion import ChatCompletion

from src.entities import Chat
from src.domain.llm.utils import TIMEOUT
Expand Down
20 changes: 14 additions & 6 deletions oracles/src/repositories/web3/chat_repository.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,12 +213,20 @@ async def _build_response_tx(self, chat: Chat):
).build_transaction(tx_data)
# Eventually more options here
else:
tx = await self.oracle_contract.functions.addResponse(
chat.id,
chat.callback_id,
_format_llm_response(chat.response),
chat.error_message,
).build_transaction(tx_data)
if chat.config:
tx = await self.oracle_contract.functions.addResponse(
chat.id,
chat.callback_id,
_format_llm_response(chat.response),
chat.error_message,
).build_transaction(tx_data)
else:
tx = await self.oracle_contract.functions.addResponse(
chat.id,
chat.callback_id,
chat.response,
chat.error_message,
).build_transaction(tx_data)
return tx

async def _get_llm_config(self, i: int) -> Optional[LlmConfig]:
Expand Down
4 changes: 3 additions & 1 deletion oracles/src/service/chat_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,9 @@ async def _answer_chat(
print(f"Answering chat {chat.id}", flush=True)
await _cache_ipfs_urls(chat, ipfs_repository)
if chat.response is None:
response = await generate_response_use_case.execute(chat)
response = await generate_response_use_case.execute(
"gpt-4-turbo-preview", chat
)
chat.response = response.chat_completion
chat.error_message = response.error

Expand Down

0 comments on commit 017cc50

Please sign in to comment.