adds a langserve/langgraph example with code execution #658

Merged: 5 commits, Mar 20, 2024
63 changes: 63 additions & 0 deletions 06_gpu_and_ml/langchains/codelangchain/agent.py
@@ -0,0 +1,63 @@
"""This module defines our agent and attaches it to the Modal Stub.

Our agent is defined as a graph: a collection of nodes and edges,
where nodes represent actions and edges represent transitions between actions.

The meat of the logic is therefore in the edges and nodes modules.

We have a very simple "context-stuffing" retrieval approach in the retrieval module.
Replace this with something that retrieves your documentation and adjust the prompts accordingly.

You can test the agent from the command line with `modal run agent.py --question` followed by your query."""

import edges
import nodes
import retrieval
from common import stub


@stub.local_entrypoint()
def main(question: str = "How do I build a RAG pipeline?", debug: bool = False):
"""Sends a question to the LCEL code generation agent.

Switch to debug mode for shorter context and smaller model."""
if debug:
if question == "How do I build a RAG pipeline?":
question = "gm king, how are you?"
print(go.remote(question, debug=debug)["keys"]["response"])


@stub.function()
def go(question: str = "How do I build a RAG pipeline?", debug: bool = False):
"""Compiles the LCEL code generation agent graph and runs it, returning the result."""
graph = construct_graph(debug=debug)
runnable = graph.compile()
result = runnable.invoke(
{"keys": {"question": question, "iterations": 0}},
config={"recursion_limit": 50},
)

return result


def construct_graph(debug=False):
from common import GraphState
from langgraph.graph import StateGraph

context = retrieval.retrieve_docs(debug=debug)

graph = StateGraph(GraphState)

# attach our nodes to the graph
graph_nodes = nodes.Nodes(context, debug=debug)
for key, value in graph_nodes.node_map.items():
graph.add_node(key, value)

# construct the graph by adding edges
graph = edges.enrich(graph)

# set the starting and ending nodes of the graph
graph.set_entry_point(key="generate")
graph.set_finish_point(key="finish")

return graph
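The nodes module is not part of this diff, but construct_graph only assumes that its Nodes class exposes a node_map from the node names in edges.EXPECTED_NODES to node callables. A hypothetical sketch of that assumed interface, for orientation only:

# Hypothetical sketch, not the actual nodes.py: construct_graph relies only
# on node_map mapping node names to callables over GraphState.
from common import GraphState


class Nodes:
    def __init__(self, context: str, debug: bool = False):
        self.context = context  # retrieved docs to stuff into prompts
        self.debug = debug
        self.node_map = {
            "generate": self.generate,
            "check_code_imports": self.check_code_imports,
            "check_code_execution": self.check_code_execution,
            "finish": self.finish,
        }

    def generate(self, state: GraphState) -> GraphState:
        ...  # call the LLM with the question and the stuffed context

    def check_code_imports(self, state: GraphState) -> GraphState:
        ...  # attempt the generated imports, recording any error in state

    def check_code_execution(self, state: GraphState) -> GraphState:
        ...  # run the generated code, recording any error in state

    def finish(self, state: GraphState) -> GraphState:
        ...  # package the final response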
52 changes: 52 additions & 0 deletions 06_gpu_and_ml/langchains/codelangchain/app.py
@@ -0,0 +1,52 @@
import agent
import modal
from agent import nodes, stub
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

web_app = FastAPI(
title="CodeLangChain Server",
version="1.0",
description="Answers questions about LangChain Expression Language (LCEL).",
)


# Allow cross-origin requests from any origin
web_app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
expose_headers=["*"],
)


@stub.function(keep_warm=1)
@modal.asgi_app()
def serve():
from langchain_core.runnables import RunnableLambda
from langserve import add_routes

def inp(question: str) -> dict:
return {"keys": {"question": question, "iterations": 0}}

def out(state: dict) -> str:
if "keys" in state:
return state["keys"]["response"]
elif "generate" in state:
return nodes.extract_response(state["generate"])
else:
return str(state)

graph = agent.construct_graph(debug=False).compile()

chain = RunnableLambda(inp) | graph | RunnableLambda(out)

add_routes(
web_app,
chain,
path="/codelangchain",
)

return web_app
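Once the app is deployed (`modal deploy app.py`), the chain can be called from any Python client via LangServe's RemoteRunnable. A minimal client-side sketch; the URL below is a placeholder to be replaced with your deployment's URL:

# Client-side sketch; replace the placeholder URL with your deployment's URL.
from langserve import RemoteRunnable

chain = RemoteRunnable("https://your-workspace--code-langchain-serve.modal.run/codelangchain")
print(chain.invoke("How do I compose two runnables in LCEL?"))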
51 changes: 51 additions & 0 deletions 06_gpu_and_ml/langchains/codelangchain/common.py
@@ -0,0 +1,51 @@
import os
from typing import Any, Dict, TypedDict

import modal

image = modal.Image.debian_slim(python_version="3.11").pip_install(
"beautifulsoup4~=4.12.3",
"langchain==0.1.11",
"langgraph==0.0.26",
"langchain_community==0.0.27",
"langchain-openai==0.0.8",
"langserve[all]==0.0.46",
)

agent_image = image.pip_install(
"chromadb==0.4.24",
"langchainhub==0.1.15",
"faiss-cpu~=1.8.0",
"tiktoken==0.6.0",
)

stub = modal.Stub(
"code-langchain",
image=image,
secrets=[
modal.Secret.from_name("my-openai-secret"),
modal.Secret.from_name("my-langsmith-secret"),
],
)


class GraphState(TypedDict):
"""
Represents the state of our graph.

Attributes:
        keys: A dictionary mapping string names to arbitrary state values.
"""

    keys: Dict[str, Any]


os.environ["LANGCHAIN_PROJECT"] = "codelangchain"

COLOR = {
"HEADER": "\033[95m",
"BLUE": "\033[94m",
"GREEN": "\033[92m",
"RED": "\033[91m",
"ENDC": "\033[0m",
}
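The COLOR map holds ANSI escape codes for colorizing terminal logs. Illustrative usage (not part of the diff): wrap a message in a color code and reset with ENDC.

# Illustrative only: color a status line, then reset the terminal color.
from common import COLOR

print(COLOR["GREEN"] + "---CODE EXECUTION SUCCEEDED---" + COLOR["ENDC"])
print(COLOR["RED"] + "---CODE EXECUTION FAILED---" + COLOR["ENDC"])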
94 changes: 94 additions & 0 deletions 06_gpu_and_ml/langchains/codelangchain/edges.py
@@ -0,0 +1,94 @@
"""Defines functions that transition our agent from one state to another."""

from typing import Callable

from common import GraphState

EXPECTED_NODES = [
"generate",
"check_code_imports",
"check_code_execution",
"finish",
]


def enrich(graph):
"""Adds transition edges to the graph."""

for node_name in set(EXPECTED_NODES):
assert node_name in graph.nodes, f"Node {node_name} not found in graph"

graph.add_edge("generate", "check_code_imports")
graph.add_conditional_edges(
"check_code_imports",
EDGE_MAP["decide_to_check_code_exec"],
{
"check_code_execution": "check_code_execution",
"generate": "generate",
},
)
graph.add_conditional_edges(
"check_code_execution",
EDGE_MAP["decide_to_finish"],
{
"finish": "finish",
"generate": "generate",
},
)
return graph


def decide_to_check_code_exec(state: GraphState) -> str:
"""
Determines whether to test code execution, or re-try answer generation.

Args:
state (dict): The current graph state

Returns:
str: Next node to call
"""

print("---DECIDE TO TEST CODE EXECUTION---")
state_dict = state["keys"]
error = state_dict["error"]

if error == "None":
# All documents have been filtered check_relevance
# We will re-generate a new query
print("---DECISION: TEST CODE EXECUTION---")
return "check_code_execution"
else:
# We have relevant documents, so generate answer
print("---DECISION: RE-TRY SOLUTION---")
return "generate"


def decide_to_finish(state: GraphState) -> str:
"""
    Determines whether to finish or to retry generation (at most three attempts).

Args:
state (dict): The current graph state

Returns:
str: Next node to call
"""

print("---DECIDE TO TEST CODE EXECUTION---")
state_dict = state["keys"]
error = state_dict["error"]
iter = state_dict["iterations"]

if error == "None" or iter >= 3:
print("---DECISION: FINISH---")
return "finish"
else:
print("---DECISION: RE-TRY SOLUTION---")
return "generate"


EDGE_MAP: dict[str, Callable] = {
"decide_to_check_code_exec": decide_to_check_code_exec,
"decide_to_finish": decide_to_finish,
}
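Because each edge function only reads state["keys"], the routing logic can be exercised directly with hand-built states, for example from a REPL. A quick sketch:

# Exercise the routing functions directly with hand-built states.
from edges import decide_to_check_code_exec, decide_to_finish

ok_state = {"keys": {"error": "None", "iterations": 1}}
bad_state = {"keys": {"error": "ImportError: no module named foo", "iterations": 3}}

assert decide_to_check_code_exec(ok_state) == "check_code_execution"
assert decide_to_check_code_exec(bad_state) == "generate"
assert decide_to_finish(ok_state) == "finish"   # no error: finish
assert decide_to_finish(bad_state) == "finish"  # retry cap of 3 reached: finish anyway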