diff --git a/docs/extra/components/choose_evaluvator_llm.md b/docs/extra/components/choose_evaluator_llm.md
similarity index 77%
rename from docs/extra/components/choose_evaluvator_llm.md
rename to docs/extra/components/choose_evaluator_llm.md
index ed672a2c2..bfb7f9555 100644
--- a/docs/extra/components/choose_evaluvator_llm.md
+++ b/docs/extra/components/choose_evaluator_llm.md
@@ -16,8 +16,11 @@
 
     ```python
     from ragas.llms import LangchainLLMWrapper
+    from ragas.embeddings import LangchainEmbeddingsWrapper
     from langchain_openai import ChatOpenAI
+    from langchain_openai import OpenAIEmbeddings
     evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
+    evaluator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())
     ```
 
 
@@ -44,7 +47,10 @@
 
     ```python
     from langchain_aws import ChatBedrockConverse
+    from langchain_aws import BedrockEmbeddings
     from ragas.llms import LangchainLLMWrapper
+    from ragas.embeddings import LangchainEmbeddingsWrapper
+
     evaluator_llm = LangchainLLMWrapper(ChatBedrockConverse(
         credentials_profile_name=config["credentials_profile_name"],
         region_name=config["region_name"],
@@ -52,6 +58,11 @@
         model=config["llm"],
         temperature=config["temperature"],
     ))
+    evaluator_embeddings = LangchainEmbeddingsWrapper(BedrockEmbeddings(
+        credentials_profile_name=config["credentials_profile_name"],
+        region_name=config["region_name"],
+        model_id=config["embeddings"],
+    ))
     ```
 
     If you want more information on how to use other AWS services, please refer to the [langchain-aws](https://python.langchain.com/docs/integrations/providers/aws/) documentation.
\ No newline at end of file
diff --git a/docs/extra/components/choose_generator_llm.md b/docs/extra/components/choose_generator_llm.md
index 765171bb9..fc2358eb9 100644
--- a/docs/extra/components/choose_generator_llm.md
+++ b/docs/extra/components/choose_generator_llm.md
@@ -17,7 +17,10 @@
 
     ```python
     from ragas.llms import LangchainLLMWrapper
+    from ragas.embeddings import LangchainEmbeddingsWrapper
     from langchain_openai import ChatOpenAI
+    from langchain_openai import OpenAIEmbeddings
     generator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
+    generator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())
     ```
 
@@ -44,7 +47,10 @@
 
     ```python
     from langchain_aws import ChatBedrockConverse
+    from langchain_aws import BedrockEmbeddings
     from ragas.llms import LangchainLLMWrapper
+    from ragas.embeddings import LangchainEmbeddingsWrapper
+
     generator_llm = LangchainLLMWrapper(ChatBedrockConverse(
         credentials_profile_name=config["credentials_profile_name"],
         region_name=config["region_name"],
@@ -52,6 +58,11 @@
         model=config["llm"],
         temperature=config["temperature"],
     ))
+    generator_embeddings = LangchainEmbeddingsWrapper(BedrockEmbeddings(
+        credentials_profile_name=config["credentials_profile_name"],
+        region_name=config["region_name"],
+        model_id=config["embeddings"],
+    ))
     ```
 
     If you want more information on how to use other AWS services, please refer to the [langchain-aws](https://python.langchain.com/docs/integrations/providers/aws/) documentation.
\ No newline at end of file
diff --git a/docs/getstarted/rag_evaluation.md b/docs/getstarted/rag_evaluation.md
index cebac70db..f1fc512dd 100644
--- a/docs/getstarted/rag_evaluation.md
+++ b/docs/getstarted/rag_evaluation.md
@@ -33,15 +33,20 @@ Since all of the metrics we have chosen are LLM-based metrics, we need to choose
 
 ### Choosing evaluator LLM
 
 --8<--
-choose_evaluvator_llm.md
+choose_evaluator_llm.md
 --8<--
 
 ### Running Evaluation
 
 ```python
-metrics = [LLMContextRecall(), FactualCorrectness(), Faithfulness()]
-results = evaluate(dataset=eval_dataset, metrics=metrics, llm=evaluator_llm,)
+metrics = [
+    LLMContextRecall(llm=evaluator_llm),
+    FactualCorrectness(llm=evaluator_llm),
+    Faithfulness(llm=evaluator_llm),
+    SemanticSimilarity(embeddings=evaluator_embeddings)
+]
+results = evaluate(dataset=eval_dataset, metrics=metrics)
 ```
 
 
 ### Exporting and analyzing results
diff --git a/docs/getstarted/rag_testset_generation.md b/docs/getstarted/rag_testset_generation.md
index b0ecfe5d5..96acb6c79 100644
--- a/docs/getstarted/rag_testset_generation.md
+++ b/docs/getstarted/rag_testset_generation.md
@@ -101,12 +101,11 @@ But you can mix and match transforms or build your own as needed.
 
 ```python
 from ragas.testset.transforms import default_transforms
 
-# choose your LLM and Embedding Model
-from ragas.llms import llm_factory
-from ragas.embeddings import embedding_factory
-transformer_llm = llm_factory("gpt-4o")
-embedding_model = embedding_factory("text-embedding-3-large")
+# define your LLM and Embedding Model
+# here we are using the same LLM and Embedding Model that we used to generate the testset
+transformer_llm = generator_llm
+embedding_model = generator_embeddings
 
 trans = default_transforms(llm=transformer_llm, embedding_model=embedding_model)
 apply_transforms(kg, trans)
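Note that the updated `rag_evaluation.md` hunk references `SemanticSimilarity` and `evaluator_embeddings` without showing where they are imported or created; those pieces live in context outside this diff. The following is a minimal sketch, not part of the patch, of how the changed snippets are intended to compose, assuming the OpenAI path from `choose_evaluator_llm.md`, an `OPENAI_API_KEY` in the environment, and an `eval_dataset` built earlier in the getting-started guide:

```python
# Sketch only: import paths follow ragas 0.2 conventions; eval_dataset is assumed
# to be the EvaluationDataset constructed earlier in the getting-started guide.
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from ragas import evaluate
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper
from ragas.metrics import (
    LLMContextRecall,
    FactualCorrectness,
    Faithfulness,
    SemanticSimilarity,
)

# Wrap the evaluator LLM and embeddings, mirroring choose_evaluator_llm.md
evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
evaluator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())

# Each LLM-based metric now receives the evaluator LLM explicitly, and
# SemanticSimilarity receives the embeddings wrapper, so evaluate() no longer
# needs a top-level llm= argument.
metrics = [
    LLMContextRecall(llm=evaluator_llm),
    FactualCorrectness(llm=evaluator_llm),
    Faithfulness(llm=evaluator_llm),
    SemanticSimilarity(embeddings=evaluator_embeddings),
]
results = evaluate(dataset=eval_dataset, metrics=metrics)
```

The `rag_testset_generation.md` hunk follows the same pattern on the generation side: instead of calling `llm_factory` and `embedding_factory`, it reuses the already wrapped `generator_llm` and `generator_embeddings` when building `default_transforms`.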