community/log_analysis_multi_agent_rag/binary_score_models.py
@@ -1,4 +1,4 @@
-from langchain_core.pydantic_v1 import BaseModel,Field
+from pydantic import BaseModel, Field
 # Data models
 class GradeDocuments(BaseModel):
     """Binary score for relevance check on retrieved documents."""
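This hunk swaps the deprecated langchain_core.pydantic_v1 shim for Pydantic proper; the rest of the class is truncated in the diff view. A minimal sketch of what the full model plausibly looks like under the new import, with the binary_score field name assumed from its use in graphnodes.py and the description invented:

from pydantic import BaseModel, Field

class GradeDocuments(BaseModel):
    """Binary score for relevance check on retrieved documents."""
    # Field name assumed from graphnodes.py; the description is illustrative
    binary_score: str = Field(description="Document is relevant to the question: 'yes' or 'no'")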
4 changes: 2 additions & 2 deletions community/log_analysis_multi_agent_rag/graphedges.py
@@ -40,8 +40,8 @@ def grade_generation_vs_documents_and_question(state):
 
     print("GRADE GENERATED vs QUESTION")
     try:
-        score_text = automation.answer_grader.invoke({"question": question, "generation": generation})
-        if "yes" in score_text.lower():
+        score = automation.answer_grader.invoke({"question": question, "generation": generation})
+        if score and score.get("binary_score") == "yes":
             print("DECISION: GENERATION ADDRESSES QUESTION")
             return "useful"
         else:
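Because the graders in utils.py now end in JsonOutputParser() (see the utils.py diff below), invoke() returns a plain dict rather than a Pydantic object or a raw string, so the edge reads the grade by key. A minimal sketch, with the parser output shape assumed:

# Assumed shape of the parsed grader output:
score = {"binary_score": "yes"}
if score and score.get("binary_score") == "yes":  # truthiness check also guards a None result
    print("DECISION: GENERATION ADDRESSES QUESTION")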
2 changes: 1 addition & 1 deletion community/log_analysis_multi_agent_rag/graphnodes.py
@@ -50,7 +50,7 @@ def grade_documents(state):
         score = automation.retrieval_grader.invoke(
             {"question": question, "document": doc.page_content}
         )
-        grade = score.binary_score
+        grade = score.get("binary_score") if score else "no"
         if grade == "yes":
             print("---GRADE: DOCUMENT RELEVANT---")
             filtered_docs.append(doc)
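grade_documents applies the same dict access with a defensive default: a falsy parse result is treated as "no", so the document is dropped rather than crashing the node. A small sketch over assumed grader outputs:

# Hypothetical parse results, purely for illustration:
for score in ({"binary_score": "yes"}, {}, None):
    grade = score.get("binary_score") if score else "no"
    print(grade)  # -> yes, None, no; only "yes" keeps the document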
16 changes: 12 additions & 4 deletions community/log_analysis_multi_agent_rag/utils.py
@@ -1,12 +1,20 @@
 from langchain_nvidia_ai_endpoints import ChatNVIDIA
 from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.output_parsers import StrOutputParser
+from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
 from binary_score_models import GradeAnswer,GradeDocuments,GradeHallucinations
 import os
 from dotenv import load_dotenv
 load_dotenv()
+import re
+import json
+
+def clean_text(text):
+    # Remove <think> blocks (including content)
+    text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
+    # Remove any standalone <think> tags just in case
+    text = text.replace('<think>', '').replace('</think>', '')
+    return text
 
 class Nodeoutputs:
     def __init__(self, api_key, model, prompts_file):
         os.environ["NVIDIA_API_KEY"] = api_key
@@ -41,23 +49,23 @@ def setup_prompts(self):
                 ("human", self.prompts["grade_human"]),
             ]
         )
-        self.retrieval_grader = grade_prompt | self.llm.with_structured_output(GradeDocuments)
+        self.retrieval_grader = grade_prompt | self.llm | StrOutputParser() | clean_text | JsonOutputParser()
 
         hallucination_prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", self.prompts["hallucination_system"]),
                 ("human", self.prompts["hallucination_human"]),
             ]
         )
-        self.hallucination_grader = hallucination_prompt | self.llm.with_structured_output(GradeHallucinations)
+        self.hallucination_grader = hallucination_prompt | self.llm | StrOutputParser() | clean_text | JsonOutputParser()
 
         answer_prompt = ChatPromptTemplate.from_messages(
             [
                 ("system", self.prompts["answer_system"]),
                 ("human", self.prompts["answer_human"]),
             ]
         )
-        self.answer_grader = answer_prompt | self.llm.with_structured_output(GradeAnswer)
+        self.answer_grader = answer_prompt | self.llm | StrOutputParser() | clean_text | JsonOutputParser()
 
     def format_docs(self, docs):
         return "\n\n".join(doc.page_content for doc in docs)
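All three graders now share the same tail: StrOutputParser() flattens the chat response to text, clean_text strips the model's reasoning trace, and JsonOutputParser() turns the remainder into the dict the graph nodes consume (LangChain coerces the bare clean_text function into a runnable when piped). A minimal sketch of that post-processing on an invented raw response, using json.loads to stand in for JsonOutputParser:

import re, json

def clean_text(text):
    # Same helper as in the diff above
    text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
    return text.replace('<think>', '').replace('</think>', '')

# Invented example of a reasoning model's raw grader response:
raw = '<think>The document cites the failing service, so it is relevant.</think>\n{"binary_score": "yes"}'

cleaned = clean_text(raw)         # -> '\n{"binary_score": "yes"}'
score = json.loads(cleaned)       # JsonOutputParser performs this parse in the chain
print(score.get("binary_score"))  # -> yes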