Skip to content

Commit 5c9843f

Browse files
committed
fix(schema): fix JSON output
1 parent 4f53b09 commit 5c9843f

File tree

6 files changed

+14
-35
lines changed

6 files changed

+14
-35
lines changed

requirements-dev.lock

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -185,10 +185,6 @@ idna==3.7
185185
# via yarl
186186
imagesize==1.4.1
187187
# via sphinx
188-
importlib-metadata==7.1.0
189-
# via sphinx
190-
importlib-resources==6.4.0
191-
# via matplotlib
192188
iniconfig==2.0.0
193189
# via pytest
194190
jinja2==3.1.4
@@ -475,7 +471,6 @@ typing-extensions==4.12.0
475471
# via pyee
476472
# via sf-hamilton
477473
# via sqlalchemy
478-
# via starlette
479474
# via streamlit
480475
# via typer
481476
# via typing-inspect
@@ -507,6 +502,3 @@ win32-setctime==1.1.0
507502
# via loguru
508503
yarl==1.9.4
509504
# via aiohttp
510-
zipp==3.19.1
511-
# via importlib-metadata
512-
# via importlib-resources

scrapegraphai/nodes/generate_answer_csv_node.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
# Imports from Langchain
1010
from langchain.prompts import PromptTemplate
11-
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
11+
from langchain_core.output_parsers import JsonOutputParser
1212
from langchain_core.runnables import RunnableParallel
1313
from tqdm import tqdm
1414

@@ -96,7 +96,7 @@ def execute(self, state):
9696

9797
# Initialize the output parser
9898
if self.node_config.get("schema", None) is not None:
99-
output_parser = PydanticOutputParser(pydantic_object=self.node_config.get("schema", None))
99+
output_parser = JsonOutputParser(pydantic_object=self.node_config["schema"])
100100
else:
101101
output_parser = JsonOutputParser()
102102

@@ -150,9 +150,6 @@ def execute(self, state):
150150
single_chain = list(chains_dict.values())[0]
151151
answer = single_chain.invoke({"question": user_prompt})
152152

153-
if type(answer) == PydanticOutputParser:
154-
answer = answer.model_dump()
155-
156153
# Update the state with the generated answer
157154
state.update({self.output[0]: answer})
158155
return state

scrapegraphai/nodes/generate_answer_node.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,11 @@
77

88
# Imports from Langchain
99
from langchain.prompts import PromptTemplate
10-
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
10+
from langchain_core.output_parsers import JsonOutputParser
1111
from langchain_core.runnables import RunnableParallel
1212
from tqdm import tqdm
1313

14+
1415
from ..utils.logging import get_logger
1516
from ..models import Ollama
1617
# Imports from the library
@@ -81,8 +82,8 @@ def execute(self, state: dict) -> dict:
8182
doc = input_data[1]
8283

8384
# Initialize the output parser
84-
if self.node_config.get("schema",None) is not None:
85-
output_parser = PydanticOutputParser(pydantic_object=self.node_config.get("schema", None))
85+
if self.node_config.get("schema", None) is not None:
86+
output_parser = JsonOutputParser(pydantic_object=self.node_config["schema"])
8687
else:
8788
output_parser = JsonOutputParser()
8889

@@ -129,9 +130,6 @@ def execute(self, state: dict) -> dict:
129130
single_chain = list(chains_dict.values())[0]
130131
answer = single_chain.invoke({"question": user_prompt})
131132

132-
if type(answer) == PydanticOutputParser:
133-
answer = answer.model_dump()
134-
135133
# Update the state with the generated answer
136134
state.update({self.output[0]: answer})
137135
return state

scrapegraphai/nodes/generate_answer_omni_node.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
# Imports from Langchain
99
from langchain.prompts import PromptTemplate
10-
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
10+
from langchain_core.output_parsers import JsonOutputParser
1111
from langchain_core.runnables import RunnableParallel
1212
from tqdm import tqdm
1313
from ..models import Ollama
@@ -82,7 +82,7 @@ def execute(self, state: dict) -> dict:
8282

8383
# Initialize the output parser
8484
if self.node_config.get("schema", None) is not None:
85-
output_parser = PydanticOutputParser(pydantic_object=self.node_config.get("schema", None))
85+
output_parser = JsonOutputParser(pydantic_object=self.node_config["schema"])
8686
else:
8787
output_parser = JsonOutputParser()
8888

@@ -141,9 +141,6 @@ def execute(self, state: dict) -> dict:
141141
single_chain = list(chains_dict.values())[0]
142142
answer = single_chain.invoke({"question": user_prompt})
143143

144-
if type(answer) == PydanticOutputParser:
145-
answer = answer.model_dump()
146-
147144
# Update the state with the generated answer
148145
state.update({self.output[0]: answer})
149146
return state

scrapegraphai/nodes/generate_answer_pdf_node.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
# Imports from Langchain
99
from langchain.prompts import PromptTemplate
10-
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
10+
from langchain_core.output_parsers import JsonOutputParser
1111
from langchain_core.runnables import RunnableParallel
1212
from tqdm import tqdm
1313
from ..models import Ollama
@@ -96,8 +96,8 @@ def execute(self, state):
9696
doc = input_data[1]
9797

9898
# Initialize the output parser
99-
if self.node_config.get("schema",None) is not None:
100-
output_parser = PydanticOutputParser(pydantic_object=self.node_config.get("schema", None))
99+
if self.node_config.get("schema", None) is not None:
100+
output_parser = JsonOutputParser(pydantic_object=self.node_config["schema"])
101101
else:
102102
output_parser = JsonOutputParser()
103103

scrapegraphai/nodes/merge_answers_node.py

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
# Imports from Langchain
1010
from langchain.prompts import PromptTemplate
11-
from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
11+
from langchain_core.output_parsers import JsonOutputParser
1212
from tqdm import tqdm
1313

1414
from ..utils.logging import get_logger
@@ -80,10 +80,8 @@ def execute(self, state: dict) -> dict:
8080
answers_str += f"CONTENT WEBSITE {i+1}: {answer}\n"
8181

8282
# Initialize the output parser
83-
if self.node_config["schema"] is not None:
84-
output_parser = PydanticOutputParser(
85-
pydantic_object=self.node_config["schema"]
86-
)
83+
if self.node_config.get("schema", None) is not None:
84+
output_parser = JsonOutputParser(pydantic_object=self.node_config["schema"])
8785
else:
8886
output_parser = JsonOutputParser()
8987

@@ -111,9 +109,6 @@ def execute(self, state: dict) -> dict:
111109
merge_chain = prompt_template | self.llm_model | output_parser
112110
answer = merge_chain.invoke({"user_prompt": user_prompt})
113111

114-
if type(answer) == PydanticOutputParser:
115-
answer = answer.model_dump()
116-
117112
# Update the state with the generated answer
118113
state.update({self.output[0]: answer})
119114
return state

0 commit comments

Comments (0)