From 5148c8066160c3a031eda64b016eb68a42a26f0e Mon Sep 17 00:00:00 2001
From: Yue Fei <59813791+moria97@users.noreply.github.com>
Date: Wed, 10 Apr 2024 01:56:10 +0800
Subject: [PATCH] Fix incomplete json output in guideline evaluator (#12646)

---
 llama-index-core/llama_index/core/evaluation/guideline.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llama-index-core/llama_index/core/evaluation/guideline.py b/llama-index-core/llama_index/core/evaluation/guideline.py
index e8d44acc83..ac134322ff 100644
--- a/llama-index-core/llama_index/core/evaluation/guideline.py
+++ b/llama-index-core/llama_index/core/evaluation/guideline.py
@@ -59,6 +59,7 @@ class GuidelineEvaluator(BaseEvaluator):
         llm: Optional[LLM] = None,
         guidelines: Optional[str] = None,
         eval_template: Optional[Union[str, BasePromptTemplate]] = None,
+        output_parser: Optional[PydanticOutputParser] = None,
         # deprecated
         service_context: Optional[ServiceContext] = None,
     ) -> None:
@@ -71,7 +72,9 @@ class GuidelineEvaluator(BaseEvaluator):
         else:
             self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
 
-        self._output_parser = PydanticOutputParser(output_cls=EvaluationData)
+        self._output_parser = output_parser or PydanticOutputParser(
+            output_cls=EvaluationData
+        )
         self._eval_template.output_parser = self._output_parser
 
     def _get_prompts(self) -> PromptDictType:
-- 
GitLab