Skip to content

Commit 0053660

Browse files
fix: handle None response_text in Ollama sequential tool calls
- Add proper None checks before calling .strip() on response_text
- Fixes 'NoneType' object has no attribute 'strip' error with Ollama models
- Ensures backward compatibility by returning empty string instead of None
- Applied fixes in both sync and async versions of get_response methods

Fixes issue #846

Co-authored-by: Mervin Praison <MervinPraison@users.noreply.github.com>
1 parent 181d7e1 commit 0053660

File tree

1 file changed

+15
-11
lines changed
  • src/praisonai-agents/praisonaiagents/llm

1 file changed

+15
-11
lines changed

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 15 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -772,7 +772,7 @@ def get_response(
772772
if formatted_tools and self._supports_streaming_tools():
773773
tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)
774774

775-
response_text = response_text.strip()
775+
response_text = response_text.strip() if response_text else ""
776776

777777
# Create a mock final_response with the captured data
778778
final_response = {
@@ -904,7 +904,7 @@ def get_response(
904904

905905
# Set flag to indicate Ollama was handled
906906
ollama_handled = True
907-
final_response_text = response_text.strip()
907+
final_response_text = response_text.strip() if response_text else ""
908908
logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
909909

910910
# Display the response if we got one
@@ -962,7 +962,7 @@ def get_response(
962962
# No tool calls, we're done with this iteration
963963
# If we've executed tools in previous iterations, this response contains the final answer
964964
if iteration_count > 0:
965-
final_response_text = response_text.strip()
965+
final_response_text = response_text.strip() if response_text else ""
966966
break
967967

968968
except Exception as e:
@@ -993,7 +993,7 @@ def get_response(
993993
console=console
994994
)
995995

996-
response_text = response_text.strip()
996+
response_text = response_text.strip() if response_text else ""
997997

998998
# Return reasoning content if reasoning_steps is True and we have it
999999
if reasoning_steps and stored_reasoning_content:
@@ -1155,7 +1155,7 @@ def get_response(
11551155
if chunk and chunk.choices and chunk.choices[0].delta.content:
11561156
response_text += chunk.choices[0].delta.content
11571157

1158-
response_text = response_text.strip()
1158+
response_text = response_text.strip() if response_text else ""
11591159
continue
11601160

11611161
except json.JSONDecodeError:
@@ -1464,7 +1464,7 @@ async def get_response_async(
14641464

14651465
# Set flag to indicate Ollama was handled
14661466
ollama_handled = True
1467-
final_response_text = response_text.strip()
1467+
final_response_text = response_text.strip() if response_text else ""
14681468
logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
14691469

14701470
# Display the response if we got one
@@ -1571,7 +1571,7 @@ async def get_response_async(
15711571
# No tool calls, we're done with this iteration
15721572
# If we've executed tools in previous iterations, this response contains the final answer
15731573
if iteration_count > 0:
1574-
final_response_text = response_text.strip()
1574+
final_response_text = response_text.strip() if response_text else ""
15751575
break
15761576

15771577
# Handle output formatting
@@ -1808,6 +1808,8 @@ def _handle_ollama_model(self, response_text: str, tool_results: List[Any], mess
18081808

18091809
# Check if the response is just a JSON tool call
18101810
try:
1811+
if not response_text:
1812+
return None
18111813
json_response = json.loads(response_text.strip())
18121814
if not (('name' in json_response or 'function' in json_response) and
18131815
not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
@@ -2066,7 +2068,8 @@ def response(
20662068
response_text += content
20672069
else:
20682070
response = litellm.completion(**completion_params)
2069-
response_text = response.choices[0].message.content.strip()
2071+
response_text = response.choices[0].message.content
2072+
response_text = response_text.strip() if response_text else ""
20702073

20712074
if verbose:
20722075
display_interaction(
@@ -2077,7 +2080,7 @@ def response(
20772080
console=console or self.console
20782081
)
20792082

2080-
return response_text.strip()
2083+
return response_text.strip() if response_text else ""
20812084

20822085
except Exception as error:
20832086
display_error(f"Error in response: {str(error)}")
@@ -2154,7 +2157,8 @@ async def aresponse(
21542157
response_text += content
21552158
else:
21562159
response = await litellm.acompletion(**completion_params)
2157-
response_text = response.choices[0].message.content.strip()
2160+
response_text = response.choices[0].message.content
2161+
response_text = response_text.strip() if response_text else ""
21582162

21592163
if verbose:
21602164
display_interaction(
@@ -2165,7 +2169,7 @@ async def aresponse(
21652169
console=console or self.console
21662170
)
21672171

2168-
return response_text.strip()
2172+
return response_text.strip() if response_text else ""
21692173

21702174
except Exception as error:
21712175
display_error(f"Error in response_async: {str(error)}")

0 commit comments

Comments (0)