@@ -144,27 +144,41 @@ def _infer_model_name(llm: BaseLanguageModel):
 def _filter_params_for_openai_reasoning_models(llm: BaseLanguageModel, llm_params: Optional[dict]) -> Optional[dict]:
     """Filter out unsupported parameters for OpenAI reasoning models.
 
-    OpenAI reasoning models (o1, o3, gpt-5 excluding gpt-5-chat) only support
-    temperature=1. When using .bind() with other temperature values, the API
-    returns an error. This function removes the temperature parameter for these
-    models to allow the API default to apply.
+    OpenAI reasoning models (o1, o3, gpt-5 excluding gpt-5-chat) only allow
+    specific values for certain parameters (e.g. temperature is fixed at 1,
+    and stop is unsupported on some models). When .bind() passes other values
+    for these parameters, the API returns an error. This function removes the
+    unsupported parameters for the affected models so that API calls succeed.
 
-    See: https://github.com/langchain-ai/langchain/blob/master/libs/partners/openai/langchain_openai/chat_models/base.py
+    See also: https://github.com/langchain-ai/langchain/blob/master/libs/partners/openai/langchain_openai/chat_models/base.py
+
+    stop is not supported as a parameter in the following models (as of Jan 26):
+    gpt-5 and newer (only gpt-5-chat-latest works), o3 and o3-pro (but o3-mini works), o4-mini
     """
-    if not llm_params or "temperature" not in llm_params:
+    if not llm_params or ("temperature" not in llm_params and "stop" not in llm_params):
         return llm_params
 
     model_name = _infer_model_name(llm).lower()
 
-    is_openai_reasoning_model = (
+    # Models that do not support temperature as a param, or changing its default value
+    is_temperature_not_supported = (
         model_name.startswith("o1")
         or model_name.startswith("o3")
         or (model_name.startswith("gpt-5") and "chat" not in model_name)
     )
+    # Models that do not support stop as a param
+    is_stop_not_supported = (
+        (model_name.startswith("o3") and "o3-mini" not in model_name)
+        or model_name.startswith("o4-mini")
+        or (model_name.startswith("gpt-5") and "gpt-5-chat" not in model_name)
+    )
 
-    if is_openai_reasoning_model:
+    if is_temperature_not_supported or is_stop_not_supported:
         filtered = llm_params.copy()
-        filtered.pop("temperature", None)
+        if is_temperature_not_supported:
+            filtered.pop("temperature", None)
+        if is_stop_not_supported:
+            filtered.pop("stop", None)
         return filtered
 
     return llm_params
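To make the new filtering rules concrete, here is a standalone sketch that mirrors the branching above. This is an illustration only, not the library function: `_infer_model_name` is replaced by a plain `model_name` string argument.

```python
from typing import Optional

def filter_reasoning_params(model_name: str, params: Optional[dict]) -> Optional[dict]:
    """Standalone sketch of the filtering rules in the diff above."""
    if not params or ("temperature" not in params and "stop" not in params):
        return params
    name = model_name.lower()
    # temperature is fixed at 1 for o1/o3/gpt-5 (except the gpt-5-chat variants)
    no_temperature = (
        name.startswith("o1")
        or name.startswith("o3")
        or (name.startswith("gpt-5") and "chat" not in name)
    )
    # stop is rejected by o3/o3-pro (but not o3-mini), o4-mini, and non-chat gpt-5
    no_stop = (
        (name.startswith("o3") and "o3-mini" not in name)
        or name.startswith("o4-mini")
        or (name.startswith("gpt-5") and "gpt-5-chat" not in name)
    )
    if no_temperature or no_stop:
        filtered = params.copy()
        if no_temperature:
            filtered.pop("temperature", None)
        if no_stop:
            filtered.pop("stop", None)
        return filtered
    return params

# o3-pro drops both keys; o3-mini keeps stop; gpt-5-chat-latest keeps everything
assert filter_reasoning_params("o3-pro", {"temperature": 0.2, "stop": ["\n"]}) == {}
assert filter_reasoning_params("o3-mini", {"temperature": 0.2, "stop": ["\n"]}) == {"stop": ["\n"]}
assert filter_reasoning_params("gpt-5-chat-latest", {"temperature": 0.7}) == {"temperature": 0.7}
```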
@@ -202,18 +216,25 @@ async def llm_call(
         raise LLMCallException(ValueError("No LLM provided to llm_call()"))
     _setup_llm_call_info(llm, model_name, model_provider)
 
-    filtered_params = _filter_params_for_openai_reasoning_models(llm, llm_params)
+    llm_params_with_stop: Optional[dict]
+    if stop:
+        llm_params_with_stop = llm_params.copy() if llm_params else {}
+        llm_params_with_stop["stop"] = stop
+    else:
+        llm_params_with_stop = llm_params
+
+    filtered_params = _filter_params_for_openai_reasoning_models(llm, llm_params_with_stop)
     generation_llm: Union[BaseLanguageModel, Runnable] = llm.bind(**filtered_params) if filtered_params else llm
 
     if streaming_handler:
-        return await _stream_llm_call(generation_llm, prompt, streaming_handler, stop)
+        return await _stream_llm_call(generation_llm, prompt, streaming_handler)
     else:
         all_callbacks = _prepare_callbacks(custom_callback_handlers)
 
         if isinstance(prompt, str):
-            response = await _invoke_with_string_prompt(generation_llm, prompt, all_callbacks, stop)
+            response = await _invoke_with_string_prompt(generation_llm, prompt, all_callbacks)
         else:
-            response = await _invoke_with_message_list(generation_llm, prompt, all_callbacks, stop)
+            response = await _invoke_with_message_list(generation_llm, prompt, all_callbacks)
 
         _store_reasoning_traces(response)
         _store_tool_calls(response)
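The net effect in `llm_call` is that a caller-supplied `stop` list now rides along in the params dict and is bound once via `.bind()`, instead of being threaded through every call site. A minimal sketch of the merge step, using the same logic as the diff (the helper name is hypothetical):

```python
from typing import List, Optional

def merge_stop(llm_params: Optional[dict], stop: Optional[List[str]]) -> Optional[dict]:
    """Sketch of the merge above: copy params so the caller's dict is untouched."""
    if stop:
        merged = llm_params.copy() if llm_params else {}
        merged["stop"] = stop
        return merged
    return llm_params

assert merge_stop({"temperature": 0.0}, ["\nUser:"]) == {"temperature": 0.0, "stop": ["\nUser:"]}
assert merge_stop(None, None) is None  # nothing extra to bind when both are empty
```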
@@ -225,7 +246,6 @@ async def _stream_llm_call(
     llm: Union[BaseLanguageModel, Runnable],
     prompt: Union[str, List[dict]],
     handler: "StreamingHandler",
-    stop: Optional[List[str]],
 ) -> str:
     """Stream LLM response using astream().
 
@@ -237,11 +257,17 @@ async def _stream_llm_call(
     else:
         messages = prompt
 
-    handler.stop = stop or []
+    stop = []
+    if hasattr(llm, "kwargs"):
+        current_params = getattr(llm, "kwargs", {})
+        stop = current_params.get("stop", [])
+    if not stop:
+        stop = getattr(llm, "stop", [])
+    handler.stop = stop
     accumulated_metadata: Dict[str, Any] = {}
 
     try:
-        async for chunk in llm.astream(messages, stop=stop, config=RunnableConfig(callbacks=logging_callbacks)):
+        async for chunk in llm.astream(messages, config=RunnableConfig(callbacks=logging_callbacks)):
             if hasattr(chunk, "content"):
                 content = chunk.content
             else:
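Since `stop` is no longer passed into `_stream_llm_call`, the streaming path recovers it from the runnable itself: LangChain's `.bind()` returns a `RunnableBinding` whose bound keyword arguments live on a `kwargs` attribute, with a plain `stop` attribute on the model as fallback. A hedged sketch of that lookup, shown with a trivial `RunnableLambda`:

```python
# Sketch: reading bound kwargs back off a RunnableBinding.
from langchain_core.runnables import RunnableLambda

bound = RunnableLambda(lambda x: x).bind(stop=["\nUser:"])

stop = []
if hasattr(bound, "kwargs"):
    stop = getattr(bound, "kwargs", {}).get("stop", [])
if not stop:
    stop = getattr(bound, "stop", [])  # e.g. a model configured with stop directly

assert stop == ["\nUser:"]
```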
@@ -351,11 +377,10 @@ async def _invoke_with_string_prompt(
     llm: Union[BaseLanguageModel, Runnable],
     prompt: str,
     callbacks: BaseCallbackManager,
-    stop: Optional[List[str]],
 ):
     """Invoke LLM with string prompt."""
     try:
-        return await llm.ainvoke(prompt, config=RunnableConfig(callbacks=callbacks), stop=stop)
+        return await llm.ainvoke(prompt, config=RunnableConfig(callbacks=callbacks))
     except Exception as e:
         _raise_llm_call_exception(e, llm)
 
@@ -364,13 +389,12 @@ async def _invoke_with_message_list(
     llm: Union[BaseLanguageModel, Runnable],
     prompt: List[dict],
     callbacks: BaseCallbackManager,
-    stop: Optional[List[str]],
 ):
     """Invoke LLM with message list after converting to LangChain format."""
     messages = _convert_messages_to_langchain_format(prompt)
 
     try:
-        return await llm.ainvoke(messages, config=RunnableConfig(callbacks=callbacks), stop=stop)
+        return await llm.ainvoke(messages, config=RunnableConfig(callbacks=callbacks))
     except Exception as e:
         _raise_llm_call_exception(e, llm)
 
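With the per-call `stop=` arguments gone, a caller only binds once and everything downstream picks the value up from the binding. A minimal usage sketch under these assumptions (hypothetical model name, requires an OpenAI API key; any LangChain chat model works the same way):

```python
import asyncio

from langchain_core.runnables import RunnableConfig
from langchain_openai import ChatOpenAI

async def main() -> None:
    # Bind stop once; ainvoke()/astream() no longer take a stop= argument.
    llm = ChatOpenAI(model="gpt-4o-mini").bind(stop=["\nUser:"])
    response = await llm.ainvoke(
        "Write one sentence about diffs.",
        config=RunnableConfig(callbacks=[]),
    )
    print(response.content)

asyncio.run(main())
```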