We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 31b5e48 · commit fe812e2 — Copy full SHA for fe812e2
libs/core/langchain_core/language_models/chat_models.py
@@ -1128,7 +1128,12 @@ def _generate_with_cache(
         if check_cache:
             if llm_cache:
                 llm_string = self._get_llm_string(stop=stop, **kwargs)
-                prompt = dumps(messages)
+                normalized_messages = [
+                    msg.model_copy(update={"id": None})
+                    if getattr(msg, "id", None) is not None else msg
+                    for msg in messages
+                ]
+                prompt = dumps(normalized_messages)
                 cache_val = llm_cache.lookup(prompt, llm_string)
                 if isinstance(cache_val, list):
                     converted_generations = self._convert_cached_generations(cache_val)
0 commit comments