litellm.exceptions.MidStreamFallbackError: litellm.ServiceUnavailableError: litellm.MidStreamFallbackError: litellm.APIConnectionError: APIConnectionError: OpenAIException - Context size has been exceeded. Original exception: APIConnectionError: litellm.APIConnectionError: APIConnectionError: OpenAIException - Context size has been exceeded.
Traceback (most recent call last):
Traceback (most recent call last):
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/streaming_handler.py", line 1812, in __anext__
async for chunk in self.completion_stream:
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_streaming.py", line 147, in __aiter__
async for item in self._iterator:
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_streaming.py", line 193, in __stream__
raise APIError(
openai.APIError: Context size has been exceeded.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/streaming_handler.py", line 1996, in __anext__
raise exception_type(
^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2328, in exception_type
raise e # it's already mapped
^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 569, in exception_type
raise APIConnectionError(
litellm.exceptions.APIConnectionError: litellm.APIConnectionError: APIConnectionError: OpenAIException - Context size has been exceeded.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/a0/agent.py", line 454, in monologue
agent_response, _reasoning = await self.call_chat_model(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/a0/agent.py", line 808, in call_chat_model
response, reasoning = await model.unified_call(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/a0/models.py", line 511, in unified_call
async for chunk in _completion: # type: ignore
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/streaming_handler.py", line 2006, in __anext__
raise MidStreamFallbackError(
[first traceback truncated here — copied from a web UI whose "Show more" control was not expanded; "open_in_full"/"content_copy" button labels removed]
litellm.exceptions.BadRequestError: litellm.BadRequestError: Lm_studioException - Error code: 400 - {'error': 'Context size has been exceeded.'}
Traceback (most recent call last):
Traceback (most recent call last):
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 823, in acompletion
headers, response = await self.make_openai_chat_completion_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py", line 190, in async_wrapper
result = await func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 454, in make_openai_chat_completion_request
raise e
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 436, in make_openai_chat_completion_request
await openai_aclient.chat.completions.with_raw_response.create(
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_legacy_response.py", line 381, in wrapped
return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py", line 2589, in create
return await self._post(
^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_base_client.py", line 1794, in post
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_base_client.py", line 1594, in request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': 'Context size has been exceeded.'}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/main.py", line 598, in acompletion
response = await init_response
^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 870, in acompletion
raise OpenAIError(
litellm.llms.openai.common_utils.OpenAIError: Error code: 400 - {'error': 'Context size has been exceeded.'}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/a0/python/extensions/monologue_end/_51_memorize_solutions.py", line 48, in memorize
solutions_json = await self.agent.call_utility_model(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/a0/agent.py", line 784, in call_utility_model
response, _reasoning = await call_data["model"].unified_call(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/a0/models.py", line 502, in unified_call
_completion = await acompletion(
^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/utils.py", line 1638, in wrapper_async
raise e
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/utils.py", line 1484, in wrapper_async
result = await original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/main.py", line 617, in acompletion
raise exception_type(
^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2323, in exception_type
raise e
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 468, in exception_type
raise BadRequestError(
[end of copied output — trailing UI button labels ("volume_up", "content_copy") removed; the second traceback is cut off mid-raise at BadRequestError]