litellm.exceptions.BadRequestError: litellm.BadRequestError: Lm_studioException - Error code: 400 - {'error': 'Context size has been exceeded.'}
Traceback (most recent call last):
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 823, in acompletion headers, response = await self.make_openai_chat_completion_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py", line 190, in async_wrapper result = await func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 454, in make_openai_chat_completion_request raise e
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 436, in make_openai_chat_completion_request await openai_aclient.chat.completions.with_raw_response.create( File "/opt/venv-a0/lib/python3.12/site-packages/openai/_legacy_response.py", line 381, in wrapped return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py", line 2589, in create return await self._post(
^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_base_client.py", line 1794, in post return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/openai/_base_client.py", line 1594, in request raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': 'Context size has been exceeded.'}

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/main.py", line 598, in acompletion response = await init_response
^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 870, in acompletion raise OpenAIError(
litellm.llms.openai.common_utils.OpenAIError: Error code: 400 - {'error': 'Context size has been exceeded.'}

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "/a0/python/extensions/monologue_end/_50_memorize_fragments.py", line 50, in memorize ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/a0/agent.py", line 784, in call_utility_model response, _reasoning = await call_data["model"].unified_call(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/a0/models.py", line 502, in unified_call _completion = await acompletion(
^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/utils.py", line 1638, in wrapper_async raise e
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/utils.py", line 1484, in wrapper_async result = await original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/main.py", line 617, in acompletion raise exception_type(
^^^^^^^^^^^^^^^
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2323, in exception_type raise e
File "/opt/venv-a0/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 468, in exception_type raise BadRequestError(