runtime error
Exit code: 1. Reason:
    async for x in self.fn(*args):  # noqa: B007  # type: ignore
        generated_values.append(x)
  File "/usr/local/lib/python3.13/site-packages/gradio/chat_interface.py", line 1057, in _examples_stream_fn
    async for response in generator:
        yield self._process_example(message, response)
  File "/usr/local/lib/python3.13/site-packages/gradio/utils.py", line 850, in __anext__
    return await anyio.to_thread.run_sync(
        run_sync_iterator_async, self.iterator, limiter=self.limiter
    )
  File "/usr/local/lib/python3.13/site-packages/anyio/to_thread.py", line 63, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
        func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
    )
  File "/usr/local/lib/python3.13/site-packages/anyio/_backends/_asyncio.py", line 2502, in run_sync_in_worker_thread
    return await future
  File "/usr/local/lib/python3.13/site-packages/anyio/_backends/_asyncio.py", line 986, in run
    result = context.run(func, *args)
  File "/usr/local/lib/python3.13/site-packages/gradio/utils.py", line 833, in run_sync_iterator_async
    return next(iterator)
  File "/usr/local/lib/python3.13/site-packages/gradio/external_utils.py", line 146, in chat_fn
    handle_hf_error(e)
  File "/usr/local/lib/python3.13/site-packages/gradio/external_utils.py", line 323, in handle_hf_error
    raise Error(str(e)) from e
gradio.exceptions.Error: "(Request ID: Root=1-6966f05e-79c7813a0d21740078c2d380;5186c97f-9e54-4095-bd56-01fc5e0385bd)

Bad request:
{'message': "The requested model 'DeepHat/DeepHat-V1-7B' is not supported by any provider you have enabled.", 'type': 'invalid_request_error', 'param': 'model', 'code': 'model_not_supported'}"
Container logs:
Fetching error logs...