import { useEffect, useState } from "react"; import { Info, CaretDown, CaretUp } from "@phosphor-icons/react"; import paths from "@/utils/paths"; import System from "@/models/system"; import PreLoader from "@/components/Preloader"; import { LMSTUDIO_COMMON_URLS } from "@/utils/constants"; import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery"; export default function LMStudioOptions({ settings, showAlert = false }) { const { autoDetecting: loading, basePath, basePathValue, showAdvancedControls, setShowAdvancedControls, handleAutoDetectClick, } = useProviderEndpointAutoDiscovery({ provider: "lmstudio", initialBasePath: settings?.LMStudioBasePath, ENDPOINTS: LMSTUDIO_COMMON_URLS, }); const [maxTokens, setMaxTokens] = useState( settings?.LMStudioTokenLimit || 4096 ); const handleMaxTokensChange = (e) => { setMaxTokens(Number(e.target.value)); }; return (
LM Studio as your LLM requires you to set an embedding service to use.
Maximum number of tokens for context and response.
Enter the URL where LM Studio is running.
Select the LM Studio model you want to use. Models will load after entering a valid LM Studio URL.
Choose the LM Studio model you want to use for your conversations.