{
  "model": "",
  "model_param": "/Volumes/ai/llm/lmstudio-community/llm/Gemmasutra-Mini-2B-v1_Q6k.gguf",
  "port": 5001,
  "port_param": 5001,
  "host": "",
  "launch": true,
  "config": null,
  "threads": 4,
  "usecublas": null,
  "usevulkan": null,
  "useclblast": null,
  "noblas": true,
  "contextsize": 4096,
  "gpulayers": -1,
  "tensor_split": null,
  "ropeconfig": [0.0, 10000.0],
  "blasbatchsize": 512,
  "blasthreads": null,
  "lora": null,
  "noshift": false,
  "nommap": false,
  "usemlock": false,
  "noavx2": false,
  "debugmode": 0,
  "skiplauncher": false,
  "onready": "",
  "benchmark": null,
  "prompt": "",
  "promptlimit": 100,
  "multiuser": 1,
  "remotetunnel": false,
  "highpriority": false,
  "foreground": false,
  "preloadstory": null,
  "quiet": false,
  "ssl": null,
  "nocertify": false,
  "mmproj": null,
  "password": null,
  "ignoremissing": false,
  "chatcompletionsadapter": null,
  "flashattention": false,
  "quantkv": 0,
  "forceversion": 0,
  "smartcontext": false,
  "unpack": "",
  "nomodel": false,
  "hordemodelname": "",
  "hordeworkername": "",
  "hordekey": "",
  "hordemaxctx": 0,
  "hordegenlen": 0,
  "sdmodel": "",
  "sdthreads": 4,
  "sdclamped": 0,
  "sdvae": "",
  "sdvaeauto": false,
  "sdquant": false,
  "sdlora": "",
  "sdloramult": 1.0,
  "whispermodel": "",
  "hordeconfig": null,
  "sdconfig": null
}