Spaces:
Sleeping
Sleeping
remove auto and use preferred instead
Browse files
src/lib/chat/triggerAiCall.ts
CHANGED
|
@@ -68,7 +68,7 @@ export async function triggerAiCall(ctx: TriggerAiCallContext): Promise<void> {
|
|
| 68 |
method: 'POST',
|
| 69 |
body: JSON.stringify({
|
| 70 |
model: model,
|
| 71 |
-
provider: modelSettings?.provider ?? 'auto',
|
| 72 |
messages: formattedMessages,
|
| 73 |
billingTo: billingOption,
|
| 74 |
...(modelSettings
|
|
|
|
| 68 |
method: 'POST',
|
| 69 |
body: JSON.stringify({
|
| 70 |
model: model,
|
| 71 |
+
provider: modelSettings?.provider ?? 'preferred',
|
| 72 |
messages: formattedMessages,
|
| 73 |
billingTo: billingOption,
|
| 74 |
...(modelSettings
|
src/lib/components/chat/Assistant.svelte
CHANGED
|
@@ -163,7 +163,7 @@
|
|
| 163 |
</div>
|
| 164 |
{#if usage && !loading && message}
|
| 165 |
{@const modelSettings = modelsState.models.find((m) => m.id === selectedModel)}
|
| 166 |
-
{@const provider = modelSettings?.provider ?? 'auto'}
|
| 167 |
<div class="mt-3 flex items-center justify-between gap-2 border-t border-border pt-3">
|
| 168 |
<p class="text-xs text-muted-foreground">
|
| 169 |
{usage.total_tokens} tokens
|
|
|
|
| 163 |
</div>
|
| 164 |
{#if usage && !loading && message}
|
| 165 |
{@const modelSettings = modelsState.models.find((m) => m.id === selectedModel)}
|
| 166 |
+
{@const provider = modelSettings?.provider ?? 'preferred'}
|
| 167 |
<div class="mt-3 flex items-center justify-between gap-2 border-t border-border pt-3">
|
| 168 |
<p class="text-xs text-muted-foreground">
|
| 169 |
{usage.total_tokens} tokens
|
src/lib/components/model/SettingsModel.svelte
CHANGED
|
@@ -34,7 +34,7 @@
|
|
| 34 |
let temperature = $state<number | undefined>(undefined);
|
| 35 |
let max_tokens = $state<number | undefined>(undefined);
|
| 36 |
let top_p = $state<number | undefined>(undefined);
|
| 37 |
-
let provider = $state<string>('auto');
|
| 38 |
let search = $state<string>('');
|
| 39 |
|
| 40 |
$effect(() => {
|
|
@@ -42,7 +42,7 @@
|
|
| 42 |
temperature = model.temperature ?? undefined;
|
| 43 |
max_tokens = model.max_tokens ?? undefined;
|
| 44 |
top_p = model.top_p ?? undefined;
|
| 45 |
-
provider = model.provider ?? 'auto';
|
| 46 |
}
|
| 47 |
});
|
| 48 |
|
|
@@ -56,7 +56,7 @@
|
|
| 56 |
);
|
| 57 |
|
| 58 |
let maxContentLength = $derived(
|
| 59 |
-
provider === 'auto'
|
| 60 |
? model?.providers[0]?.context_length
|
| 61 |
: model?.providers.find((p) => p.provider === provider)?.context_length
|
| 62 |
);
|
|
|
|
| 34 |
let temperature = $state<number | undefined>(undefined);
|
| 35 |
let max_tokens = $state<number | undefined>(undefined);
|
| 36 |
let top_p = $state<number | undefined>(undefined);
|
| 37 |
+
let provider = $state<string>('preferred');
|
| 38 |
let search = $state<string>('');
|
| 39 |
|
| 40 |
$effect(() => {
|
|
|
|
| 42 |
temperature = model.temperature ?? undefined;
|
| 43 |
max_tokens = model.max_tokens ?? undefined;
|
| 44 |
top_p = model.top_p ?? undefined;
|
| 45 |
+
provider = model.provider ?? 'preferred';
|
| 46 |
}
|
| 47 |
});
|
| 48 |
|
|
|
|
| 56 |
);
|
| 57 |
|
| 58 |
let maxContentLength = $derived(
|
| 59 |
+
provider === 'preferred'
|
| 60 |
? model?.providers[0]?.context_length
|
| 61 |
: model?.providers.find((p) => p.provider === provider)?.context_length
|
| 62 |
);
|
src/lib/consts.ts
CHANGED
|
@@ -35,8 +35,8 @@ export const SUGGESTIONS_PROMPT = [
|
|
| 35 |
|
| 36 |
export const PROVIDER_SELECTION_MODES = [
|
| 37 |
{
|
| 38 |
-
value: 'auto',
|
| 39 |
-
label: 'Auto',
|
| 40 |
description: 'your HF preference order',
|
| 41 |
class: 'bg-yellow-500 dark:bg-yellow-500/10',
|
| 42 |
iconClass: 'text-white dark:text-yellow-500',
|
|
|
|
| 35 |
|
| 36 |
export const PROVIDER_SELECTION_MODES = [
|
| 37 |
{
|
| 38 |
+
value: 'preferred',
|
| 39 |
+
label: 'Preferred',
|
| 40 |
description: 'your HF preference order',
|
| 41 |
class: 'bg-yellow-500 dark:bg-yellow-500/10',
|
| 42 |
iconClass: 'text-white dark:text-yellow-500',
|
src/lib/state/models.svelte.ts
CHANGED
|
@@ -42,7 +42,7 @@ function mergeModelsWithStoredSettings(models: ChatModel[]): ChatModel[] {
|
|
| 42 |
temperature: custom.temperature ?? m.temperature,
|
| 43 |
top_p: custom.top_p ?? m.top_p,
|
| 44 |
max_tokens: custom.max_tokens ?? m.max_tokens,
|
| 45 |
-
provider: custom.provider ?? m.provider ?? 'auto'
|
| 46 |
};
|
| 47 |
});
|
| 48 |
}
|
|
|
|
| 42 |
temperature: custom.temperature ?? m.temperature,
|
| 43 |
top_p: custom.top_p ?? m.top_p,
|
| 44 |
max_tokens: custom.max_tokens ?? m.max_tokens,
|
| 45 |
+
provider: custom.provider ?? m.provider ?? 'preferred'
|
| 46 |
};
|
| 47 |
});
|
| 48 |
}
|
src/routes/api/+server.ts
CHANGED
|
@@ -6,7 +6,7 @@ export async function POST({ request }: RequestEvent) {
|
|
| 6 |
model,
|
| 7 |
messages,
|
| 8 |
options,
|
| 9 |
-
provider = 'auto',
|
| 10 |
billingTo = 'personal'
|
| 11 |
} = await request.json();
|
| 12 |
const token = request.headers.get('Authorization')?.split(' ')[1];
|
|
@@ -31,7 +31,7 @@ export async function POST({ request }: RequestEvent) {
|
|
| 31 |
try {
|
| 32 |
const stream = client.chatCompletionStream(
|
| 33 |
{
|
| 34 |
-
model:
|
| 35 |
...(options ?? {}),
|
| 36 |
messages: [
|
| 37 |
{
|
|
|
|
| 6 |
model,
|
| 7 |
messages,
|
| 8 |
options,
|
| 9 |
+
provider = 'preferred',
|
| 10 |
billingTo = 'personal'
|
| 11 |
} = await request.json();
|
| 12 |
const token = request.headers.get('Authorization')?.split(' ')[1];
|
|
|
|
| 31 |
try {
|
| 32 |
const stream = client.chatCompletionStream(
|
| 33 |
{
|
| 34 |
+
model: `${model}:${provider}`,
|
| 35 |
...(options ?? {}),
|
| 36 |
messages: [
|
| 37 |
{
|