import gradio as gr
import edge_tts
import tempfile
import os
import random
from huggingface_hub import InferenceClient
from streaming_stt_nemo import Model

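# Default speech-recognition language and a cache of loaded STT engines,
# so the NeMo model is instantiated only once per language.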
default_lang = "en"

engines = { default_lang: Model(default_lang) }

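# Transcribe a recorded audio file to text with the streaming NeMo STT model.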
def transcribe(audio):
    lang = "en"
    model = engines[lang]
    text = model.stt_file(audio)[0]
    return text

# Optional Hugging Face token; passing it enables authenticated inference calls.
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Ordered (label substring, repo id) pairs; the first match wins, so specific
# names must come before generic ones like "Llama" or "Mistral".
MODEL_REPOS = [
    ("Mixtral", "mistralai/Mixtral-8x7B-Instruct-v0.1"),
    ("Mr-Bhaskar/FusionBot", "Mr-Bhaskar/FusionBot"),
    ("fbt-llama2-7b", "Mr-Bhaskar/fbt-llama2-7b"),
    ("Mr-Bhaskar/FBt", "Mr-Bhaskar/FBt"),
    ("fbt-mistral7b-instruct", "Mr-Bhaskar/fbt-mistral7b-instruct"),
    ("fbt-mistral-7b", "Mr-Bhaskar/fbt-mistral-7b"),
    ("fbt-llama3-8b", "Mr-Bhaskar/fbt-llama3-8b"),
    ("fbt-gemma-7b", "Mr-Bhaskar/fbt-gemma-7b"),
    ("llama-8b-inst", "Mr-Bhaskar/fbt-llama-8b-inst"),
    ("gemma-7b-inst", "Mr-Bhaskar/fbt-gemma-7b-inst"),
    ("Llama", "meta-llama/Meta-Llama-3-8B-Instruct"),
    ("Mistral", "mistralai/Mistral-7B-Instruct-v0.3"),
    ("Phi", "microsoft/Phi-3-mini-4k-instruct"),
]

def client_fn(model):
    """Map a model label from the dropdown to an InferenceClient."""
    for key, repo_id in MODEL_REPOS:
        if key in model:
            return InferenceClient(repo_id, token=HF_TOKEN)
    # Fall back to Phi-3 mini for any unrecognized label.
    return InferenceClient("microsoft/Phi-3-mini-4k-instruct", token=HF_TOKEN)

def randomize_seed_fn(seed: int) -> int:
    """Ignore the incoming seed and draw a fresh one so replies stay varied."""
    return random.randint(0, 999999)

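# System prompt prepended to every query; the trailing [JARVIS] tag marks
# where the model's in-character reply should begin.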
system_instructions1 = (
    "[SYSTEM] Answer as the real JARVIS, made by 'Tony Stark'. Keep the conversation "
    "very short, clear, friendly and concise. Respond in character as JARVIS, skip "
    "introductions, answer only the question the user asked, and do not say anything "
    "unnecessary. [USER]"
)

def models(text, model="Mixtral 8x7B", seed=42):
    """Generate a short JARVIS-style reply from the selected model."""
    seed = int(randomize_seed_fn(seed))
    client = client_fn(model)

    generate_kwargs = dict(
        max_new_tokens=300,
        seed=seed,
    )

    formatted_prompt = system_instructions1 + text + "[JARVIS]"
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True,
        return_full_text=False)

    # Accumulate streamed tokens, skipping the end-of-sequence marker.
    output = ""
    for response in stream:
        if response.token.text != "</s>":
            output += response.token.text

    return output
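# Example (a hypothetical call; assumes the selected model is reachable,
# with HF_TOKEN set if the endpoint requires authentication):
#   reply = models("Who designed the Iron Man suit?", model="Mixtral 8x7B")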

async def respond(audio, model, seed):
    """Voice pipeline: transcribe speech, query the LLM, and speak the reply."""
    user = transcribe(audio)
    reply = models(user, model, seed)
    communicate = edge_tts.Communicate(reply)
    # edge-tts emits MP3 audio by default, so label the temp file accordingly.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

DESCRIPTION = """# <center><b>JARVIS⚡</b></center>
### <center>A personal assistant of Tony Stark for YOU</center>
### <center>Voice chat with your personal assistant</center>
"""

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        voice_select = gr.Dropdown(
            ['Mixtral 8x7B', 'Llama 3 8B', 'Mistral 7B v0.3', 'Phi 3 mini'],
            value="Mistral 7B v0.3",
            label="Model",
        )
        voice_seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=999999,
            step=1,
            value=0,
            visible=False,
        )
        voice_input = gr.Audio(label="User", sources="microphone", type="filepath")
        voice_output = gr.Audio(
            label="AI",
            type="filepath",
            interactive=False,
            autoplay=True,
            elem_classes="audio",
        )
        # respond handles one request at a time, so no batching is declared.
        gr.Interface(
            fn=respond,
            inputs=[voice_input, voice_select, voice_seed],
            outputs=[voice_output],
            live=True,
        )
        
    with gr.Row():
        text_select = gr.Dropdown(
            ['fbt-mistral-7b', 'Mixtral 8x7B', 'Llama 3 8B', 'Mistral 7B v0.3',
             'Phi 3 mini'],
            value="Mistral 7B v0.3",
            label="Model",
        )
        text_seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=999999,
            step=1,
            value=0,
            visible=False,
        )
        text_input = gr.Textbox(label="User")
        text_output = gr.Textbox(label="AI", interactive=False)
        gr.Interface(
            fn=models,
            inputs=[text_input, text_select, text_seed],
            outputs=[text_output],
            live=True,
        )

if __name__ == "__main__":
    demo.queue(max_size=200).launch()