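# Gradio Space: renders models.csv as a leaderboard table and exposes a SmolLM-1.7B chat demo.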
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from pathlib import Path
import pandas as pd
import spaces

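# Load the SmolLM-1.7B checkpoint and build the text-generation pipeline used by the chat tab.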
model_checkpoint = "HuggingFaceTB/SmolLM-1.7B"
model = AutoModelForCausalLM.from_pretrained(model_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

# do_sample=False gives deterministic (greedy) decoding.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, repetition_penalty=1.5, do_sample=False)


abs_path = Path(__file__).parent

def refreshfn() -> gr.HTML:
    # Re-read models.csv and render it as an HTML table, without round-tripping through a tab.html file on disk.
    df = pd.read_csv(str(abs_path / "models.csv"))
    return gr.HTML(df.to_html())

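# Simple echo handler; defined but not attached to any event.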
def chatfn(text):
    return text, text

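# UI: a leaderboard tab backed by models.csv and a chat tab with two chatbots sharing one textbox.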
with gr.Blocks() as demo:
    gr.Markdown("""
    # 🥇 Leaderboard Component
    """)
    with gr.Tabs():
        with gr.Tab("Demo"):
            df = pd.read_csv(str(abs_path / "models.csv"))
            t = gr.HTML(df.to_html())
            btn = gr.Button("Refresh")
            btn.click(fn=refreshfn, inputs=None, outputs=t)
        with gr.Tab("Chats"):
            with gr.Column():
                chatbot = gr.Chatbot()
            with gr.Column():
                chatbot1 = gr.Chatbot()
            msg = gr.Textbox()
            clear = gr.ClearButton([msg, chatbot])
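            # spaces.GPU requests ZeroGPU hardware for calls to this function; duration is in seconds.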
            @spaces.GPU(duration=200)
            def respond(message, chat_history):
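                # Generate a reply with the pipeline and append the (user, bot) turn to the history.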
                response = pipe(message)
                bot_message = response[0]["generated_text"]
                chat_history.append((message, bot_message))
                return "", chat_history

            # msg.submit only registers event listeners on the Blocks; wrapping the calls in a
            # ThreadPoolExecutor does not run anything in parallel. Registering two listeners on
            # the same textbox makes both chatbots respond whenever a message is submitted.
            msg.submit(respond, [msg, chatbot], [msg, chatbot])
            msg.submit(respond, [msg, chatbot1], [msg, chatbot1])

if __name__ == "__main__":
    demo.launch()