Valtry committed on
Commit ef678c6 · verified · 1 Parent(s): 6722b1d

Update app.py

Files changed (1)
  1. app.py +35 -55
app.py CHANGED
@@ -1,69 +1,49 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+
+ # Small model for CPU
+ model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+
+ print("Loading tokenizer...")
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ print("Loading model...")
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.float32,
+     device_map="cpu"
+ )
+
+ print("Model loaded successfully!")
+
+ def chat(message):
+
+     prompt = f"""
+ You are a helpful assistant.
+
+ User: {message}
+ Assistant:
+ """
+
+     inputs = tokenizer(prompt, return_tensors="pt")
+
+     output = model.generate(
+         **inputs,
+         max_new_tokens=100,
+         temperature=0.7
+     )
+
+     response = tokenizer.decode(output[0], skip_special_tokens=True)
+
+     return response
+
+ demo = gr.Interface(
+     fn=chat,
+     inputs=gr.Textbox(label="Ask something"),
+     outputs=gr.Textbox(label="AI Response"),
+     title="Auric AI Model Test"
+ )
+
+ demo.launch()
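
A note on the new chat(): it decodes the full output sequence, so the returned text repeats the prompt, and temperature=0.7 is passed without do_sample=True, so with greedy decoding it has no effect. Below is a minimal sketch, not part of this commit, of one way the function could be adapted; it assumes the stock transformers generate/decode API and the chat template bundled with TinyLlama/TinyLlama-1.1B-Chat-v1.0, and reuses the same variable names as the committed code.

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,
    device_map="cpu",
)

def chat(message):
    # Build the prompt with the model's own chat template instead of a hand-written f-string.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": message},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    output = model.generate(
        input_ids,
        max_new_tokens=100,
        do_sample=True,   # temperature only takes effect when sampling is enabled
        temperature=0.7,
    )
    # Decode only the newly generated tokens so the prompt is not echoed back.
    new_tokens = output[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Slicing the output at input_ids.shape[-1] keeps the gr.Textbox result to the assistant reply alone, which matches how the previous InferenceClient-based version streamed only the generated deltas.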