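# Gradio chat app that streams responses from RedPajama-INCITE-Chat-3B-v1.
# Earlier experiments (Zephyr via InferenceClient, audio playback, file counting,
# a LangChain + OpenAI bot) are kept below as commented-out alternatives.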
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread

# Load the tokenizer and model (float16 halves memory use; best suited to a GPU)
tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)

# Move model to GPU if available, otherwise use CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)

class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the model emits one of the stop tokens."""
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [29, 0]  # ids for '<' and end-of-text in this model's tokenizer
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

def predict(message, history):
    # history is a list of [user, bot] pairs; append the new turn with an empty bot slot
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    # Format the conversation with the model's <human>/<bot> turn markers
    messages = "".join([f"\n<human>:{item[0]}\n<bot>:{item[1]}" for item in history_transformer_format])

    # Tokenize the input and move it to the correct device (GPU/CPU)
    model_inputs = tokenizer([messages], return_tensors="pt").to(device)
    
    # Stream decoded tokens as they are generated (skip the prompt and special tokens)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    # Generation parameters; dict(model_inputs, ...) unpacks input_ids and attention_mask
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=1000,
        temperature=1.0,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
    )

    # Run generation in a background thread: generate() blocks until done,
    # while the streamer lets this function yield partial output in the meantime
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    # Yield the growing response as tokens arrive
    partial_message = ""
    for new_token in streamer:
        if new_token != '<':  # drop the '<' that opens the "<human>:" stop marker
            partial_message += new_token
            yield partial_message

# Gradio interface to interact with the model
gr.ChatInterface(predict).launch()
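# Usage note (a sketch of assumptions, not verified on every setup): run with
# `python app.py` and open the printed local URL. A CUDA GPU is assumed for
# reasonable speed; on CPU, float16 inference can be slow or unsupported for
# some ops, so loading with torch_dtype=torch.float32 may be the safer choice.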




# import gradio as gr
# from huggingface_hub import InferenceClient

# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for chunk in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = chunk.choices[0].delta.content

#         if token:  # the final streamed chunk can have an empty delta
#             response += token
#         yield response

# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )


# if __name__ == "__main__":
#     demo.launch()




# import gradio as gr

# def fake(message, history):
#     if message.strip():
#         # Return a text reply plus an update for the separate audio player
#         return "Playing sample audio...", gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
#     else:
#         return "Please provide the name of an artist", None

# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(placeholder="Play music by any artist!")
#     textbox = gr.Textbox(placeholder="Which artist's music do you want to listen to?", scale=7)
#     audio_player = gr.Audio()

#     def chat_interface(message, history):
#         response, audio = fake(message, history)
#         return history + [(message, response)], audio

#     textbox.submit(chat_interface, [textbox, chatbot], [chatbot, audio_player])

# demo.launch()
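# Note: returning gr.Audio(...) from the handler relies on Gradio 4 treating a
# returned component instance as an update to the corresponding output component.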

# import random
# import gradio as gr

# def random_response(message, history):
#     return random.choice(["Yes", "No"])

# gr.ChatInterface(random_response).launch()



# import gradio as gr

# def yes_man(message, history):
#     if message.endswith("?"):
#         return "Yes"
#     else:
#         return "Ask me anything!"

# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(placeholder="<strong>Ask me a yes or no question</strong><br>Ask me anything"),
#     textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=15),
#     title="Yes Man",
#     description="Ask Yes Man any question",
#     theme="soft",
#     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
# ).launch()
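# Note: retry_btn / undo_btn / clear_btn were removed in Gradio 5; drop those
# three arguments if running on a newer Gradio.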

# below code was not working: a gr.Chatbot output expects a list of [user, bot]
# message pairs, not a plain string
# import gradio as gr

# def count_files(files):
#     num_files = len(files) if files else 0  # files is None when the upload is cleared
#     return [[None, f"You uploaded {num_files} file(s)"]]

# with gr.Blocks() as demo:
#     with gr.Row():
#         chatbot = gr.Chatbot()
#         file_input = gr.Files(label="Upload Files")
#     file_input.change(count_files, inputs=file_input, outputs=chatbot)

# demo.launch()


# new code
# import os
# from langchain_openai import ChatOpenAI
# from langchain.schema import AIMessage, HumanMessage
# import gradio as gr


# os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"  # Replace with your key; never commit a real key

# llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo')

# def predict(message, history):
#     history_langchain_format = []
#     for msg in history:
#         if msg['role'] == "user":
#             history_langchain_format.append(HumanMessage(content=msg['content']))
#         elif msg['role'] == "assistant":
#             history_langchain_format.append(AIMessage(content=msg['content']))
#     history_langchain_format.append(HumanMessage(content=message))
#     gpt_response = llm.invoke(history_langchain_format)  # .invoke() replaces the deprecated direct call
#     return gpt_response.content

# gr.ChatInterface(predict).launch()
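
# Safer key handling (sketch): export OPENAI_API_KEY in your shell before
# launching and delete the os.environ assignment above; langchain_openai picks
# the key up from the environment automatically.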