Merge branch 'KingNish/OpenCHAT-Mini' into 'Kall00/Chab'
Browse files
- README.md +1 -1
- app.py +8 -10
- requirements.txt +0 -2
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: 🐧
|
|
| 4 |
colorFrom: green
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 4.
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
short_description: Chatbot with Vision,Image generation and WebSearch
|
|
|
|
| 4 |
colorFrom: green
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 4.40.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
short_description: Chatbot with Vision,Image generation and WebSearch
|
app.py
CHANGED
|
@@ -83,9 +83,7 @@ def search(query):
|
|
| 83 |
client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
|
| 84 |
client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
|
| 85 |
client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
func_caller = []
|
| 89 |
|
| 90 |
# Define the main chat function
|
| 91 |
def respond(message, history):
|
|
@@ -176,15 +174,15 @@ def respond(message, history):
|
|
| 176 |
buffer += new_text
|
| 177 |
yield buffer
|
| 178 |
else:
|
| 179 |
-
messages = f"<|
|
| 180 |
for msg in history:
|
| 181 |
-
messages += f"\n<|
|
| 182 |
-
messages += f"\n<|
|
| 183 |
-
messages+=f"\n<|
|
| 184 |
-
stream =
|
| 185 |
output = ""
|
| 186 |
for response in stream:
|
| 187 |
-
if not response.token.text == "<|
|
| 188 |
output += response.token.text
|
| 189 |
yield output
|
| 190 |
except:
|
|
@@ -211,7 +209,7 @@ demo = gr.ChatInterface(
|
|
| 211 |
examples=[
|
| 212 |
{"text": "Hy, who are you?",},
|
| 213 |
{"text": "What's the current price of Bitcoin",},
|
| 214 |
-
{"text": "Search and Tell me what's
|
| 215 |
{"text": "Create A Beautiful image of Effiel Tower at Night",},
|
| 216 |
{"text": "Write me a Python function to calculate the first 10 digits of the fibonacci sequence.",},
|
| 217 |
{"text": "What's the colour of car in given image", "files": ["./car1.png"]},
|
|
|
|
| 83 |
client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
|
| 84 |
client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
|
| 85 |
client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
|
| 86 |
+
client_yi = InferenceClient("01-ai/Yi-1.5-34B-Chat")
|
|
|
|
|
|
|
| 87 |
|
| 88 |
# Define the main chat function
|
| 89 |
def respond(message, history):
|
|
|
|
| 174 |
buffer += new_text
|
| 175 |
yield buffer
|
| 176 |
else:
|
| 177 |
+
messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
|
| 178 |
for msg in history:
|
| 179 |
+
messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
|
| 180 |
+
messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
|
| 181 |
+
messages+=f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>assistant\n"
|
| 182 |
+
stream = client_yi.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
|
| 183 |
output = ""
|
| 184 |
for response in stream:
|
| 185 |
+
if not response.token.text == "<|endoftext|>":
|
| 186 |
output += response.token.text
|
| 187 |
yield output
|
| 188 |
except:
|
|
|
|
| 209 |
examples=[
|
| 210 |
{"text": "Hy, who are you?",},
|
| 211 |
{"text": "What's the current price of Bitcoin",},
|
| 212 |
+
{"text": "Search and Tell me what's trending on Youtube.",},
|
| 213 |
{"text": "Create A Beautiful image of Effiel Tower at Night",},
|
| 214 |
{"text": "Write me a Python function to calculate the first 10 digits of the fibonacci sequence.",},
|
| 215 |
{"text": "What's the colour of car in given image", "files": ["./car1.png"]},
|
requirements.txt
CHANGED
|
@@ -1,7 +1,5 @@
|
|
| 1 |
-
huggingface_hub
|
| 2 |
bs4
|
| 3 |
pillow
|
| 4 |
-
gradio
|
| 5 |
torch
|
| 6 |
git+https://github.com/huggingface/transformers.git
|
| 7 |
opencv-python
|
|
|
|
|
|
|
| 1 |
bs4
|
| 2 |
pillow
|
|
|
|
| 3 |
torch
|
| 4 |
git+https://github.com/huggingface/transformers.git
|
| 5 |
opencv-python
|