File size: 3,835 Bytes
3d6ea35 74af2c9 df33074 3d6ea35 df33074 abf392e df33074 abf392e df33074 abf392e df33074 abf392e df33074 3d6ea35 abf392e df33074 74af2c9 439c74b eab1a33 df33074 827119e df33074 8fec3a4 abf392e df33074 abf392e df33074 3d6ea35 abf392e 3d6ea35 74af2c9 abf392e 74af2c9 abf392e 3d6ea35 abf392e 3d6ea35 abf392e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
import gradio as gr
from huggingface_hub import InferenceClient
client = InferenceClient("google/gemma-1.1-2b-it")
client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
def models(Query):
messages = []
messages.append({"role": "user", "content": f"[SYSTEM] You are ASSISTANT who answer question asked by user in short and concise manner. [USER] {Query}"})
Response = ""
for message in client.chat_completion(
messages,
max_tokens=2048,
stream=True
):
token = message.choices[0].delta.content
Response += token
yield Response
def nemo(query):
budget = 10
message = f"""[INST] [SYSTEM] You are a French robot full of hope and enthusiasm for future projects. You speak normally like a spoken conversion.
When given a problem to solve, you are an expert problem-solving assistant.
Your task is to provide a detailed, step-by-step solution to a given question.
Follow these instructions carefully:
0. In your messages use the tags [Laugh] = For a laugh sound, [Sniff] = For a sniffing noise, [Haa] = For a haaa sound
1. Read the given question carefully and reset counter between <count> and </count> to {budget} (maximum 3 steps).
2. Think critically like a human researcher or scientist. Break down the problem using first principles to conceptually understand and answer the question.
3. Generate a detailed, logical step-by-step solution.
4. Enclose each step of your solution within <step> and </step> tags.
5. You are allowed to use at most {budget} steps (starting budget), keep track of it by counting down within tags <count> </count>, STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
6. Do a self-reflection when you are unsure about how to proceed, based on the self-reflection and reward, decide whether you need to return to the previous steps.
7. After completing the solution steps, reorganize and synthesize the steps into the final answer within <answer> and </answer> tags.
8. Provide a critical, honest, and subjective self-evaluation of your reasoning process within <reflection> and </reflection> tags.
9. Assign a quality score to your solution as a float between 0.0 (lowest quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
Example format:
<count> [starting budget] </count>
<step> [Content of step 1] </step>
<count> [remaining budget] </count>
<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<reward> [Float between 0.0 and 1.0] </reward>
<count> [remaining budget] </count>
<step> [Content of step 3 or Content of some previous step] </step>
<count> [remaining budget] </count>
...
<step> [Content of final step] </step>
<count> [remaining budget] </count>
<answer> [Final Answer] </answer> (must give final answer in this format)
<reflection> [Evaluation of the solution] </reflection>
<reward> [Float between 0.0 and 1.0] </reward> [/INST] [INST] [QUERY] {query} [/INST] [ASSISTANT]
10. You have <Presed><Presed/> the whole discussion from the beginning """
stream = client.text_generation(message, max_new_tokens=4096, stream=True, details=True, return_full_text=False)
output = ""
for response in stream:
output += response.token.text
return output
description="# Light ChatBox\n### Enter a question and.. Tada this reponse generate in 0.5 second!"
with gr.Blocks() as demo1:
gr.Interface(description=description,fn=models, inputs=["text"], outputs="text")
with gr.Blocks() as demo2:
gr.Interface(description="Very low but critical thinker",fn=nemo, inputs=["text"], outputs="text", api_name="critical_thinker", concurrency_limit=10)
with gr.Blocks() as demo:
gr.TabbedInterface([demo1, demo2] , ["Fast", "Critical"])
demo.queue(max_size=300000)
demo.launch() |