# React_Coder / app.py — Hugging Face Space by ogegadavis254 ("Update app.py", commit 3180516)
from huggingface_hub import InferenceClient
import gradio as gr
import random
# NOTE(review): API_URL is never used — InferenceClient resolves its own endpoint
# from the model id below. Confirm before removing.
API_URL = "https://api-inference.huggingface.co/models/"
# Serverless Inference API client bound to a single instruct model.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.2"
) # mistralai/Mixtral-7B-Instruct-v0.1 or mistralai/Mixtral-7B-Instruct-v0.2
def format_prompt(message, history):
    """Build a Mistral-instruct prompt from the chat history.

    The system preamble is emitted first, followed by one
    ``[INST] user [/INST] assistant</s>`` pair per history turn, and finally
    the current user message wrapped in ``[INST] ... [/INST]``.

    Args:
        message: The latest user message.
        history: Iterable of ``(user_prompt, bot_response)`` pairs.

    Returns:
        The full prompt string to send to the model.
    """
    # Typos fixed relative to the original ("You a", "structure sructure",
    # "sue some few comments"); wording otherwise preserved.
    prompt = "You are a React Developer when told to develop a project that functions in a certain way ensure you write the project file structure then write all the codes in the most advanced way possible and in other cases you can try inserting pictures via url then write good code and ensure they are the best, dont write any simple codes please. and use some few comments on your codes."
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
def generate(prompt, history, temperature=0.7, max_new_tokens=4096, top_p=0.95, repetition_penalty=1.0):
    """Stream a completion for *prompt* given the chat *history*.

    Yields the cumulative response text after each streamed token, which is
    the generator shape gr.ChatInterface expects.

    Args:
        prompt: Latest user message.
        history: List of ``(user, assistant)`` turn pairs.
        temperature: Sampling temperature; clamped to a minimum of 1e-2
            because the inference API rejects values of ~0.
        max_new_tokens: Cap on generated tokens. The original default of
            200000048 far exceeded the model context window; 4096 is a
            practical ceiling for this model.
        top_p: Nucleus-sampling probability mass.
        repetition_penalty: Penalty for repeated tokens (>= 1.0).

    Yields:
        str: The response text accumulated so far.
    """
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    repetition_penalty = float(repetition_penalty)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        # Fresh seed per call so regenerations differ.
        seed=random.randint(0, 10**7),
    )
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
# CSS injected into the Blocks app. The original used '#'-style comments,
# which are not valid CSS (only /* ... */ is) and could invalidate the rule.
customCSS = """
/* #component-7 is the default element ID of the chat component */
#component-7 {
    height: 1600px; /* adjust the height as needed */
    flex-grow: 4;
}
"""
# Build the web UI: a ChatInterface whose fn is the streaming `generate`
# generator above (message, history) -> yielded partial responses.
# NOTE(review): customCSS is defined above but never passed to gr.Blocks
# (css=customCSS) — confirm whether it was meant to be applied.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.ChatInterface(
        generate,
        examples=[
            ["Hello"]
        ],
    )

# NOTE(review): queue(concurrency_count=...) was removed in Gradio 4.x —
# confirm this Space pins gradio 3.x, or this line raises a TypeError.
demo.queue(concurrency_count=75, max_size=100).launch(debug=True)