Karmastudios committed on
Commit
c64ad49
·
verified ·
1 Parent(s): c2ded22

Update app.js

Browse files
Files changed (1) hide show
  1. app.js +52 -56
app.js CHANGED
@@ -1,57 +1,53 @@
import gradio as gr
import requests
import os

# Replace with your model endpoint or HF Inference API
MODEL_URL = "https://api-inference.huggingface.co/models/YOUR_MODEL"

# Read the token from the environment so the secret never lives in source
# control; falls back to the old placeholder for backward compatibility.
API_KEY = os.environ.get("HF_API_KEY", "YOUR_KEY")

headers = {"Authorization": f"Bearer {API_KEY}"}
def generate_react(prompt):
    """Generate React code for a natural-language prompt via the HF Inference API.

    Args:
        prompt: Free-text description of the desired React code.

    Returns:
        The generated code as a string, or a ``//``-commented error message
        (the UI renders the result in a code viewer either way).
    """
    if not prompt.strip():
        return "// No prompt provided."

    payload = {
        "inputs": f"Write React code for: {prompt}",
        "parameters": {"max_new_tokens": 400}
    }

    try:
        # Bound the request so a stalled endpoint cannot hang the UI forever.
        response = requests.post(MODEL_URL, headers=headers, json=payload, timeout=60)
        data = response.json()

        # The Inference API reports failures as {"error": ...} with HTTP 200-ish
        # bodies, so check the payload rather than only the status code.
        if isinstance(data, dict) and "error" in data:
            return f"// Model error: {data['error']}"

        return data[0]["generated_text"]
    except Exception as e:
        # Best-effort UI: surface the failure as a comment instead of crashing.
        return f"// Error: {str(e)}"
# Gradio UI: two-column layout — prompt and button on the left, generated
# code viewer on the right.
with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    gr.Markdown(
        """
# ⚡ GhosTech React Code Generator
Type what you want and generate clean React components, hooks, utilities, or full pages.
"""
    )

    with gr.Row():
        with gr.Column(scale=1):
            # Free-text description of the component/hook/page to generate.
            prompt = gr.Textbox(
                label="Describe the React code you want",
                placeholder="Example: A React component that fetches weather data and displays it...",
                lines=6,
            )
            generate_btn = gr.Button("Generate React Code", variant="primary")

        with gr.Column(scale=1):
            # Code viewer populated by generate_react on click.
            output = gr.Code(
                label="Generated React Code",
                language="javascript",
                value="// Your generated code will appear here."
            )

    # Wire the button to the inference call defined above.
    generate_btn.click(generate_react, inputs=prompt, outputs=output)

demo.launch()
 
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Choose a lightweight, open model
model_name = "mistralai/Mistral-7B-Instruct-v0.2"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",   # let the checkpoint config pick fp16/bf16
    device_map="auto"     # shard across available devices (requires accelerate)
)

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    # Mistral's tokenizer defines no pad token; pin it to EOS so generation
    # does not emit the "Setting pad_token_id to eos_token_id" warning or
    # misbehave on padded inputs.
    pad_token_id=tokenizer.eos_token_id
)
def chat(history, message):
    """Run one conversational turn through the local text-generation pipeline.

    Args:
        history: List of (user, assistant) message pairs so far. Mutated in
            place (Gradio state/Chatbot value — this is the expected pattern).
        message: The new user message.

    Returns:
        Tuple of (updated history, "") — the empty string clears the textbox.
    """
    # Rebuild the running transcript in plain "User:/Assistant:" form.
    prompt = ""
    for user, bot in history:
        prompt += f"User: {user}\nAssistant: {bot}\n"
    prompt += f"User: {message}\nAssistant:"

    output = pipe(prompt)[0]["generated_text"]

    # The pipeline returns prompt + completion by default; strip the prompt
    # prefix instead of splitting on the *last* "Assistant:", which would pick
    # up a hallucinated later turn when the model continues the dialogue.
    if output.startswith(prompt):
        reply = output[len(prompt):]
    else:
        reply = output.split("Assistant:")[-1]
    # Truncate at any self-continued "User:" turn the model invented.
    reply = reply.split("\nUser:")[0].strip()

    history.append((message, reply))
    return history, ""
# Gradio UI: a minimal chatbot. Conversation history lives in the Chatbot
# component itself, which is passed to/from the callbacks directly.
with gr.Blocks() as demo:
    gr.Markdown("# 🔥 My Chatbot")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Say something")
    clear = gr.Button("Clear chat")

    # NOTE: the previous `state = gr.State([])` was dead code — history is
    # wired from `chatbot`, never from the State — so it has been removed.

    def respond(message, history):
        """Submit callback: guard against a None history, then delegate to chat()."""
        if history is None:
            history = []
        return chat(history, message)

    # Sends (textbox value, chat history); receives (updated history, "").
    msg.submit(respond, [msg, chatbot], [chatbot, msg])
    # Reset both the conversation and the input field.
    clear.click(lambda: ([], ""), None, [chatbot, msg])

demo.launch()