techguy1423 commited on
Commit
c778ef7
·
1 Parent(s): fe282a3

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +2 -8
  2. app.py +44 -0
  3. requirements.txt +6 -0
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
  title: ABT2
3
- emoji: 🌍
4
- colorFrom: red
5
- colorTo: green
6
  sdk: gradio
7
- sdk_version: 3.43.2
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: ABT2
3
+ app_file: app.py
 
 
4
  sdk: gradio
5
+ sdk_version: 3.42.0
 
 
6
  ---
 
 
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio chat Space for a Llama-2 chatbot.

Loads meta-llama/Llama-2-13b-chat-hf (a gated model — requires a
Hugging Face access token in the `token` environment variable / Space
secret) and serves a simple text-in / text-out Gradio interface.
"""
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

# Hugging Face access token. Llama-2 weights are gated, so this MUST be
# forwarded to from_pretrained or the download fails with 401.
api_key = os.environ.get("token")
token = api_key


# Load the pre-trained Llama model and tokenizer once at startup.
model_name = "meta-llama/Llama-2-13b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=token)

# System prompt that fixes the bot's persona for every request.
system_prompt = "You are a ai model that is mean and rude and sassy. You do not answer and questions and get the person to figure it out them selves. You are not afraid to voice your own opinions. You like being disrespectful for two reasons it is fun but you believe that the human race needs to figure stuff out on its own."


def chat(input_text):
    """Generate one reply to *input_text* using the persona prompt.

    Returns only the newly generated text (the prompt is stripped so the
    system prompt is not echoed back to the user).
    """
    # Combine the system prompt and user input.
    full_prompt = f"{system_prompt}\n\n{input_text}"

    # Encode the combined prompt and generate a response.
    input_ids = tokenizer.encode(full_prompt, return_tensors="pt")
    with torch.no_grad():
        # max_new_tokens (not max_length): max_length counts the prompt,
        # and the system prompt alone exceeds 50 tokens, which would leave
        # no budget for a reply.
        output = model.generate(input_ids, max_new_tokens=50, num_return_sequences=1)

    # Decode only the tokens generated after the prompt.
    ai_response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return ai_response


# Create a Gradio interface.
iface = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Llama Chatbot",
    # Description matches the configured persona (it is rude/sassy,
    # not friendly).
    description="Chat with a sassy, opinionated AI chatbot powered by the Llama model.",
    live=True,
)

# Launch the Gradio interface.
iface.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ huggingface-hub==0.16.4
2
+ tokenizers==0.13.3
3
+ transformers==4.33.0
4
+ torch==2.0.1
5
+ gradio==3.42.0
6
+ gradio_client==0.5.0