kumar1907 committed on
Commit
a42e4a7
·
verified ·
1 Parent(s): 1d21501

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -1,17 +1,17 @@
1
- # app.py
2
  import os
 
3
  import gradio as gr
4
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
5
 
6
  model_id = "meta-llama/Llama-3.1-8B-Instruct"
7
- token = os.environ.get("venkat") # 🔐 Get the token from environment variable
8
 
9
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
10
  model = AutoModelForCausalLM.from_pretrained(model_id, token=token, device_map="auto")
 
11
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
12
 
13
  def chat_with_llama(prompt):
14
  result = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
15
- return result[0]['generated_text']
16
-
17
- gr.Interface(fn=chat_with_llama, inputs="text", outputs="text", title="Venkat Assistant").launch()
 
 
1
  import os
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  import gradio as gr
 
4
 
5
  model_id = "meta-llama/Llama-3.1-8B-Instruct"
6
+ token = os.getenv("kumar")
7
 
8
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
9
  model = AutoModelForCausalLM.from_pretrained(model_id, token=token, device_map="auto")
10
+
11
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
12
 
13
  def chat_with_llama(prompt):
14
  result = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
15
+ return result[0]["generated_text"]
16
+
17
+ gr.Interface(fn=chat_with_llama, inputs="text", outputs="text", title="Venkat's Chatbot").launch()