makhdoomnaeem commited on
Commit
ebe2e1f
·
verified ·
1 Parent(s): 87243fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -33
app.py CHANGED
@@ -1,21 +1,15 @@
1
  import streamlit as st
2
- from transformers import PegasusForConditionalGeneration, PegasusTokenizer
3
- import torch
4
 
5
- # Load Hugging Face Model and Tokenizer
6
- @st.cache_resource
7
- def load_model():
8
- model_name = "tuner007/pegasus_paraphrase"
9
- model = PegasusForConditionalGeneration.from_pretrained(model_name)
10
- tokenizer = PegasusTokenizer.from_pretrained(model_name)
11
- return model, tokenizer
12
-
13
- # Initialize model and tokenizer
14
- model, tokenizer = load_model()
15
 
16
  # Streamlit App
17
- st.title("Humanizer & Rephraser App")
18
- st.subheader("Powered by Pegasus and Streamlit")
19
 
20
  # User Input
21
  input_text = st.text_area("Enter the text you want to process:", "")
@@ -47,34 +41,26 @@ if st.button("Generate Output"):
47
  else:
48
  task_prompt = f"Rephrase the following text: {input_text}"
49
 
50
- # Tokenize and process input
51
- inputs = tokenizer(task_prompt, return_tensors="pt", max_length=512, truncation=True)
52
- st.write("Debug: Tokenized Input Shape:", inputs.input_ids.shape) # Debugging tokenized input shape
53
-
54
- # Generate output
55
- output_ids = model.generate(
56
- inputs.input_ids,
57
- max_length=512,
58
- num_beams=5,
59
- num_return_sequences=1,
60
- early_stopping=True,
61
  )
62
 
63
- if output_ids is None or len(output_ids) == 0:
64
- raise ValueError("The model generated no output.")
65
 
66
- # Decode and validate the output
67
- output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
68
- if not output_text.strip():
69
- raise ValueError("Decoded output is empty.")
70
 
71
  output_word_count = len(output_text.split())
 
 
72
  st.success("Done!")
73
  st.text_area("Generated Output:", value=output_text, height=200)
74
  st.write(f"**Output Word Count:** {output_word_count}")
75
 
76
- except torch.cuda.OutOfMemoryError:
77
- st.error("CUDA out of memory. Try reducing input size or using CPU.")
78
  except Exception as e:
79
  st.error(f"An error occurred: {str(e)}")
80
 
 
1
import streamlit as st
import os
from groq import Groq

# --- Groq client initialization -------------------------------------------
# SECURITY: never hard-code an API key in source — a previous revision of
# this file committed a live key (now leaked; it must be rotated). Read the
# key from the environment instead, and fail fast with an actionable
# message if it is missing so the app doesn't crash later mid-request.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
if not GROQ_API_KEY:
    st.error(
        "GROQ_API_KEY is not set. Export it as an environment variable "
        "(e.g. `export GROQ_API_KEY=...`) before launching the app."
    )
    st.stop()  # halt the Streamlit script; nothing below can work without a key

client = Groq(api_key=GROQ_API_KEY)

# Streamlit App
st.title("Humanizer & Rephraser App (Groq-powered)")
st.subheader("Powered by Groq and Streamlit")

# User Input
input_text = st.text_area("Enter the text you want to process:", "")
 
41
  else:
42
  task_prompt = f"Rephrase the following text: {input_text}"
43
 
44
+ # Call Groq API
45
+ chat_completion = client.chat.completions.create(
46
+ messages=[{"role": "user", "content": task_prompt}],
47
+ model="llama-3.3-70b-versatile", # Replace with the desired model
48
+ stream=False,
 
 
 
 
 
 
49
  )
50
 
51
+ # Extract the generated content
52
+ output_text = chat_completion.choices[0].message.content.strip()
53
 
54
+ if not output_text:
55
+ raise ValueError("Groq API returned an empty output.")
 
 
56
 
57
  output_word_count = len(output_text.split())
58
+
59
+ # Display Output
60
  st.success("Done!")
61
  st.text_area("Generated Output:", value=output_text, height=200)
62
  st.write(f"**Output Word Count:** {output_word_count}")
63
 
 
 
64
  except Exception as e:
65
  st.error(f"An error occurred: {str(e)}")
66