GeminiAi committed on
Commit
28d44d6
·
verified ·
1 Parent(s): 64269b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -1,33 +1,33 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Load pre-trained models
5
- summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
6
- qa_pipeline = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
7
- flashcard_generator = pipeline("text-generation", model="gpt-2")
8
 
9
  # Function for text summarization
10
  def summarize(text):
11
  try:
12
- summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
13
- return summary[0]['summary_text']
 
14
  except Exception as e:
15
  return f"Error in summarization: {str(e)}"
16
 
17
  # Function for generating flashcards
18
  def generate_flashcards(text):
19
  try:
20
- prompt = f"Generate flashcards for the following text: {text}"
21
- flashcards = flashcard_generator(prompt, max_length=200, num_return_sequences=1)
22
- return flashcards[0]['generated_text']
23
  except Exception as e:
24
  return f"Error in flashcard generation: {str(e)}"
25
 
26
  # Function for question answering
27
  def answer_question(text, question):
28
  try:
29
- result = qa_pipeline(question=question, context=text)
30
- return result['answer']
 
31
  except Exception as e:
32
  return f"Error in question answering: {str(e)}"
33
 
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
 
4
# Initialize the Inference Client.
# One shared client for all three Gradio handlers below; every request is
# served by the hosted zephyr-7b-beta model via `client.text_generation`.
# NOTE(review): no auth token is passed — this relies on ambient HF
# credentials / anonymous rate limits; confirm before production use.
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
 
 
6
 
7
# Function for text summarization
def summarize(text):
    """Summarize `text` via the shared Hugging Face Inference client.

    Parameters:
        text: Source text to condense.

    Returns:
        The model-generated summary string on success; otherwise a
        human-readable error string. This handler never raises, so the
        Gradio UI always receives displayable text.
    """
    # Guard: don't spend an API call on blank input.
    if not text or not text.strip():
        return "Error in summarization: input text is empty"
    try:
        prompt = f"Summarize the following text:\n\n{text}\n\nSummary:"
        response = client.text_generation(prompt, max_new_tokens=100)
        return response
    except Exception as e:
        # Boundary handler: surface the failure to the UI instead of raising.
        return f"Error in summarization: {str(e)}"
15
 
16
# Function for generating flashcards
def generate_flashcards(text):
    """Generate study flashcards from `text` via the shared Inference client.

    Parameters:
        text: Source text to turn into flashcards.

    Returns:
        The model-generated flashcards as a string on success; otherwise
        a human-readable error string. Never raises, so the Gradio UI
        always receives displayable text.
    """
    # Guard: don't spend an API call on blank input.
    if not text or not text.strip():
        return "Error in flashcard generation: input text is empty"
    try:
        prompt = f"Generate flashcards for the following text:\n\n{text}\n\nFlashcards:"
        response = client.text_generation(prompt, max_new_tokens=200)
        return response
    except Exception as e:
        # Boundary handler: surface the failure to the UI instead of raising.
        return f"Error in flashcard generation: {str(e)}"
24
 
25
# Function for question answering
def answer_question(text, question):
    """Answer `question` using `text` as context via the shared client.

    Parameters:
        text: Context passage the answer should be grounded in.
        question: The question to answer.

    Returns:
        The model-generated answer string on success; otherwise a
        human-readable error string. Never raises, so the Gradio UI
        always receives displayable text.
    """
    # Guards: both inputs are required; avoid a pointless API call.
    if not text or not text.strip():
        return "Error in question answering: input text is empty"
    if not question or not question.strip():
        return "Error in question answering: question is empty"
    try:
        prompt = f"Answer the following question based on the text:\n\nText: {text}\n\nQuestion: {question}\n\nAnswer:"
        response = client.text_generation(prompt, max_new_tokens=100)
        return response
    except Exception as e:
        # Boundary handler: surface the failure to the UI instead of raising.
        return f"Error in question answering: {str(e)}"
33