Update app.py
app.py CHANGED
@@ -35,6 +35,22 @@ dataset = load_dataset("squad") # you can replace "squad" with any dataset you'
 
 # Print the first few entries to verify that it’s loaded
 print(dataset["train"][0]) # Prints the first example from the training set
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+# Load pre-trained GPT-2 model and tokenizer from Hugging Face
+model_name = "gpt2" # You can use other models such as 'distilgpt2' for faster responses
+
+# Initialize tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
+# Create a pipeline for text generation
+generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+def chatbot_response(user_input):
+    # Generate a response using the model
+    response = generator(user_input, max_length=100, num_return_sequences=1)
+
+    # Extract and return the generated text
+    return response[0]['generated_text']
 
 
 
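A minimal usage sketch, not part of the commit, showing how the chatbot_response helper added above could be exercised once this version of app.py is loaded; the prompt string is an illustrative assumption:

# Quick manual check of the text-generation pipeline added in this commit
# (example prompt only; any user input string works).
if __name__ == "__main__":
    prompt = "Tell me something interesting about the SQuAD dataset."
    # Prints the prompt followed by GPT-2's continuation (max_length=100 tokens total)
    print(chatbot_response(prompt))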