jsakshi commited on
Commit
ec542cb
·
verified ·
1 Parent(s): 695e71b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -2
app.py CHANGED
@@ -457,7 +457,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
457
 
458
  demo.launch()'''
459
 
460
- import gradio as gr
461
  from transformers import AutoModelForCausalLM, AutoTokenizer
462
  import torch
463
  import sys
@@ -577,6 +577,33 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
577
 
578
  exit_check.change(fn=lambda x: sys.exit() if x else None, inputs=[exit_check])
579
 
580
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
581
 
582
 
 
457
 
458
  demo.launch()'''
459
 
460
+ '''import gradio as gr
461
  from transformers import AutoModelForCausalLM, AutoTokenizer
462
  import torch
463
  import sys
 
577
 
578
  exit_check.change(fn=lambda x: sys.exit() if x else None, inputs=[exit_check])
579
 
580
+ demo.launch()'''
581
+
582
import requests

# NOTE(review): "Sakshi" is a placeholder, not a real Hugging Face token —
# authenticated calls will be rejected. Load a real token from an env var
# (e.g. os.environ["HF_TOKEN"]) before deploying; never commit secrets.
API_TOKEN = "Sakshi"
# Plain string: the original f-string prefix was unnecessary (no placeholders).
API_URL = "https://api-inference.huggingface.co/models/TinyLlama/TinyLlama-1.1B-Chat-v1.0"
HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
587
+
588
def generate_story_continuation(current_story, selected_option=None):
    """
    Generate a story continuation using the Hugging Face Inference API.

    Args:
        current_story: The story text so far (or the initial premise).
        selected_option: The branch the reader chose; when given, the prompt
            asks the model to continue along that path and offer new options.

    Returns:
        A 3-tuple ``(story_text, option_1, option_2)`` on success, or
        ``("Error generating story", "", "")`` on any request or parse failure.
    """
    if selected_option:
        prompt = f"""Previous story: {current_story}
Selected path: {selected_option}
Continue the story with this choice and provide two new options."""
    else:
        prompt = f"Create a story based on: {current_story}"

    payload = {"inputs": prompt}
    try:
        # Timeout prevents the UI from hanging forever on a stalled request;
        # network errors fall through to the same error tuple as a bad status.
        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
    except requests.RequestException:
        return "Error generating story", "", ""

    if response.status_code == 200:
        try:
            story = response.json()[0]['generated_text']
        except (ValueError, KeyError, IndexError, TypeError):
            # Malformed/unexpected API payload (e.g. an error dict, not a list).
            return "Error generating story", "", ""
        return story, "Option 1: Continue the adventure", "Option 2: Take a different direction"
    else:
        return "Error generating story", "", ""
607
+
608
 
609