jsakshi committed on
Commit
4566613
·
verified ·
1 Parent(s): 0a90be1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -130
app.py CHANGED
@@ -301,8 +301,7 @@ demo.launch()'''
301
 
302
 
303
 
304
-
305
- '''import gradio as gr
306
  from transformers import AutoModelForCausalLM, AutoTokenizer
307
  import torch
308
  import sys
@@ -456,131 +455,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
456
  # Exit handling
457
  exit_check.change(fn=lambda x: sys.exit() if x else None, inputs=[exit_check])
458
 
459
- demo.launch()'''
460
-
461
-
462
-
463
-
464
- import gradio as gr
465
- from transformers import AutoModelForCausalLM, AutoTokenizer
466
- import torch
467
-
468
- model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
469
- tokenizer = AutoTokenizer.from_pretrained(model_name)
470
- model = AutoModelForCausalLM.from_pretrained(
471
- model_name,
472
- torch_dtype=torch.float16,
473
- device_map="auto",
474
- )
475
-
476
- def generate_story_continuation(current_story, selected_option=None):
477
- """Generate story continuation based on current story and selected option"""
478
- if selected_option:
479
- prompt = f"""Previous story: {current_story}
480
- Selected path: {selected_option}
481
- Continue the story with this choice and provide two new options."""
482
- else:
483
- prompt = f"Create a story based on: {current_story} and provide two directions in which the story can go."
484
-
485
- messages = [{"role": "user", "content": prompt}]
486
- formatted_prompt = tokenizer.apply_chat_template(
487
- messages,
488
- tokenize=False,
489
- add_generation_prompt=True
490
- )
491
-
492
- inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
493
- outputs = model.generate(
494
- **inputs,
495
- max_length=400,
496
- temperature=0.7,
497
- do_sample=True,
498
- pad_token_id=tokenizer.eos_token_id
499
- )
500
-
501
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
502
- response = response.split("Assistant: ")[-1].strip()
503
-
504
- if "Option 1:" in response and "Option 2:" in response:
505
- story_part = response.split("Option 1:")[0].strip()
506
- options_part = "Option 1:" + response.split("Option 1:")[1]
507
- option1 = options_part.split("Option 2:")[0].replace("Option 1:", "").strip()
508
- option2 = options_part.split("Option 2:")[1].strip()
509
- else:
510
- story_part = response
511
- option1 = "Follow the mysterious voice"
512
- option2 = "Explore the abandoned house"
513
-
514
- return story_part, option1, option2
515
-
516
- def handle_option_click(current_story, option_text):
517
- """Handle option selection and update story"""
518
- if not option_text or not current_story:
519
- return current_story, "Option 1", "Option 2"
520
-
521
- new_story_part, option1, option2 = generate_story_continuation(current_story, option_text)
522
- updated_story = f"{current_story}\n\n⚡ You chose: {option_text}\n\n{new_story_part}"
523
- return updated_story, option1, option2
524
-
525
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
526
- gr.Markdown("# 🌟 Interactive Story Generator")
527
-
528
- with gr.Column():
529
- story_prompt = gr.Textbox(
530
- label="Start your story",
531
- placeholder="Enter your story prompt here...",
532
- lines=3
533
- )
534
-
535
- generate_btn = gr.Button("Generate Story", variant="primary")
536
-
537
- story_display = gr.Textbox(
538
- label="Your Story Journey",
539
- lines=12,
540
- interactive=False,
541
- show_copy_button=True
542
- )
543
-
544
- with gr.Row():
545
- option1_btn = gr.Button("Option 1", visible=False)
546
- option2_btn = gr.Button("Option 2", visible=False)
547
-
548
- exit_btn = gr.Button("Exit Story", variant="stop")
549
-
550
- def generate_initial_story(prompt):
551
- story_part, option1, option2 = generate_story_continuation(prompt)
552
- return {
553
- story_display: story_part,
554
- option1_btn: gr.update(value=option1, visible=True),
555
- option2_btn: gr.update(value=option2, visible=True)
556
- }
557
-
558
- generate_btn.click(
559
- generate_initial_story,
560
- inputs=story_prompt,
561
- outputs=[story_display, option1_btn, option2_btn]
562
- )
563
-
564
- def option_selected(option_text, current_story):
565
- updated_story, new_opt1, new_opt2 = handle_option_click(current_story, option_text)
566
- return {
567
- story_display: updated_story,
568
- option1_btn: gr.update(value=new_opt1),
569
- option2_btn: gr.update(value=new_opt2)
570
- }
571
-
572
- option1_btn.click(
573
- fn=option_selected,
574
- inputs=[option1_btn, story_display],
575
- outputs=[story_display, option1_btn, option2_btn]
576
- )
577
-
578
- option2_btn.click(
579
- fn=option_selected,
580
- inputs=[option2_btn, story_display],
581
- outputs=[story_display, option1_btn, option2_btn]
582
- )
583
-
584
- exit_btn.click(fn=lambda: None, inputs=None, outputs=None, js="window.close()")
585
-
586
- demo.launch()
 
301
 
302
 
303
 
304
+ import gradio as gr
 
305
  from transformers import AutoModelForCausalLM, AutoTokenizer
306
  import torch
307
  import sys
 
455
  # Exit handling
456
  exit_check.change(fn=lambda x: sys.exit() if x else None, inputs=[exit_check])
457
 
458
+ demo.launch()