HAMMALE committed on
Commit
2379646
·
verified ·
1 Parent(s): 598b240

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -5
app.py CHANGED
@@ -440,6 +440,95 @@ REACT MODE
440
 
441
  return filepath
442
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
443
  with gr.Blocks(title="LLM Reasoning Modes Comparison") as demo:
444
  gr.Markdown("""
445
  # LLM Reasoning Modes Comparison
@@ -468,6 +557,7 @@ with gr.Blocks(title="LLM Reasoning Modes Comparison") as demo:
468
  with gr.Row():
469
  submit_btn = gr.Button("Run", variant="primary", size="lg")
470
  download_btn = gr.Button("Download Results", variant="secondary", size="lg")
 
471
 
472
  with gr.Column(scale=1):
473
  gr.Markdown("**Example Questions**")
@@ -487,7 +577,8 @@ with gr.Blocks(title="LLM Reasoning Modes Comparison") as demo:
487
  with gr.Column():
488
  react_output = gr.Markdown(label="ReAct Output")
489
 
490
- download_file = gr.File(label="Download", visible=False)
 
491
 
492
  submit_btn.click(
493
  fn=run_comparison,
@@ -499,10 +590,12 @@ with gr.Blocks(title="LLM Reasoning Modes Comparison") as demo:
499
  fn=download_results,
500
  inputs=[think_output, act_output, react_output, question_input],
501
  outputs=download_file
502
- ).then(
503
- fn=lambda: gr.File(visible=True),
504
- outputs=download_file
 
 
505
  )
506
 
507
  if __name__ == "__main__":
508
- demo.launch()
 
440
 
441
  return filepath
442
 
def download_model_info():
    """Create a text file with gpt-oss-20b download instructions.

    Builds a plain-text cheat sheet covering the four common ways to
    fetch the model (HF CLI, huggingface_hub, git-lfs, transformers),
    writes it to a timestamped file under /tmp, and returns the path so
    a Gradio ``gr.File`` component can offer it for download.

    Returns:
        str: Absolute path of the generated instructions file. The
        filename embeds a timestamp so repeated clicks do not collide.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"model_download_instructions_{timestamp}.txt"

    content = """
===============================================================================
GPT-OSS-20B MODEL DOWNLOAD INSTRUCTIONS
===============================================================================

Model: openai/gpt-oss-20b
Repository: https://huggingface.co/openai/gpt-oss-20b

-------------------------------------------------------------------------------
METHOD 1: Using Hugging Face CLI
-------------------------------------------------------------------------------

1. Install Hugging Face Hub:
   pip install huggingface-hub

2. Login to Hugging Face (optional but recommended):
   huggingface-cli login

3. Download the model:
   huggingface-cli download openai/gpt-oss-20b

-------------------------------------------------------------------------------
METHOD 2: Using Python
-------------------------------------------------------------------------------

from huggingface_hub import snapshot_download

model_path = snapshot_download(
    repo_id="openai/gpt-oss-20b",
    cache_dir="./models"
)
print(f"Model downloaded to: {model_path}")

-------------------------------------------------------------------------------
METHOD 3: Using Git LFS
-------------------------------------------------------------------------------

1. Install Git LFS:
   git lfs install

2. Clone the repository:
   git clone https://huggingface.co/openai/gpt-oss-20b

-------------------------------------------------------------------------------
METHOD 4: Using transformers library
-------------------------------------------------------------------------------

from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("openai/gpt-oss-20b")
tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")

# Save locally
model.save_pretrained("./local_model")
tokenizer.save_pretrained("./local_model")

-------------------------------------------------------------------------------
SYSTEM REQUIREMENTS
-------------------------------------------------------------------------------

- Storage: ~40GB disk space
- RAM: Minimum 16GB, recommended 32GB+
- GPU: Optional but recommended (NVIDIA with CUDA support)
- Python: 3.8 or higher

-------------------------------------------------------------------------------
USEFUL LINKS
-------------------------------------------------------------------------------

Model Card: https://huggingface.co/openai/gpt-oss-20b
Documentation: https://huggingface.co/docs
CLI Guide: https://huggingface.co/docs/huggingface_hub/guides/cli

===============================================================================
Generated: """ + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + """
===============================================================================
"""

    # BUG FIX: the original wrote to a hard-coded bogus path
    # (f"/tmp/(unknown)" — an f-string with no placeholder), leaving the
    # computed timestamped `filename` unused and overwriting the same file
    # on every call. Use the intended per-call filename instead.
    filepath = f"/tmp/{filename}"
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(content)

    return filepath

532
  with gr.Blocks(title="LLM Reasoning Modes Comparison") as demo:
533
  gr.Markdown("""
534
  # LLM Reasoning Modes Comparison
 
557
  with gr.Row():
558
  submit_btn = gr.Button("Run", variant="primary", size="lg")
559
  download_btn = gr.Button("Download Results", variant="secondary", size="lg")
560
+ download_model_btn = gr.Button("Download Model Info", variant="secondary", size="lg")
561
 
562
  with gr.Column(scale=1):
563
  gr.Markdown("**Example Questions**")
 
577
  with gr.Column():
578
  react_output = gr.Markdown(label="ReAct Output")
579
 
580
+ download_file = gr.File(label="Download Results")
581
+ download_model_file = gr.File(label="Model Download Instructions")
582
 
583
  submit_btn.click(
584
  fn=run_comparison,
 
590
  fn=download_results,
591
  inputs=[think_output, act_output, react_output, question_input],
592
  outputs=download_file
593
+ )
594
+
595
+ download_model_btn.click(
596
+ fn=download_model_info,
597
+ outputs=download_model_file
598
  )
599
 
600
  if __name__ == "__main__":
601
+ demo.launch(share=True)