import torch
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load the phi-2 base model; trust_remote_code is required for its custom modeling code.
model_name = "microsoft/phi-2"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
)
model.config.use_cache = False

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token  # phi-2 defines no pad token by default

# Attach the fine-tuned PEFT adapter saved under ./ckpts to the base model.
peft_model_folder = './ckpts'
model.load_adapter(peft_model_folder)

# Build the generation pipeline once; max_length is passed per call below.
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)

def generate_text(input_text, max_length):
    # Wrap the prompt in the instruction template used during fine-tuning,
    # and cast the slider value to int before handing it to generate().
    result = pipe(f"<s>[INST] {input_text} [/INST]", max_length=int(max_length))
    return result[0]['generated_text']

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_text,  # Function called on user input
    inputs=[
        gr.Textbox(
            label="Ask a question",
            info="Enter your prompt:",
        ),
        gr.Slider(1, 200, value=10, step=1, label="Max Length"),
    ],
    outputs=gr.Textbox(
        label="Response from the Phi-2 model:",
    ),
)

# Launch the Gradio app
iface.launch()
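
# Once the app is up, it can also be queried programmatically from another
# process. A minimal gradio_client sketch, assuming the default local URL
# (http://127.0.0.1:7860) and the auto-generated "/predict" endpoint that
# gr.Interface exposes; the prompt below is a hypothetical example:
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   response = client.predict(
#       "What is parameter-efficient fine-tuning?",  # prompt textbox input
#       100,                                         # Max Length slider value
#       api_name="/predict",
#   )
#   print(response)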