# NOTE: the three lines below were Hugging Face "Spaces: Sleeping" page-status
# residue from a web scrape, not part of the program.
import gradio as gr
from huggingface_hub import InferenceClient

# For more information on `huggingface_hub` Inference API support, see:
# https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference

# Client for the hosted short-story generation model.
client = InferenceClient("gargabhi/shortstories20M")

# Markdown rendered above the Gradio interface.
description = """
# Generate short stories using custom verb or noun or adjective
"""

# Default prompt pre-filled in the UI textbox.
prompt = 'Write a story. In the story, try to use the verb "fight", the noun "king" and the adjective "brave". Possible story:'
def generate_text(input_prompt="", max_len=200, top_k=10, temp=0.5, top_p=0.95):
    """Generate a short story continuation via the Inference API.

    Args:
        input_prompt: Prompt text to continue.
        max_len: Maximum number of new tokens to generate.
        top_k: Sample only from the k most likely next tokens.
        temp: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The generated text returned by the model endpoint.
    """
    print('inputs: ')
    # Bug fix: log the prompt actually passed in, not the module-level default.
    print('prompt:', input_prompt)
    print('max_len:', max_len)
    print('top-k:', top_k)
    print('temp:', temp)
    print('top_p:', top_p)
    # Bug fix: forward top_p — it was accepted from the UI slider but never
    # passed to the endpoint, so the slider had no effect.
    response = client.text_generation(
        input_prompt,
        do_sample=True,
        max_new_tokens=max_len,
        temperature=temp,
        top_k=top_k,
        top_p=top_p,
    )
    print('response:')
    print(response)
    return response
# UI controls mapped positionally onto generate_text's parameters.
inputs = [
    gr.Textbox(prompt, label="Prompt text"),
    # Typo fix: "max-lenth" -> "max-length".
    gr.Slider(minimum=50, maximum=250, step=50, label="max-length generation", value=200),
    gr.Slider(minimum=0, maximum=20, step=1, label="top-k", value=10),
    gr.Slider(minimum=0.0, maximum=4.0, step=0.1, label="temperature", value=0.5),
    gr.Slider(minimum=0.0, maximum=1.0, label="top-p", value=0.95),
]
outputs = [gr.Textbox(label="Generated Text")]

# allow_flagging takes a string mode in current Gradio ("never"/"auto"/"manual");
# "never" is what the previous value of False mapped to (flagging disabled).
demo = gr.Interface(
    fn=generate_text,
    inputs=inputs,
    outputs=outputs,
    allow_flagging="never",
    description=description,
)

if __name__ == "__main__":
    demo.launch(debug=True)