Duplicate from EuroPython2022/Scratchpad-w-BLOOM
Co-authored-by: Muhtasham Oblokulov <muhtasham@users.noreply.huggingface.co>
- .gitattributes +27 -0
- README.md +13 -0
- app.py +63 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
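These `.gitattributes` rules route matching files through Git LFS, so large binaries (model weights, archives, TensorBoard event files) are stored as pointers rather than in the Git history. A minimal Python sketch of the matching idea, using only the standard library (illustrative only; real `.gitattributes` globbing has extra rules, e.g. the `saved_model/**/*` directory pattern):

```python
# Sketch (not part of the commit): approximate LFS pattern matching
# for simple "*.ext" globs with the stdlib fnmatch module.
from fnmatch import fnmatch

# Hypothetical subset of the patterns listed above.
LFS_PATTERNS = ["*.7z", "*.bin", "*.h5", "*.onnx", "*.pt", "*.zip", "*tfevents*"]

def is_lfs_tracked(filename: str) -> bool:
    """Return True if the file name matches one of the LFS glob patterns."""
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

print(is_lfs_tracked("pytorch_model.bin"))  # True
print(is_lfs_tracked("app.py"))             # False
```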
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Length generalization via BLOOM
+emoji: 📝
+colorFrom: indigo
+colorTo: red
+sdk: gradio
+sdk_version: 3.0.24
+app_file: app.py
+pinned: false
+duplicated_from: EuroPython2022/Scratchpad-w-BLOOM
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
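The `duplicated_from` field above records that this Space was cloned from `EuroPython2022/Scratchpad-w-BLOOM`. As an aside, recent versions of `huggingface_hub` expose a helper for doing the same duplication programmatically (a sketch; the target repo id below is hypothetical, and a token with write access is required):

```python
# Sketch: duplicating a Space programmatically.
# Assumes a huggingface_hub version that provides duplicate_space.
from huggingface_hub import duplicate_space

url = duplicate_space(
    from_id="EuroPython2022/Scratchpad-w-BLOOM",
    to_id="your-username/Scratchpad-w-BLOOM",  # hypothetical target repo id
)
print(url)  # URL of the newly created Space
```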
app.py
ADDED
@@ -0,0 +1,63 @@
+import gradio as gr
+import requests
+import os
+
+## BLOOM hosted Inference API
+API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
+HF_TOKEN = os.environ["HF_TOKEN"]
+headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+
+def text_generate(prompt):
+    print(f"Prompt is: {prompt}")
+    p = prompt + " Solution: "  # nudge the model to continue with a solution
+    print(f"Final prompt is: {p}")
+    json_ = {"inputs": p,
+             "parameters":
+                 {
+                     "top_p": 0.9,
+                     "temperature": 1.1,
+                     "max_new_tokens": 250,
+                     "return_full_text": True
+                 }, "options":
+                 {
+                     "use_cache": True,
+                     "wait_for_model": True  # block until the model is loaded
+                 },}
+    response = requests.post(API_URL, headers=headers, json=json_)
+    print(f"Response is: {response}")
+    output = response.json()
+    print(f"Output is: {output}")
+    output_tmp = output[0]["generated_text"]
+    print(f"output_tmp is: {output_tmp}")
+    solution = output_tmp.split("\nQ:")[0]  # stop before the model invents a new question
+    print(f"Final response after splits is: {solution}")
+    return solution
+
+demo = gr.Blocks()
+
+with demo:
+    gr.Markdown("<h1><center>Length generalization (LG) with BLOOM 🌸</center></h1>")
+    gr.Markdown(
+        """
+        We will examine large language models' ability to extrapolate to longer problems! \n
+        Length generalization (LG) is important: long examples are often rare and intrinsically more difficult, yet they are the ones we care most about. \n
+        The recent paper [Exploring Length Generalization in Large Language Models](https://arxiv.org/pdf/2207.04901) found that few-shot [scratchpad](https://arxiv.org/abs/2112.00114) prompting, a combination behind many strong LLM results (e.g. Minerva), \n
+        leads to **substantial improvements in length generalization!** \n
+        In-context learning enables variable-length pattern matching, producing solutions of the correct lengths. \n
+        This Space is an attempt at inspecting this LLM behavior/capability in the new Hugging Face BigScience [Bloom](https://huggingface.co/bigscience/bloom) model. \n
+        This Space was created by [Muhtasham Oblokulov](https://twitter.com/muhtasham9) as a demo for EuroPython 2022. \n
+        This Space is a work in progress; BLOOM doesn't support inference on long sequences, so you may want to try shorter sequences. \n
+        """
+    )
+    with gr.Row():
+        input_prompt = gr.Textbox(value="Q:The coin is heads up.(1) Then Austin flips. Is the coin still heads up? Solution: Coin is initially heads up. (1) After Austin flips, coin turns to heads. Q: The coin is heads up. (2) Then Austin doesn't flip. (1) Then Kara flips. Is the coin still heads up?",
+                                  label="Enter your zero-shot examples (few-shot is not supported due to API limits), followed by your query:")
+    generated_txt = gr.Textbox(lines=10, label="Generated Solution:")
+
+    b1 = gr.Button("Generate Text")
+    b1.click(text_generate, inputs=[input_prompt], outputs=[generated_txt])
+
+    with gr.Row():
+        gr.Markdown("")
+
+demo.launch(enable_queue=True, debug=True)
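One caveat with `text_generate` above: it assumes the Inference API always returns a list whose first element has a `generated_text` key. When the model is still loading, rate limits are hit, or the request fails, the API returns an error object instead and `output[0]["generated_text"]` raises. A more defensive variant might look like the sketch below (an illustration, not part of this commit; the `timeout` value is an arbitrary choice):

```python
# Sketch: a defensive wrapper around the same Inference API call.
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HEADERS = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}

def text_generate_safe(prompt: str) -> str:
    payload = {
        "inputs": prompt + " Solution: ",
        "parameters": {"top_p": 0.9, "temperature": 1.1,
                       "max_new_tokens": 250, "return_full_text": True},
        "options": {"use_cache": True, "wait_for_model": True},
    }
    response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=120)
    response.raise_for_status()  # surface HTTP-level failures early
    output = response.json()
    # On failure the API returns {"error": ...} instead of a list.
    if isinstance(output, dict) and "error" in output:
        return f"API error: {output['error']}"
    return output[0]["generated_text"].split("\nQ:")[0]
```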