Chirag committed on
Commit
da94d61
·
1 Parent(s): 0764d85
Files changed (2) hide show
  1. README.md +2 -2
  2. app.py +71 -12
README.md CHANGED
@@ -3,9 +3,9 @@ title: Documentreader
3
  emoji: 💬
4
  colorFrom: yellow
5
  colorTo: purple
6
- sdk: docker
7
  sdk_version: 5.0.1
8
- app_file: app.sh
9
  pinned: false
10
  license: apache-2.0
11
  short_description: Document Reader
 
3
  emoji: 💬
4
  colorFrom: yellow
5
  colorTo: purple
6
+ sdk: gradio
7
  sdk_version: 5.0.1
8
+ app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
  short_description: Document Reader
app.py CHANGED
@@ -1,17 +1,76 @@
1
- import nbformat
2
- from nbconvert.preprocessors import ExecutePreprocessor
3
- import jupyter_client
4
 
5
- print("Available kernels:", jupyter_client.kernelspec.find_kernel_specs())
6
 
7
- def run_notebook(path):
8
- with open(path, "r", encoding="utf-8") as f:
9
- nb = nbformat.read(f, as_version=4)
10
 
11
- ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
12
- ep.preprocess(nb, {"metadata": {"path": "./"}})
13
 
14
- with open("executed_" + path, "w", encoding="utf-8") as f:
15
- nbformat.write(nb, f)
16
 
17
- run_notebook("app.ipynb")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
 
3
 
4
+ # In[1]:
5
 
 
 
 
6
 
7
+ import gradio as gr
8
+ from huggingface_hub import InferenceClient
9
 
 
 
10
 
11
+
12
+ # In[ ]:
13
+
14
+
15
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Module-level client for the hosted zephyr-7b-beta model; shared by the
# streaming chat handler below. No credentials are passed here, so this
# presumably relies on the Space's ambient HF token / anonymous access —
# TODO confirm.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
19
+
20
+
21
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, given the prior *history*.

    Parameters
    ----------
    message : str
        The newest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turn pairs; falsy entries are skipped.
    system_message : str
        System prompt placed first in the conversation.
    max_tokens : int
        Maximum number of new tokens to generate.
    temperature : float
        Sampling temperature.
    top_p : float
        Nucleus-sampling probability mass.

    Yields
    ------
    str
        The accumulated response so far (Gradio streaming convention:
        each yield replaces the displayed assistant message).
    """
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the full conversation for the stateless chat-completion API.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Use a loop name distinct from the `message` parameter — the original
    # shadowed it, which is confusing and error-prone.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # Some stream chunks (e.g. the final stop chunk) carry no content:
        # `delta.content` is None there, and the original crashed with
        # `TypeError: can only concatenate str ... NoneType` on `+=`.
        token = chunk.choices[0].delta.content or ""

        response += token
        yield response
52
+
53
+
54
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# Build the extra generation controls first, then wire them into the
# chat interface; they are forwarded to `respond` after (message, history).
_system_prompt = gr.Textbox(value="You are a friendly Chatbot.", label="System message")
_max_new_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
_top_p = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[_system_prompt, _max_new_tokens, _temperature, _top_p],
)
72
+
73
+
74
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()
76
+