InnovTech committed on
Commit
723acd2
·
1 Parent(s): 1202c9b

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -0
app.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import Iterable
3
+ import gradio as gr
4
+ from gradio.themes.base import Base
5
+ from gradio.themes.utils import colors, fonts, sizes
6
+
7
+ from llama_cpp import Llama
8
+ #from huggingface_hub import hf_hub_download
9
+
10
+ #hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")
11
+ llm = Llama(model_path="./ggjt-model.bin")
12
+
13
+
14
+ ins = '''### Instruction:
15
+ {}
16
+ ### Response:
17
+ '''
18
+
19
+ theme = gr.themes.Monochrome(
20
+ primary_hue="indigo",
21
+ secondary_hue="blue",
22
+ neutral_hue="slate",
23
+ radius_size=gr.themes.sizes.radius_sm,
24
+ font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
25
+ )
26
+
27
+
28
+
29
+
30
+ # def generate(instruction):
31
+ # response = llm(ins.format(instruction))
32
+ # response = response['choices'][0]['text']
33
+ # result = ""
34
+ # for word in response.split(" "):
35
+ # result += word + " "
36
+ # yield result
37
+
38
+ def generate(instruction):
39
+ result = ""
40
+ for x in llm(ins.format(instruction), stop=['### Instruction:', '### End'], stream=True):
41
+ result += x['choices'][0]['text']
42
+ yield result
43
+
44
+
45
+
46
+
47
+ def process_example(args):
48
+ for x in generate(args):
49
+ pass
50
+ return x
51
+
52
+ css = ".generating {visibility: hidden}"
53
+
54
+ # Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo
55
+ class SeafoamCustom(Base):
56
+ def __init__(
57
+ self,
58
+ *,
59
+ primary_hue: colors.Color | str = colors.emerald,
60
+ secondary_hue: colors.Color | str = colors.blue,
61
+ neutral_hue: colors.Color | str = colors.blue,
62
+ spacing_size: sizes.Size | str = sizes.spacing_md,
63
+ radius_size: sizes.Size | str = sizes.radius_md,
64
+ font: fonts.Font
65
+ | str
66
+ | Iterable[fonts.Font | str] = (
67
+ fonts.GoogleFont("Quicksand"),
68
+ "ui-sans-serif",
69
+ "sans-serif",
70
+ ),
71
+ font_mono: fonts.Font
72
+ | str
73
+ | Iterable[fonts.Font | str] = (
74
+ fonts.GoogleFont("IBM Plex Mono"),
75
+ "ui-monospace",
76
+ "monospace",
77
+ ),
78
+ ):
79
+ super().__init__(
80
+ primary_hue=primary_hue,
81
+ secondary_hue=secondary_hue,
82
+ neutral_hue=neutral_hue,
83
+ spacing_size=spacing_size,
84
+ radius_size=radius_size,
85
+ font=font,
86
+ font_mono=font_mono,
87
+ )
88
+ super().set(
89
+ button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
90
+ button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
91
+ button_primary_text_color="white",
92
+ button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
93
+ block_shadow="*shadow_drop_lg",
94
+ button_shadow="*shadow_drop_lg",
95
+ input_background_fill="zinc",
96
+ input_border_color="*secondary_300",
97
+ input_shadow="*shadow_drop",
98
+ input_shadow_focus="*shadow_drop_lg",
99
+ )
100
+
101
+
102
+ seafoam = SeafoamCustom()
103
+
104
+
105
+ with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
106
+ with gr.Column():
107
+ gr.Markdown(
108
+ """ ## Innovtech Pro AI
109
+
110
+ """
111
+ )
112
+
113
+ with gr.Row():
114
+ with gr.Column(scale=3):
115
+ instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
116
+
117
+ with gr.Box():
118
+ gr.Markdown("**Answer**")
119
+ output = gr.Markdown(elem_id="q-output")
120
+ submit = gr.Button("Generate", variant="primary")
121
+ gr.Examples(
122
+ examples=examples,
123
+ inputs=[instruction],
124
+ cache_examples=False,
125
+ fn=process_example,
126
+ outputs=[output],
127
+ )
128
+
129
+
130
+
131
+ submit.click(generate, inputs=[instruction], outputs=[output])
132
+ instruction.submit(generate, inputs=[instruction], outputs=[output])
133
+
134
+ demo.queue(concurrency_count=1).launch(debug=True)