Atulit23 commited on
Commit
d6200d7
·
verified ·
1 Parent(s): 50d715d

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +3 -9
  2. app.py +87 -0
  3. llama-2-7b-chat.ggmlv3.q8_0.bin +3 -0
  4. requirements.txt +6 -0
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Llama Gradio
3
- emoji: 🐢
4
- colorFrom: indigo
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 4.16.0
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: llama_gradio
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 3.44.4
6
  ---
 
 
app.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from langchain.prompts import PromptTemplate
3
+ from langchain.llms import CTransformers
4
+ import os
5
+ import gradio as gr
6
+
7
def GetLlamaResponse(topic):
    """Generate a short poem about *topic* with a local Llama-2 chat model.

    Parameters
    ----------
    topic : str
        Subject of the poem (e.g. a food item), supplied by the Gradio UI.

    Returns
    -------
    str
        Raw text produced by the model (up to 256 new tokens).
    """
    # Load the quantized GGML model that is uploaded alongside app.py.
    # NOTE(review): the original pointed at an absolute path on the
    # author's machine (/home/arnav-fedora/Blog_Generation/...), which
    # cannot exist in the deployed Space; the .bin file sits next to
    # this script, so reference it relatively.
    llm = CTransformers(
        model_type="llama",
        model="llama-2-7b-chat.ggmlv3.q8_0.bin",
        config={"max_new_tokens": 256, "temperature": 0.4},
    )

    # The template consumes only {topic}. Declaring extra input
    # variables (word_count, poem_style, temperature) that never appear
    # in the template trips PromptTemplate's template validation, and
    # the matching .format() kwargs were dead weight — both removed.
    template = """
    Generate a poem for hungry people who wish to eat a delicious {topic} within 256 words
    """
    prompt = PromptTemplate(
        input_variables=["topic"],
        template=template,
    )

    return llm(prompt.format(topic=topic))
31
# The commented-out Streamlit prototype UI that preceded this Gradio
# app has been removed; recover it from git history if ever needed.

# Gradio wiring: one text input (the poem topic) -> one text output.
# NOTE(review): the original labels ("Image URL", "Result Dictionary")
# and title ("Dark review detection") were copy-pasted from an
# unrelated project; this app generates poems about a topic.
topic_inputs = [
    gr.Textbox(type="text", label="Poem topic"),
]

poem_outputs = [
    gr.Textbox(type="text", label="Generated poem"),
]

poem_interface = gr.Interface(
    fn=GetLlamaResponse,
    inputs=topic_inputs,
    outputs=poem_outputs,
    title="Llama-2 Poem Generator",
    cache_examples=False,
)

# A single tab hosting the poem interface; queue() serializes requests
# so concurrent users don't contend for the one loaded model.
gr.TabbedInterface(
    [poem_interface],
    tab_names=["Poem generator"],
).queue().launch()
llama-2-7b-chat.ggmlv3.q8_0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bfdde943555c78294626a6ccd40184162d066d39774bd2c98dae24943d32cc3
3
+ size 7160799872
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ sentence-transformers
2
+ uvicorn
3
+ ctransformers
4
+ langchain
5
+ python-box
6
+ gradio