# Hugging Face Space: Gradio OAuth demo for agentic RAG
import gradio as gr
from groq import Groq
from huggingface_hub import list_models
import os
from dotenv import load_dotenv
import requests
import json
# Load environment variables from a local .env (if present), then fetch the
# Groq API key from an external key-store service listed in SQLCMD_URL.
load_dotenv(verbose=True)

sqlcmd = os.environ.get("SQLCMD_URL")
if not sqlcmd:
    # Fail fast with a clear message instead of letting requests.get(None)
    # raise an opaque MissingSchema error at startup.
    raise RuntimeError("SQLCMD_URL environment variable is not set")

# Timeout keeps the Space from hanging forever if the key store is down;
# raise_for_status surfaces HTTP errors before we try to parse JSON.
lresponse = requests.get(sqlcmd, timeout=10)
lresponse.raise_for_status()
loginfo = lresponse.json()

# loginfo is expected to be a list of {"api": ..., "key": ...} records;
# groqkey is None when no GROQ_API_KEY record is present.
groqkey = next((item['key'] for item in loginfo if item['api'] == 'GROQ_API_KEY'), None)
def toggle_all(prompt):
    """Send *prompt* to the Groq chat API and reveal the LLM tab.

    Returns a (tab-visibility update, answer text) pair, matching the two
    outputs wired to this handler ([tab_llm, resp]).
    """
    if not prompt:
        # BUG FIX: the original fell through and returned None for a
        # two-output handler, which breaks Gradio. Return a well-formed
        # pair with a user-visible message instead.
        return gr.update(), "プロンプトを入力してください。"

    client = Groq(api_key=groqkey)
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "you are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
        model="llama-3.3-70b-versatile",
    )
    answer = response.choices[0].message.content
    return gr.update(visible=True), answer
def hello(profile: gr.OAuthProfile | None) -> str:
    """Return a greeting for the signed-in user, or a stop emoji when logged out.

    Gradio's OAuth integration supplies *profile*; it is None for anonymous
    visitors.
    """
    return "⛔️" if profile is None else f"ようこそ! {profile.name}さん"
def list_private_models(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None):
    """Unlock the app's tabs once the user is authenticated.

    Despite the historical name, this no longer lists models: it returns the
    username plus visibility updates for the LLM / EPRAG / AGENT tabs
    (outputs: [m2, tab_llm, tab_eprag, tab_agentic]).
    """
    # BUG FIX: removed stray ``gr.Textbox(oauth_token)`` — constructing a
    # component inside an event callback has no effect on the rendered UI
    # and needlessly copied the OAuth token into a component value.
    if oauth_token is None:
        # Not logged in: keep every tab hidden and prompt the user.
        return (
            "Please log in to list private models.",
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
        )
    # Logged in: show the username and reveal all three tabs.
    return (
        profile.username,
        gr.update(visible=True),
        gr.update(visible=True),
        gr.update(visible=True),
    )
def process_eprag(prompt):
    """Forward *prompt* to the EPRAG backend and return its text response.

    Always returns a single string, matching the single output component
    ([eprag_output]) wired to this handler.
    """
    if not prompt:
        # BUG FIX: the original returned a 2-tuple here while the success
        # path returned one string — a mismatch for the one-output binding.
        return "プロンプトを入力してください。"
    # Let requests URL-encode the user's prompt instead of concatenating it
    # raw into the URL, and bound the wait time on the remote service.
    res = requests.get(
        'http://www.ryhintl.com/eprag-be/llm',
        params={'query': prompt},
        timeout=60,
    )
    res.raise_for_status()
    return res.content.decode('utf-8')
def process_agent(prompt):
    """Run the autogen agent backend for *prompt* and return the final chat message.

    Always returns a single string, matching the single output component
    ([agent_output]) wired to this handler.
    """
    if not prompt:
        # BUG FIX: single string instead of the original 2-tuple, which
        # mismatched the one-output Gradio binding.
        return "プロンプトを入力してください。"
    # URL-encode the prompt via params and bound the wait on the agent run.
    res = requests.get(
        'https://www.ryhintl.com/crewai/autogen',
        params={'qry': prompt},
        timeout=120,
    )
    res.raise_for_status()
    parsed_data = json.loads(res.content.decode('utf-8'))
    # The backend returns a chat transcript; surface only the final message.
    contents = [entry['content'] for entry in parsed_data.get('chat_history', [])]
    if not contents:
        # BUG FIX: the original raised NameError (unbound ``mycontent``)
        # when chat_history was empty.
        return "応答がありませんでした。"
    return contents[-1]
# ---------------------------------------------------------------------------
# UI definition: an OAuth-gated app whose tabs unlock after HF login.
# ---------------------------------------------------------------------------
with gr.Blocks() as agentic:
    gr.Markdown("# Gradio OAuth Space for Agentic RAG")

    # "Sign in with Hugging Face" button; it populates the OAuth
    # profile/token parameters of the load callbacks below.
    gr.LoginButton()

    m1 = gr.Markdown()  # greeting for the signed-in user
    m2 = gr.Markdown()  # username, filled in once the tabs are unlocked
    agentic.load(hello, inputs=None, outputs=m1)

    # All tabs start hidden; list_private_models reveals them after login.
    with gr.Tab("LLM", visible=False) as tab_llm:
        with gr.Column():
            prompt = gr.Textbox(visible=True, label="プロンプト")
            resp = gr.Textbox(visible=True, label="レスポンス")
            show_button = gr.Button("生成")
            show_button.click(fn=toggle_all, inputs=prompt, outputs=[tab_llm, resp])

    with gr.Tab("EPRAG", visible=False) as tab_eprag:
        gr.Markdown("# 🗞️ AGENTIC EPRAG")
        with gr.Row():
            eprag_input = gr.Textbox(label="プロンプト", type="text")
        with gr.Row():
            eprag_output = gr.Textbox(label="AIアシスタントの応答")
        submit_button = gr.Button("EPRAGプロセス", variant="primary")
        submit_button.click(process_eprag, inputs=[eprag_input], outputs=[eprag_output])

    with gr.Tab("AGENT", visible=False) as tab_agentic:
        gr.Markdown("# 🗞️ AGENTIC AUTOGEN")
        with gr.Row():
            agent_input = gr.Textbox(label="プロンプト", type="text")
        with gr.Row():
            agent_output = gr.Textbox(label="Agentアシスタントの応答")
        submit_button = gr.Button("AGENTICプロセス", variant="primary")
        submit_button.click(process_agent, inputs=[agent_input], outputs=[agent_output])

    agentic.load(list_private_models, inputs=None, outputs=[m2, tab_llm, tab_eprag, tab_agentic])

agentic.launch()