# NOTE(review): removed Hugging Face Spaces page chrome that was captured
# along with this file ("Spaces:" / "Sleeping" status, file size, git blame
# hashes, and the line-number gutter). It was not part of the program and
# made the file syntactically invalid.
import requests
import json
import os
from datetime import datetime
from zoneinfo import ZoneInfo
import gradio as gr
from gradio_client import Client
from extract import extract
import app_util
import cache
from pgsoft.pgconst.const import service_list, functionality_list, game_list
from pgsoft.pgdate.date_utils import beijing
#######################
# proxy version
#######################
proxy_version = "1.0.0-2023-12-12-a"  # use cache

# Startup diagnostics: log the current time in both Beijing and Seattle.
now_beijing = datetime.now().astimezone(ZoneInfo("Asia/Shanghai"))
print(f"[Beijing]: {now_beijing.replace(microsecond=0)}")
now_seattle = now_beijing.astimezone(ZoneInfo("America/Los_Angeles"))
print(f"[Seattle]: {now_seattle.replace(microsecond=0)}")

# Map the deployment identity (from the environment) to the backing AI space.
spaces = {
    "b_demo_hf": "stevez-ai",
    "pgdemo2": "stevez-ai2",
    "pgdemo3": "stevez-ai3",
}
identity = os.environ.get("identity") or "b_demo_hf"
space = spaces.get(identity, "stevez-ai")

# Warm the response cache from disk before serving requests.
filepath = os.sep.join(["cache", "cached_ai.json"])
cache.load_cache(filepath)
def run(hf_token, service, game, functionality, nlp_command):
    """
    Gradio event handler: proxy one user request to the backing AI space.

    Parameters
    ----------
    hf_token : str
        Reused as a JSON payload; ``extract`` pulls out
        ``(token, user_name, redirect, source, _)``.
    service : str
        One of ``service_list`` ("download game", "upload game",
        "list games", or an AI service such as "games").
    game, functionality, nlp_command : str
        Forwarded to the remote AI endpoint.

    Returns
    -------
    str
        A human-readable error message, or a JSON dump of the service
        result with proxy/user/game metadata attached.
    """
    # reuse hf_token field as json string
    token, user_name, redirect, source, _ = extract(hf_token)
    if user_name is None:
        user_name = "__fake__"

    # Per-request redirect target.  FIX: the previous version mutated the
    # module-level `space` via `global`, so a single redirected request
    # permanently re-pointed every subsequent request; a local keeps the
    # redirect scoped to this call.
    target_space = redirect if redirect is not None else space
    url = f"https://{target_space}.hf.space"

    if token is None or token == "":
        return "please specify hf token"

    # Input validation shared by the AI services (everything except the
    # first entry of service_list).
    if service not in service_list[1:]:
        if game is None:
            return "please specify which game"
        if functionality is None:
            return "please choose the AI functionality"
        if functionality == "AI":
            if nlp_command in ["", None]:
                return "please make sure the command is not empty"

    service_start = beijing()
    print(f">>>>>>>>>>>>>>> service starts at {service_start} <<<<<<<<<<<<<<")

    if service in ("download game", "upload game", "list games"):
        # Cloud-disk services share one backend call; they differ only in
        # how the raw response is post-processed.
        res = app_util.call_clouddisk(service, nlp_command, token)
        if res is None:
            outp = {"status": "Failure"}
        elif service == "download game":
            outp = {"status": "OK", "result": json.loads(res)}
        elif service == "upload game":
            outp = {"status": "OK", "result": res}
        else:  # "list games"
            outp = {"status": "OK", "result": json.loads(res)["result"]}
    else:
        try:
            # sanity check that the config still declares the "games" service
            assert "games" in service_list
            if service == "games":
                print(f"{beijing()} [{user_name}] [{game}] {nlp_command}")
            # Serve from the local cache when possible; otherwise call the
            # remote space and cache the parsed answer.
            outp = cache.get_cache(nlp_command)
            if outp is None:
                client = Client(
                    url,
                    hf_token=token,
                    verbose=False,
                )
                calling_start = beijing()
                print(f"calling ai starts at {calling_start}")
                res = client.predict(
                    service,
                    game,
                    functionality,
                    nlp_command,  # hidden,
                    api_name="/predict",
                )
                calling_end = beijing()
                timecost = calling_end.timestamp() - calling_start.timestamp()
                print(f"calling ai ends at {calling_end}, costs {timecost:.2f}s")
                outp = json.loads(res)
                cache.add_cache(nlp_command, outp)
            # add proxy version info to the output
            outp["proxy-version"] = proxy_version
            outp["user"] = user_name
            outp["game"] = game
            if source:
                outp["source"] = source
            calling_start = beijing()
            # FIX: log the timestamp actually stored in `calling_start`
            # (previously a second, slightly later beijing() was printed).
            print(f"calling logger starts at {calling_start}")
            app_util.call_logger(outp, identity, token)
            calling_end = beijing()
            timecost = calling_end.timestamp() - calling_start.timestamp()
            print(f"calling logger ends at {calling_end}, costs {timecost:.2f}s")
        except Exception as e:
            return (
                f"{type(e)}, {str(e)}. \nyou may want to make "
                + "sure your hf_token is correct"
            )

    service_end = beijing()
    timecost = service_end.timestamp() - service_start.timestamp()
    print(
        f">>>>>>>>>>>>>>> service ends at {service_end}, "
        + f"costs {timecost:.2f}s <<<<<<<<<<<<<<<\n"
    )
    return json.dumps(outp, indent=4)
# Radio selectors for the three request dimensions.
service_selector = gr.Radio(
    service_list,
    value=service_list[0],
    info="Shared services",
)
game_selector = gr.Radio(
    game_list,
    value=game_list[1],
    info="Which game you want the AI to support?",
)
functionality_selector = gr.Radio(
    functionality_list,
    value=functionality_list[0],
    info="What functionality?",
)

# Wire the handler into a simple text-in / text-out Gradio UI and serve it.
demo = gr.Interface(
    fn=run,
    inputs=[
        "text",
        service_selector,
        game_selector,
        functionality_selector,
        "text",
    ],
    outputs="text",
    title="Demo",
    allow_flagging="never",
)
demo.launch()