# Spaces:
# Sleeping
# Sleeping
| import requests | |
| import json | |
| import os | |
| from datetime import datetime | |
| from zoneinfo import ZoneInfo | |
| import gradio as gr | |
| from gradio_client import Client | |
| from extract import extract | |
| import app_util | |
| import cache | |
| from pgsoft.pgconst.const import service_list, functionality_list, game_list | |
| from pgsoft.pgdate.date_utils import beijing | |
# --------------------------------------------------------------------
# proxy version
# --------------------------------------------------------------------
proxy_version = "1.0.0-2023-12-12-a"  # use cache

# Log the current wall-clock time in Beijing and Seattle at startup.
now = datetime.now().astimezone(ZoneInfo("Asia/Shanghai"))
print(f"[Beijing]: {now.replace(microsecond=0)}")
now = now.astimezone(ZoneInfo("America/Los_Angeles"))
print(f"[Seattle]: {now.replace(microsecond=0)}")
# Map each "identity" env value to the HF space that backs it.
spaces = {
    "b_demo_hf": "stevez-ai",
    "pgdemo2": "stevez-ai2",
    "pgdemo3": "stevez-ai3",
}

# Resolve the identity from the environment, falling back to the
# primary demo when the variable is unset or empty.
identity = os.environ.get("identity") or "b_demo_hf"

# Unknown identities route to the default space.
space = spaces.get(identity, "stevez-ai")

# Warm the on-disk AI response cache.
filepath = os.sep.join(["cache", "cached_ai.json"])
cache.load_cache(filepath)
def run(hf_token, service, game, functionality, nlp_command):
    """
    Gradio event handler: validate the request, dispatch it to the
    matching backend service, and return the result (or a
    human-readable error string).

    :param hf_token: reused hf_token field carrying a JSON string;
        ``extract`` pulls token/user/redirect/source out of it
    :param service: one of ``service_list``
    :param game: selected game (required for non-shared services)
    :param functionality: selected AI functionality
    :param nlp_command: free-text command forwarded to the backend
    :return: an error message string, or the result as indented JSON
    """
    # reuse hf_token field as json string
    token, user_name, redirect, source, _ = extract(hf_token)
    if user_name is None:
        user_name = "__fake__"

    # redirect all traffic to the proxy server.
    # NOTE(review): mutating the module-global `space` makes one
    # caller's redirect sticky for every subsequent request — confirm
    # this persistence is intended before relying on it.
    global space
    if redirect is not None:
        space = redirect
    url = f"https://{space}.hf.space"

    # ---- input validation -------------------------------------------
    if token is None or token == "":
        return "please specify hf token"
    if service not in service_list[1:]:
        if game is None:
            return "please specify which game"
        if functionality is None:
            return "please choose the AI functionality"
        if functionality == "AI":
            if nlp_command in ["", None]:
                return "please make sure the command is not empty"

    service_start = beijing()
    print(f"<<<<<<<<<<<<<< service starts at {service_start} <<<<<<<<<<<<<<")

    # Cloud-disk services share one backend call and differ only in how
    # the raw response becomes the "result" field.
    clouddisk_postprocess = {
        "download game": json.loads,
        "upload game": lambda raw: raw,
        "list games": lambda raw: json.loads(raw)["result"],
    }
    if service in clouddisk_postprocess:
        res = app_util.call_clouddisk(service, nlp_command, token)
        if res is None:
            outp = {"status": "Failure"}
        else:
            outp = {"status": "OK", "result": clouddisk_postprocess[service](res)}
    else:
        try:
            # sanity check on the shared service catalogue
            assert "games" in service_list
            if service == "games":
                print(f"{beijing()} [{user_name}] [{game}] {nlp_command}")
                # Serve from the local cache when possible.
                outp = cache.get_cache(nlp_command)
                if outp is None:
                    client = Client(
                        url,
                        hf_token=token,
                        verbose=False,
                    )
                    calling_start = beijing()
                    print(f"calling ai starts at {calling_start}")
                    res = client.predict(
                        service,
                        game,
                        functionality,
                        nlp_command,  # hidden,
                        api_name="/predict",
                    )
                    calling_end = beijing()
                    timecost = calling_end.timestamp() - calling_start.timestamp()
                    print(f"calling ai ends at {calling_end}, costs {timecost:.2f}s")
                    outp = json.loads(res)
                    outp["cache"] = False
                    cache.add_cache(nlp_command, outp)
                else:
                    print("[cache] return from cache")
            # add proxy version info to the output
            # (for services other than "games", `outp` is unbound here;
            # the resulting NameError is caught below and reported)
            outp["proxy-version"] = proxy_version
            outp["user"] = user_name
            outp["game"] = game
            if source:
                outp["source"] = source
            calling_start = beijing()
            print(f"calling logger starts at {beijing()}")
            app_util.call_logger(outp, identity, token)
            calling_end = beijing()
            timecost = calling_end.timestamp() - calling_start.timestamp()
            print(f"calling logger ends at {calling_end}, costs {timecost:.2f}s")
        except Exception as e:
            return (
                f"{type(e)}, {str(e)}. \nyou may want to make "
                + "sure your hf_token is correct"
            )
    service_end = beijing()
    timecost = service_end.timestamp() - service_start.timestamp()
    print(
        f">>>>>>>>>>>>>>> service ends at {service_end}, "
        + f"costs {timecost:.2f}s >>>>>>>>>>>>>>>\n"
    )
    return json.dumps(outp, indent=4)
# Wire the UI: token box, three radio groups, and a free-text command.
service_radio = gr.Radio(
    service_list,
    value=service_list[0],
    info="Shared services",
)
game_radio = gr.Radio(
    game_list,
    value=game_list[1],
    info="Which game you want the AI to support?",
)
functionality_radio = gr.Radio(
    functionality_list,
    value=functionality_list[0],
    info="What functionality?",
)

demo = gr.Interface(
    fn=run,
    inputs=["text", service_radio, game_radio, functionality_radio, "text"],
    outputs="text",
    title="Demo",
    allow_flagging="never",
)
demo.launch()