Spaces:
Sleeping
Sleeping
import re

import gradio as gr
import requests

from api_usage import (
    check_ant_rate_limit,
    check_deepseek_status,
    check_elevenlabs_status,
    check_gcp_anthropic,
    check_groq_status,
    check_key_ant_availability,
    check_key_availability,
    check_key_aws_availability,
    check_key_azure_availability,
    check_key_gemini_availability,
    check_key_mistral_availability,
    check_key_or_availability,
    check_key_or_limits,
    check_key_replicate_availability,
    check_mistral_quota,
    check_nai_status,
    check_stability_status,
    check_xai_status,
    get_azure_deploy,
    get_azure_status,
    get_orgs_me,
    get_subscription,
)
async def sort_key(key, rate_limit, claude_model):
    """Detect which provider an API key belongs to and dispatch to its checker.

    Patterns are tested in order, most specific first; the first match wins.
    Returns the info dict produced by the matching get_key_*_info helper.
    All regexes are raw strings (the original used plain strings, where
    sequences like "\\-" raise SyntaxWarning on modern Python), and the
    redundant re.match(re.compile(p), s) wrapping is dropped.
    """
    _key = key.strip()
    # OpenRouter: sk-or-v1- followed by 64 lowercase hex/alnum chars
    if re.match(r"sk-or-v1-[a-z0-9]{64}", _key):
        return get_key_openrouter_info(_key)
    # Anthropic: current sk-ant-api03 format, or two legacy key shapes
    if (re.match(r"sk-ant-api03-[a-zA-Z0-9\-_]{93}AA", _key)
            or (_key.startswith("sk-ant-") and len(_key) == 93)
            or (len(_key) == 89 and re.match(r"sk-[a-zA-Z0-9]{86}", _key))):
        return await get_key_ant_info(_key, rate_limit, claude_model)
    # Stability: 51-char sk- key that lacks the OpenAI 'T3BlbkFJ' marker
    if re.match(r"sk-[a-zA-Z0-9]{48}", _key) and len(_key) == 51 and 'T3BlbkFJ' not in _key:
        return get_key_stability_info(_key)
    # Deepseek: sk- followed by 32 hex chars
    if re.match(r"sk-[a-f0-9]{32}", _key):
        return get_key_deepseek_info(_key)
    if _key.startswith("sk-"):
        return get_key_oai_info(_key)
    if _key.startswith("AIzaSy"):
        return get_key_gemini_info(_key)
    if _key.startswith("pst-"):
        return get_key_nai_info(_key)
    if (_key.startswith("r8_") and len(_key) == 40) or (_key.islower() and len(_key) == 40):
        return get_key_replicate_info(_key)
    if _key.startswith("xai-"):
        return get_key_xai_info(_key)
    # Azure, short form: RESOURCE_NAME:API_KEY (split once instead of five times)
    colon_parts = _key.split(':')
    if (len(colon_parts) == 2 and colon_parts[1].islower()
            and len(colon_parts[1]) >= 32 and "openai.azure.com" not in colon_parts[1]):
        endpoint = f"{colon_parts[0]}.openai.azure.com"
        api_key = colon_parts[1]
        return get_key_azure_info(endpoint, api_key)
    # Azure, full form: (https://)RESOURCE.openai.azure.com;API_KEY
    if "openai.azure.com" in _key.split(';')[0]:
        endpoint = _key.split(';')[0]
        api_key = _key.split(';')[1]
        return get_key_azure_info(endpoint, api_key)
    # AWS: AKIA… access-key id (20 uppercase chars) before the first colon
    if _key.startswith("AKIA") and len(colon_parts[0]) == 20 and colon_parts[0].isupper():
        return await get_key_aws_info(_key)
    if re.match(r"[a-f0-9]{32}", _key) or re.match(r"sk_[a-f0-9]{48}", _key):
        return get_key_elevenlabs_info(_key)
    if re.match(r"[a-zA-Z0-9]{32}", _key):
        return get_key_mistral_info(_key)
    if re.match(r"gsk_[a-zA-Z0-9]{20}WGdyb3FY[a-zA-Z0-9]{24}", _key):
        return get_key_groq_info(_key)
    if re.match(r"[\w\-]+:[\w\-@\.]+:[\w-]+:.+", _key):  # 0: refresh token
        return await get_key_gcp_info(_key, 0)
    if re.match(r"[\w\-]+:[\w\-@\.]+:.+\\n", _key):  # 1: service account
        return await get_key_gcp_info(_key, 1)
    return not_supported(_key)
def get_key_oai_info(key):
    """Build an availability/quota report for an OpenAI API key.

    Returns a dict with GPT-4 availability, org details, model list and
    rate limits; most fields stay empty when the key is unusable.
    """
    session = requests.Session()
    status, org_data = check_key_availability(session, key)

    info_dict = {
        "key_type": "OpenAI",
        "key_availability": bool(status),
        "gpt4_availability": "",
        "gpt4_32k_availability": "",
        "default_org": "",
        "org_description": "",
        "organization": "",
        "models": "",
        "requests_per_minute": "",
        "tokens_per_minute": "",
        "quota": "",
        "all_models": "",
    }
    if not status:
        return info_dict

    # A 403 from the availability probe may still expose org data via /me.
    if status == 403:
        status_me, orgs_me = get_orgs_me(session, key)
        if status_me == 200:
            org_data = orgs_me

    subscription_info = get_subscription(key, session, org_data)
    # Map subscription fields onto the report's keys.
    field_map = (
        ("gpt4_availability", "has_gpt4"),
        ("gpt4_32k_availability", "has_gpt4_32k"),
        ("default_org", "default_org"),
        ("org_description", "org_description"),
        ("organization", "organization"),
        ("models", "models"),
        ("requests_per_minute", "rpm"),
        ("tokens_per_minute", "tpm"),
        ("quota", "quota"),
        ("all_models", "all_models"),
    )
    for dst, src in field_map:
        info_dict[dst] = subscription_info[src]
    return info_dict
async def get_key_ant_info(key, rate_limit, claude_model):
    """Summarize an Anthropic key: status, per-minute limits, tier, models.

    When rate_limit is truthy, additionally probes the key's concurrent
    rate limit (experimental, extra API traffic).
    """
    key_avai = await check_key_ant_availability(key, claude_model)

    def _limit_with_remaining(limit, remaining):
        # Append "(N left)" only when the limit field is populated.
        return limit + (f" ({remaining} left)" if limit != "" else "")

    info_dict = {  # "account_name": "",
        "key_type": "Anthropic Claude",
        "key_availability": key_avai[0],
        "status": key_avai[1],
        "filter_response": key_avai[2],
        "requests_per_minute": _limit_with_remaining(key_avai[3], key_avai[4]),
        "tokens_per_minute": _limit_with_remaining(key_avai[5], key_avai[6]),
        "tokens_input_per_minute": _limit_with_remaining(key_avai[8], key_avai[9]),
        "tokens_output_per_minute": _limit_with_remaining(key_avai[10], key_avai[11]),
        "tier": key_avai[7],
        "concurrent_rate_limit": "",
        "models": key_avai[12],
    }
    if rate_limit:
        info_dict["concurrent_rate_limit"] = await check_ant_rate_limit(key, claude_model)
    return info_dict
def get_key_gemini_info(key):
    """Report availability, status and model list for a Google Gemini key."""
    result = check_key_gemini_availability(key)
    return {  # "account_name": "",
        "key_type": "Google Gemini",
        "key_availability": result[0],
        "status": result[1],
        "models": result[2],
    }
def get_key_azure_info(endpoint, api_key):
    """Build an availability report for an Azure OpenAI resource.

    endpoint may be a bare resource host (RESOURCE.openai.azure.com) or
    include the scheme, as the UI allows "(https://)…"; the scheme is
    stripped before the probe URL is built so it is never doubled.
    Each deployment is probed with a 1-token chat request purely to read
    the x-ratelimit-* response headers.
    """
    key_avai = check_key_azure_availability(endpoint, api_key)
    info_dict = {
        "key_type": "Microsoft Azure OpenAI",
        "key_availability": key_avai[0],
        "gpt35_availability": "",
        "gpt4_availability": "",
        "gpt4_32k_availability": "",
        "dall_e_3_availability": "",
        "moderation_status": "",
        "models": "",
        "deployments": "",
        "rate_limits": {},  # per-deployment remaining/limit header values
    }
    if key_avai[0]:
        azure_deploy = get_azure_deploy(endpoint, api_key)
        status = get_azure_status(endpoint, api_key, azure_deploy)
        # Normalize host in case the user supplied a scheme-prefixed endpoint.
        host = endpoint.removeprefix("https://").removeprefix("http://")
        headers = {"api-key": api_key}
        payload = {"messages": [{"role": "user", "content": "."}], "max_tokens": 1}
        for model, deployment in azure_deploy.items():
            url = (f"https://{host}/openai/deployments/{deployment}"
                   f"/chat/completions?api-version=2024-02-15-preview")
            try:
                # Timeout so one unresponsive deployment cannot hang the whole
                # report; a network failure leaves that deployment's limits blank.
                response = requests.post(url, headers=headers, json=payload, timeout=30)
                resp_headers = response.headers
            except requests.RequestException:
                resp_headers = {}
            info_dict["rate_limits"][model] = {
                "remaining_requests": resp_headers.get("x-ratelimit-remaining-requests", ""),
                "remaining_tokens": resp_headers.get("x-ratelimit-remaining-tokens", ""),
                "limit_tokens": resp_headers.get("x-ratelimit-limit-tokens", ""),
            }
        info_dict.update({
            "gpt35_availability": status[1],
            "gpt4_availability": status[2],
            "gpt4_32k_availability": status[3],
            "dall_e_3_availability": status[4],
            "moderation_status": status[0],
            "models": key_avai[1],
            "deployments": azure_deploy,
        })
    return info_dict
def get_key_mistral_info(key):
    """Report availability, quota and model list for a Mistral AI key."""
    key_avai = check_key_mistral_availability(key)
    info_dict = {  # "account_name": "",
        "key_type": "Mistral AI",
        "key_availability": bool(key_avai),
        "has_quota": "",
        "limits": "",
        "models": "",
    }
    if key_avai:
        quota_info = check_mistral_quota(key)
        info_dict["has_quota"] = quota_info[0]
        if quota_info[1]:
            info_dict["limits"] = quota_info[1]
        info_dict["models"] = key_avai
    return info_dict
def get_key_replicate_info(key):
    """Report availability, account, quota and hardware for a Replicate token."""
    result = check_key_replicate_availability(key)
    info_dict = {  # "account_name": "",
        "key_type": "Replicate",
        "key_availability": result[0],
        "account_name": "",
        "type": "",
        "has_quota": "",
        "hardware_available": "",
    }
    if result[0]:
        account = result[1]
        info_dict["account_name"] = account["username"]
        info_dict["type"] = account["type"]
        info_dict["has_quota"] = result[2]
        info_dict["hardware_available"] = result[3]
    return info_dict
async def get_key_aws_info(key):
    """Report IAM privileges, quarantine state, regions and Bedrock usage
    for an AWS credential pair (ACCESS_KEY_ID:SECRET_ACCESS_KEY)."""
    key_avai = await check_key_aws_availability(key)
    info_dict = {  # "account_name": "",
        "key_type": "Amazon AWS Claude",
        "key_availability": key_avai[0],
        "username": "",
        "root": "",
        "admin": "",
        "quarantine": "",
        "iam_full_access": "",
        "iam_user_change_password": "",
        "aws_bedrock_full_access": "",
        "enabled_region": "",
        "models_usage": "",
        # When the key is unusable, slot 1 carries the error/usage message.
        "cost_and_usage": key_avai[1],
    }
    if key_avai[0]:
        # Result tuple slots 1..10 line up with these report fields in order.
        fields = ("username", "root", "admin", "quarantine",
                  "iam_full_access", "iam_user_change_password",
                  "aws_bedrock_full_access", "enabled_region",
                  "models_usage", "cost_and_usage")
        for offset, field in enumerate(fields, start=1):
            info_dict[field] = key_avai[offset]
    return info_dict
def get_key_openrouter_info(key):
    """Report tier, usage, balance and per-model token limits for an
    OpenRouter key.

    Fixes the user-visible report key typo "rate_limit_per_minite" →
    "rate_limit_per_minute" (the value is only displayed in the JSON
    output; no code reads the old key).
    """
    key_avai = check_key_or_availability(key)
    info_dict = {  # "account_name": "",
        "key_type": "OpenRouter",
        "key_availability": key_avai[0],
        "is_free_tier": "",
        "usage": "",
        "balance": "",
        "limit": "",
        "limit_remaining": "",
        "rate_limit_per_minute": "",
        "4_turbo_per_request_tokens_limit": "",
        "sonnet_per_request_tokens_limit": "",
        "opus_per_request_tokens_limit": "",
    }
    if key_avai[0]:
        models_info = check_key_or_limits(key)
        info_dict["is_free_tier"] = key_avai[1]["is_free_tier"]
        info_dict["limit"] = key_avai[1]["limit"]
        info_dict["limit_remaining"] = key_avai[1]["limit_remaining"]
        info_dict["usage"] = f"${key_avai[1]['usage']:.4f}"
        # Fall back to an estimate derived from the per-minute rate limit
        # when the credits endpoint reports no balance.
        if models_info[0]:
            info_dict["balance"] = f"${models_info[0]:.4f}"
        else:
            info_dict["balance"] = f"${key_avai[2]/60} (estimated)"
        info_dict["rate_limit_per_minute"] = key_avai[2]
        info_dict["4_turbo_per_request_tokens_limit"] = models_info[1]["openai/gpt-4o"]
        info_dict["sonnet_per_request_tokens_limit"] = models_info[1]["anthropic/claude-3.5-sonnet:beta"]
        info_dict["opus_per_request_tokens_limit"] = models_info[1]["anthropic/claude-3-opus:beta"]
    else:
        # On failure, slot 1 carries the error message.
        info_dict["usage"] = key_avai[1]
    return info_dict
async def get_key_gcp_info(key, type):
    """Report Vertex AI (GCP) Anthropic access for a credential string.

    type: 0 = refresh-token credential, 1 = service-account credential.
    (Parameter name kept for backward compatibility even though it
    shadows the builtin.)
    """
    result = await check_gcp_anthropic(key, type)
    info_dict = {  # "account_name": "",
        "key_type": "Vertex AI (GCP)",
        "key_availability": result[0],
        "status": "",
        "enabled_region": "",
    }
    if result[0]:
        info_dict["enabled_region"] = result[2]
    else:
        info_dict["status"] = result[1]
    return info_dict
def get_key_groq_info(key):
    """Report availability and model list for a Groq API key."""
    models = check_groq_status(key)
    return {  # "account_name": "",
        "key_type": "Groq",
        "key_availability": bool(models),
        "models": models if models else "",
    }
def get_key_nai_info(key):
    """Report availability and account info for a NovelAI persistent token."""
    result = check_nai_status(key)
    return {  # "account_name": "",
        "key_type": "NovelAI",
        "key_availability": bool(result[0]),
        "user_info": result[1] if result[0] else "",
    }
def get_key_elevenlabs_info(key):
    """Report availability, user info and voices for an ElevenLabs key."""
    result = check_elevenlabs_status(key)
    return {  # "account_name": "",
        "key_type": "ElevenLabs",
        "key_availability": result[0],
        "user_info": result[1],
        "voices_info": result[2],
    }
def get_key_xai_info(key):
    """Report availability, status and model list for an xAI Grok key."""
    result = check_xai_status(key)
    info_dict = {  # "account_name": "",
        "key_type": "xAI Grok",
        "key_availability": result[0],
        "key_status": "",
        "models": "",
    }
    if result[0]:
        info_dict["key_status"], info_dict["models"] = result[1], result[2]
    return info_dict
def get_key_stability_info(key):
    """Report availability, account info, credits and models for a
    Stability AI key."""
    result = check_stability_status(key)
    info_dict = {  # "account_name": "",
        "key_type": "Stability AI",
        "key_availability": result[0],
        "account_info": "",
        "credits": "",
        "models": "",
    }
    if result[0]:
        info_dict["account_info"] = result[1]
        info_dict["credits"] = result[2]
        info_dict["models"] = result[3]
    return info_dict
def get_key_deepseek_info(key):
    """Report availability, balance and model list for a Deepseek key."""
    result = check_deepseek_status(key)
    info_dict = {  # "account_name": "",
        "key_type": "deepseek",
        "key_availability": result[0],
        "balance": "",
        "models": "",
    }
    if result[0]:
        info_dict["models"] = result[1]
        info_dict["balance"] = result[2]
    return info_dict
def not_supported(key):
    """Fallback report for keys that match no known provider format.

    The key itself is intentionally not echoed back.
    """
    return {  # "account_name": "",
        "key_type": "Not supported",
        "status": "",
    }
def clear_inputs(text):
    """Reset the key textbox; gradio passes the current value, which is ignored."""
    return ""
# ---- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown('''
# OpenAI/Anthropic/Gemini/Azure/Mistral/Replicate/AWS Claude/OpenRouter/Vertex AI(GCP Anthropic)/Groq/NovelAI/ElevenLabs/xAI/Stability/Deepseek API Key Status Checker
*(Based on shaocongma, CncAnon1, su, Drago, kingbased key checkers)*
AWS credential's format: AWS_ACCESS_KEY_ID:AWS_SECRET_ACCESS_KEY (root might not be accurate)
Azure endpoint's format: YOUR_RESOURCE_NAME:YOUR_API_KEY or (https://)YOUR_RESOURCE_NAME.openai.azure.com;YOUR_API_KEY
GCP format: PROJECT_ID:CLIENT_EMAIL:PRIVATE_KEY (including \\n)
or refresh token: PROJECT_ID:CLIENT_ID:CLIENT_SECRET:REFRESH_TOKEN
''')

    # Models offered for the Anthropic filter-response / concurrency probe.
    claude_options = [
        'claude-3-haiku-20240307',
        'claude-3-sonnet-20240229',
        'claude-3-opus-20240229',
        'claude-3-5-sonnet-20240620',
        'claude-3-5-sonnet-20241022',
        'claude-3-5-haiku-20241022',
    ]

    with gr.Row():
        with gr.Column():
            key = gr.Textbox(lines=1, max_lines=5, label="API Key")
            claude_model = gr.Dropdown(
                claude_options,
                value="claude-3-haiku-20240307",
                label="Claude API model",
                info="model for filter_response and concurrent check",
            )
            rate_limit = gr.Checkbox(label="Check concurrent rate limit (API Claude, experimental)")
            with gr.Row():
                clear_button = gr.Button("Clear")
                submit_button = gr.Button("Submit", variant="primary")
        with gr.Column():
            info = gr.JSON(label="API Key Information", open=True)

    # Wire events inside the Blocks context so they register on `demo`.
    clear_button.click(fn=clear_inputs, inputs=[key], outputs=[key])
    submit_button.click(fn=sort_key, inputs=[key, rate_limit, claude_model],
                        outputs=[info], api_name="sort_key")
demo.launch()