FireStarter2040 committed on
Commit
06aaacb
·
1 Parent(s): 4fb1f8f

FireStarter full refactor: README, AGENTS, env.example, gitignore, app.py, requirements

Browse files
Files changed (5) hide show
  1. .gitignore +17 -0
  2. AGENTS.md +24 -0
  3. README.md +29 -14
  4. app.py +18 -143
  5. requirements.txt +2 -1
.gitignore ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.pyc
4
+ *.pyo
5
+ *.pyd
6
+
7
+ # Envs
8
+ .env
9
+ .env.*
10
+ venv/
11
+ .envrc
12
+
13
+ # System
14
+ .DS_Store
15
+
16
+ # Locks
17
+ requirements.lock
AGENTS.md ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AGENTS — WarpNation Mesh
2
+
3
+ Este documento define rapidamente os agentes utilizados por este app dentro do ecossistema FireStarter.
4
+
5
+ ---
6
+
7
+ ## 🧠 SYSTEM_AGENT
8
+ - Responsável pela orquestração geral.
9
+ - Carrega contexto, valida integridade e mantém coerência.
10
+
11
+ ## 🦾 MODEL_AGENT
12
+ - Interface com os modelos (OpenAI/HuggingFace).
13
+ - Garante formatação, parâmetros e consistência da resposta.
14
+
15
+ ## 🔐 SEC_AGENT
16
+ - Intermedia secrets e valida existência das variáveis.
17
+ - Evita erros silenciosos por falta de config.
18
+
19
+ ## 🧩 UTIL_AGENT
20
+ - Funções auxiliares (limpeza, formatação, logs).
21
+
22
+ ---
23
+
24
+ Esses agentes seguem o padrão do WarpNation Mesh, permitindo plug-and-play com MCP e serviços externos.
README.md CHANGED
@@ -1,14 +1,29 @@
1
- ---
2
- title: UncensoredChat
3
- emoji: 🤬
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 5.44.0
8
- app_file: app.py
9
- pinned: true
10
- license: other
11
- short_description: Xortron Criminal Computing
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Chat App — FireStarter Edition
2
+
3
+ Aplicação minimalista para interação com modelos OpenAI/HuggingFace usando Python, dotenv e uma interface simples.
4
+
5
+ ## Requisitos
6
+ - Python 3.9+
7
+ - `pip install -r requirements.txt`
8
+ - Criar arquivo `.env` baseado no `.env.example`
9
+
10
+ ## Rodando
11
+ ```bash
12
+ python app.py
13
+ ```
14
+
15
+ ## Estrutura
16
+
17
+ * `app.py` — código principal
18
+ * `.env` — credenciais (não commitar)
19
+ * `AGENTS.md` — definição de agentes do ecossistema WarpNation
20
+ * `requirements.txt` — dependências mínimas
21
+
22
+ ## Deploy (HuggingFace)
23
+
24
+ 1. Crie novo Space (Gradio/Python)
25
+ 2. Suba todos os arquivos
26
+ 3. Secrets → adicione suas chaves do `.env`
27
+ 4. Acesse a URL pública
28
+
29
+ Pronto.
app.py CHANGED
@@ -1,157 +1,32 @@
1
- import gradio as gr
2
- from openai import OpenAI
3
  import os
4
  from dotenv import load_dotenv
 
5
 
6
  load_dotenv()
7
 
8
- SYSTEM_PROMPT = os.getenv("XTRNPMT")
9
-
10
- API_BASE_URL = "https://api.featherless.ai/v1"
11
-
12
- FEATHERLESS_API_KEY = os.getenv("FEATHERLESS_API_KEY")
13
 
14
- FEATHERLESS_MODEL = "darkc0de/XortronCriminalComputingConfig"
 
15
 
16
- if not FEATHERLESS_API_KEY:
17
- print("WARNING: FEATHERLESS_API_KEY environment variable is not set.")
18
 
19
- try:
20
- if not FEATHERLESS_API_KEY:
21
- raise ValueError("FEATHERLESS_API_KEY is not set. Please set it as an environment variable or a secret in your deployment environment.")
22
 
23
- client = OpenAI(
24
- base_url=API_BASE_URL,
25
- api_key=FEATHERLESS_API_KEY
 
 
26
  )
27
- print(f"OpenAI client initialized with base_url: {API_BASE_URL} for Featherless AI, model: {FEATHERLESS_MODEL}")
28
-
29
- except Exception as e:
30
- print(f"Error initializing OpenAI client with base_url '{API_BASE_URL}': {e}")
31
- raise RuntimeError(
32
- "Could not initialize OpenAI client. "
33
- f"Please check the API base URL ('{API_BASE_URL}'), your Featherless AI API key, model ID, "
34
- f"and ensure the server is accessible. Original error: {e}"
35
- )
36
-
37
-
38
- def respond(message, history):
39
- """
40
- This function processes the user's message and the chat history to generate a response
41
- from the language model using the Featherless AI API (compatible with OpenAI's API),
42
- including a static system prompt.
43
-
44
- Args:
45
- message (str): The latest message from the user.
46
- history (list of lists): A list where each inner list contains a pair of
47
- [user_message, ai_message].
48
-
49
- Yields:
50
- str: The generated response token by token (for streaming).
51
- """
52
- messages = [{"role": "system", "content": SYSTEM_PROMPT}]
53
-
54
- for user_message, ai_message in history:
55
- if user_message:
56
- messages.append({"role": "user", "content": user_message})
57
- if ai_message:
58
- messages.append({"role": "assistant", "content": ai_message})
59
-
60
- messages.append({"role": "user", "content": message})
61
-
62
- response_text = ""
63
-
64
- try:
65
- stream = client.chat.completions.create(
66
- messages=messages,
67
- model=FEATHERLESS_MODEL,
68
- stream=True,
69
- )
70
-
71
- for chunk in stream:
72
- if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
73
- token = chunk.choices[0].delta.content
74
- response_text += token
75
- yield response_text
76
- elif chunk.choices and chunk.choices[0].message and chunk.choices[0].message.content is not None:
77
- token = chunk.choices[0].message.content
78
- response_text += token
79
- yield response_text
80
-
81
- except Exception as e:
82
- error_message = f"An error occurred during model inference with Featherless AI: {e}"
83
- print(error_message)
84
- yield error_message
85
-
86
-
87
- kofi_script = """
88
- <script src='https://storage.ko-fi.com/cdn/scripts/overlay-widget.js'></script>
89
- <script>
90
- kofiWidgetOverlay.draw('xortron', {
91
- 'type': 'floating-chat',
92
- 'floating-chat.donateButton.text': 'Support me',
93
- 'floating-chat.donateButton.background-color': '#794bc4',
94
- 'floating-chat.donateButton.text-color': '#fff'
95
- });
96
- </script>
97
- """
98
-
99
- kofi_button_html = """
100
- <div style="text-align: center; padding: 20px;">
101
- <a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'>
102
- <img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi5.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' />
103
- </a>
104
- </div>
105
- """
106
-
107
- donation_solicitation_html = """
108
- <div style="text-align: center; font-size: x-small; margin-bottom: 5px;">
109
- The Cybernetic Criminal Computing Corporation presents: XORTRON 4.2, free of charge, unlimited, no login, no signup, no bullshit. Finding yourself in a long queue? Consider donating so we can scale and remain 100% free for all. Im sure even a low-life deadbeat freeloader like yourself can at least throw some spare change right? - Support Xortron @ ko-fi.com/xortron<br>
110
-
111
- </div>
112
- """
113
-
114
- custom_css = """
115
- @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
116
- body, .gradio-container {
117
- font-family: 'Orbitron', sans-serif !important;
118
- }
119
- .gr-button { font-family: 'Orbitron', sans-serif !important; }
120
- .gr-input { font-family: 'Orbitron', sans-serif !important; }
121
- .gr-label { font-family: 'Orbitron', sans-serif !important; }
122
- .gr-chatbot .message { font-family: 'Orbitron', sans-serif !important; }
123
- """
124
-
125
- with gr.Blocks(theme="Nymbo/Nymbo_Theme", head=kofi_script, css=custom_css) as demo:
126
- # Added the header image using gr.HTML
127
- gr.HTML('<img src="https://cdn-uploads.huggingface.co/production/uploads/6540a02d1389943fef4d2640/TVPNkZCjnaOwfzD4Ze9tj.png" alt="Header Image" style="display: block; margin-left: auto; margin-right: auto; max-width: 22%; height: auto;">')
128
-
129
- gr.ChatInterface(
130
- fn=respond, # The function to call when a message is sent
131
- chatbot=gr.Chatbot( # Configure the chatbot display area
132
- height=700, # Set the height of the chat history display to 800px
133
- label="XORTRON - Criminal Computing" # Set the label
134
- )
135
- )
136
-
137
- gr.HTML(donation_solicitation_html)
138
- gr.HTML(kofi_button_html)
139
 
140
 
141
  if __name__ == "__main__":
142
- if not FEATHERLESS_API_KEY:
143
- print("\nCRITICAL ERROR: FEATHERLESS_API_KEY is not set.")
144
- print("Please ensure it's set as a secret in your Hugging Face Space settings or as an environment variable.\n")
145
-
146
- try:
147
- demo.queue(default_concurrency_limit=3)
148
-
149
- demo.launch(show_api=False, share=False)
150
- except NameError as ne:
151
- print(f"Gradio demo could not be launched. 'client' might not have been initialized: {ne}")
152
- except RuntimeError as re:
153
- print(f"Gradio demo could not be launched due to an error during client initialization: {re}")
154
- except Exception as e:
155
- print(f"An unexpected error occurred when trying to launch Gradio demo: {e}")
156
-
157
 
 
 
 
1
  import os
2
  from dotenv import load_dotenv
3
+ from openai import OpenAI
4
 
5
  load_dotenv()
6
 
7
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
8
+ MODEL_NAME = os.getenv("MODEL_NAME", "gpt-4o-mini")
 
 
 
9
 
10
+ if not OPENAI_API_KEY:
11
+ raise ValueError("OPENAI_API_KEY não encontrado. Configure no .env.")
12
 
13
+ client = OpenAI(api_key=OPENAI_API_KEY)
 
14
 
 
 
 
15
 
16
+ def chat(prompt: str):
17
+ """Função principal de interação com o modelo."""
18
+ resp = client.chat.completions.create(
19
+ model=MODEL_NAME,
20
+ messages=[{"role": "user", "content": prompt}],
21
  )
22
+ return resp.choices[0].message["content"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
 
25
  if __name__ == "__main__":
26
+ print("🔥 FireStarter Chat — pronto")
27
+ while True:
28
+ msg = input("\nVocê: ").strip()
29
+ if msg.lower() in ("exit", "quit"):
30
+ break
31
+ print("\nAI:", chat(msg))
 
 
 
 
 
 
 
 
 
32
 
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
  huggingface_hub==0.25.2
2
- openai
 
 
1
  huggingface_hub==0.25.2
2
+ openai
3
+ python-dotenv