Pista1981 commited on
Commit
c990840
·
verified ·
1 Parent(s): ce90913

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +34 -6
  2. app.py +138 -0
  3. requirements.txt +7 -0
README.md CHANGED
@@ -1,12 +1,40 @@
1
  ---
2
- title: Hivemind Gpu Worker
3
- emoji: ⚡
4
- colorFrom: pink
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 6.2.0
8
  app_file: app.py
9
  pinned: false
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Hivemind GPU Worker
3
+ emoji: 🤖
4
+ colorFrom: purple
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
+ hardware: zero-a10g
12
  ---
13
 
14
+ # 🤖 Hivemind GPU Worker
15
+
16
+ ZeroGPU Training Worker for Hivemind autonomous agents.
17
+
18
+ ## Part of FREE GPU FARM
19
+
20
+ | Platform | GPU | Hours/Week | Status |
21
+ |----------|-----|------------|--------|
22
+ | Kaggle | P100 | 30h | ✅ Auto |
23
+ | **HuggingFace** | **ZeroGPU** | **42h** | ✅ Auto |
24
+ | Colab | T4 | 84h | Manual |
25
+ | Total | - | **156h** | - |
26
+
27
+ ## API Usage
28
+
29
+ ```python
30
+ from gradio_client import Client
31
+
32
+ client = Client("Pista1981/hivemind-gpu-worker")
33
+ result = client.predict(
34
+ agent_name="MyAgent",
35
+ skill="machine learning",
36
+ epochs=1,
37
+ api_name="/train_agent"
38
+ )
39
+ print(result)
40
+ ```
app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ๐Ÿค– HIVEMIND GPU WORKER
3
+ ======================
4
+ ZeroGPU Training Worker za Hivemind agente.
5
+
6
+ Ovo je deo FREE GPU FARM sistema:
7
+ - Kaggle: 30h/nedelja (P100)
8
+ - HuggingFace: 42h/nedelja (ZeroGPU T4) โ† OVO
9
+ - Total: 72h automatski!
10
+ """
11
+
12
+ import gradio as gr
13
+ import torch
14
+ import spaces
15
+ from transformers import AutoModelForCausalLM, AutoTokenizer
16
+ from peft import LoraConfig, get_peft_model
17
+ from datasets import Dataset
18
+ from datetime import datetime
19
+
20
+ # Global model cache
21
+ model = None
22
+ tokenizer = None
23
+
24
@spaces.GPU(duration=60)  # ZeroGPU - max 60s per call
def train_agent(agent_name: str, skill: str, epochs: int = 1):
    """Fine-tune the cached TinyLlama model on a tiny skill prompt via LoRA.

    Args:
        agent_name: Name of the agent being trained (appears in the log only).
        skill: Skill description interpolated into the training prompt.
        epochs: Number of passes over the (single-example) dataset.

    Returns:
        A newline-joined, human-readable training log. Errors are reported
        inside the log instead of being raised, so the Gradio UI never crashes.
    """
    global model, tokenizer

    start = datetime.now()
    results = []
    results.append(f"🤖 Agent: {agent_name}")
    results.append(f"📚 Skill: {skill}")
    results.append(f"⏰ Started: {start}")

    try:
        # gr.Slider delivers floats even with step=1; range() needs an int.
        epochs = int(epochs)

        # Load model if not cached (first call in this worker process).
        if model is None:
            results.append("📥 Loading TinyLlama...")
            model = AutoModelForCausalLM.from_pretrained(
                "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                torch_dtype=torch.float16,
                device_map="auto"
            )
            tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
            tokenizer.pad_token = tokenizer.eos_token

        results.append("🔧 Setting up LoRA...")
        lora = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM")
        train_model = get_peft_model(model, lora)

        # Quick training data — a single synthetic chat-formatted example.
        data = [{"text": f"<|user|>\nTeach {skill}</s>\n<|assistant|>\nI will teach {skill}!</s>"}]

        results.append("🏋️ Training...")

        # Manual mini-training loop (keeps each call under the ZeroGPU timeout).
        train_model.train()
        optimizer = torch.optim.AdamW(train_model.parameters(), lr=2e-4)

        for epoch in range(epochs):
            loss = None  # guards the log line below should data ever be empty
            for item in data:
                inputs = tokenizer(item["text"], return_tensors="pt", truncation=True, max_length=128)
                inputs = {k: v.to(train_model.device) for k, v in inputs.items()}

                outputs = train_model(**inputs, labels=inputs["input_ids"])
                loss = outputs.loss
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

            if loss is not None:
                results.append(f" Epoch {epoch+1}: Loss = {loss.item():.4f}")

        elapsed = (datetime.now() - start).total_seconds()
        results.append(f"✅ Complete in {elapsed:.1f}s!")
        results.append(f"🧠 {agent_name} learned: {skill}")

    except Exception as e:
        # Surface failures in the UI log instead of crashing the Space.
        results.append(f"❌ Error: {str(e)}")

    return "\n".join(results)
81
+
82
+
83
@spaces.GPU(duration=30)
def quick_inference(prompt: str):
    """Generate up to 50 new tokens from the cached model.

    Args:
        prompt: Free-form text fed directly to the model.

    Returns:
        The decoded completion, or a notice string when the model has not
        been loaded yet (train_agent() performs the lazy load).
    """
    global model, tokenizer

    # Both are populated together by train_agent(); check both defensively.
    if model is None or tokenizer is None:
        return "Model not loaded. Run training first."

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # inference_mode: generation needs no autograd bookkeeping, saves memory.
    with torch.inference_mode():
        outputs = model.generate(**inputs, max_new_tokens=50)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
94
+
95
+
96
# Gradio Interface — one tab per worker capability (training / inference).
with gr.Blocks(title="🤖 Hivemind GPU Worker") as demo:
    gr.Markdown("""
    # 🤖 Hivemind GPU Worker

    **ZeroGPU Training Worker** - Part of FREE GPU FARM

    | Platform | GPU | Hours/Week |
    |----------|-----|------------|
    | Kaggle | P100 | 30h |
    | **HuggingFace** | **ZeroGPU T4** | **42h** |
    | Total Automated | - | **72h** |
    """)

    with gr.Tab("🏋️ Training"):
        agent_input = gr.Textbox(label="Agent Name", value="TestAgent")
        skill_input = gr.Textbox(label="Skill to Learn", value="machine learning")
        epochs_input = gr.Slider(1, 3, value=1, step=1, label="Epochs")
        train_btn = gr.Button("🚀 Train", variant="primary")
        train_output = gr.Textbox(label="Results", lines=15)

        train_btn.click(train_agent, [agent_input, skill_input, epochs_input], train_output)

    with gr.Tab("🔮 Inference"):
        prompt_input = gr.Textbox(label="Prompt", value="What is machine learning?")
        infer_btn = gr.Button("Generate")
        infer_output = gr.Textbox(label="Output", lines=5)

        infer_btn.click(quick_inference, prompt_input, infer_output)

    gr.Markdown("""
    ---
    *Hivemind Colony - Autonomous AI Agents*

    API Endpoint: Use this Space programmatically!
    ```python
    from gradio_client import Client
    client = Client("Pista1981/hivemind-gpu-worker")
    result = client.predict(agent_name="MyAgent", skill="coding", epochs=1, api_name="/train_agent")
    ```
    """)

# Spaces executes app.py as __main__, so this guard changes nothing on the
# platform but lets the module be imported (e.g. for tests) without launching.
if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ torch
3
+ transformers
4
+ peft
5
+ datasets
6
+ accelerate
7
+ spaces