"""
Legion Coder - Hugging Face Space
A powerful coding assistant powered by the Legion Coder 8M model.
10k Edition - 2026
MADE BY DEATH LEGION
POWERED BY nvdya-kit
2026 DEATH LEGION. All rights reserved.
"""
import os
import sys
import torch
import streamlit as st
import time
from transformers import AutoModelForCausalLM, AutoTokenizer
# --- Page configuration: custom branding for the 10k Edition (2026). ---
st.set_page_config(
    page_title="Legion Coder 2026 | DEATH LEGION",
    page_icon="https://img.icons8.com/color/48/000000/code.png",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Custom CSS injection point (currently empty); unsafe_allow_html lets raw
# HTML/CSS through once markup is added here.
st.markdown("""
""", unsafe_allow_html=True)

# Chat history lives in session state so it survives Streamlit reruns.
# NOTE: the original paste had the `if` body unindented (IndentationError);
# restored here.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Hugging Face Hub repository id of the model to load.
MODEL_ID = "dineth554/legion-coder-8m-10k"
# Cache the model load so Streamlit reruns reuse the same instance instead
# of re-downloading/re-initializing on every interaction.
@st.cache_resource
def load_model():
    """Load the Legion Coder model and tokenizer from the Hub.

    Returns:
        tuple: ``(model, tokenizer)`` on success, or ``(None, None)`` if
        loading fails; the failure is surfaced to the UI via ``st.error``
        rather than raised.
    """
    with st.spinner("Loading Legion Coder 8M model..."):
        try:
            tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
            model = AutoModelForCausalLM.from_pretrained(
                MODEL_ID,
                torch_dtype=torch.float32,  # full precision; this Space runs on CPU
                device_map="cpu",
                trust_remote_code=True,
            )
            return model, tokenizer
        except Exception as e:
            # Best-effort: report in the UI instead of crashing the app.
            st.error(f"Error loading model: {e}")
            return None, None
# --- Header (empty HTML placeholder; branding markup goes here). ---
st.markdown("""
""", unsafe_allow_html=True)

# Death Legion Banner
st.markdown("""
MADE WITH BY DEATH LEGION 2026
""", unsafe_allow_html=True)

# nvdya-kit Banner
st.markdown("""
Powered by nvdya-kit | Next-Gen AI Infrastructure
""", unsafe_allow_html=True)

# Sidebar with 10k Edition specs.
# NOTE: the original paste had the `with` body unindented (IndentationError);
# restored here.
with st.sidebar:
    st.markdown("""
""", unsafe_allow_html=True)
    # Deployment section
    st.markdown("""
""", unsafe_allow_html=True)

# Load the (cached) model once per session; fall back to an error banner.
model, tokenizer = load_model()
if model is None:
    st.error("Failed to load model. Please check the repository configuration.")
else:
    st.success("Model loaded successfully!")

# Main chat interface heading.
st.markdown("""
[CHAT] Start Coding
""", unsafe_allow_html=True)

# Replay the stored conversation on every rerun so the chat persists.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input: the walrus binds the submitted prompt; the body runs once
# per user submission.
if prompt := st.chat_input("Ask Legion Coder to write or explain code..."):
    # Record and echo the user message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate the assistant response, showing a typing animation meanwhile.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        # Interim "typing" indicator; replaced by the final response below.
        with message_placeholder:
            st.markdown("""
Generating code
""", unsafe_allow_html=True)

        if model is not None and tokenizer is not None:
            try:
                # Wrap the user prompt in a simple system/User/Assistant template.
                system_prompt = "You are a helpful coding assistant. Write clean, efficient code."
                full_prompt = f"{system_prompt}\n\nUser: {prompt}\n\nAssistant:"
                # Tokenize, truncating long prompts to the context window.
                inputs = tokenizer(full_prompt, return_tensors="pt", max_length=1024, truncation=True)
                # Inference only — no gradient tracking.
                with torch.no_grad():
                    outputs = model.generate(
                        inputs["input_ids"],
                        # FIX: pass the attention mask so padding/truncation is
                        # masked correctly (also silences the HF warning).
                        attention_mask=inputs["attention_mask"],
                        max_new_tokens=200,
                        temperature=0.8,
                        top_p=0.95,
                        do_sample=True,
                        pad_token_id=tokenizer.eos_token_id,
                    )
                # Decode the whole sequence, then keep only the assistant turn.
                response = tokenizer.decode(outputs[0], skip_special_tokens=True)
                if "Assistant:" in response:
                    response = response.split("Assistant:")[-1].strip()
                # Simulate typing delay for smooth animation.
                time.sleep(0.5)
            except Exception as e:
                # Surface generation failures as the chat reply.
                response = f"Error generating response: {str(e)}"
        else:
            # Fallback canned response when the model failed to load.
            time.sleep(1)
            response = """Here is a solution for your request:
```python
# Legion Coder 2026 - Generated Code
# Powered by DEATH LEGION & nvdya-kit
def example_function():
\"\"\"
This is an example function generated by Legion Coder.
Replace this with your actual implementation.
\"\"\"
pass
# TODO: Implement your specific logic here
if __name__ == "__main__":
result = example_function()
print(f"Result: {result}")
```
**Explanation:**
- This code provides a starting structure for your request
- Modify the `example_function()` to implement your specific logic
- The code follows PEP 8 guidelines and best practices
- Generated by Legion Coder 2026 - DEATH LEGION
Would you like me to explain any part of this code or help you implement specific functionality?"""

        # Replace the typing indicator with the final response.
        # BUG FIX: the original split a single-quoted f-string literal across
        # two physical lines, which is a SyntaxError; render in one call.
        message_placeholder.markdown(f'{response}', unsafe_allow_html=True)
        # Persist the assistant message in the chat history.
        st.session_state.messages.append({"role": "assistant", "content": response})
# Footer with 2026 branding
# Empty HTML placeholder; footer markup can be injected here.
st.markdown("""
""", unsafe_allow_html=True)