import time

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

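# Hugging Face Hub id of the fine-tuned RML checkpoint (Phi-1.5 base).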
MODEL_ID = "akshaynayaks9845/rml-ai-phi1_5-rml-100k"
# Global model and tokenizer
_model = None
_tokenizer = None
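
# Load the model and tokenizer once and cache them in the module globals, so
# repeated requests don't reload the weights. Returns True on success.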
def load_model():
    global _model, _tokenizer
    if _model is None:
        try:
            print("Loading RML model...")
            _tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
            if _tokenizer.pad_token is None:
                _tokenizer.pad_token = _tokenizer.eos_token
            _model = AutoModelForCausalLM.from_pretrained(
                MODEL_ID,
                trust_remote_code=True,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                low_cpu_mem_usage=True,
            )
            print("Model loaded successfully!")
        except Exception as e:
            print(f"Error loading model: {e}")
            return False
    return True
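
# Tokenize the prompt, generate a completion, and return only the newly
# produced text together with a wall-clock latency readout.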
def generate_response(prompt, max_new_tokens=128, temperature=0.2):
    start = time.time()
    if not load_model():
        return "Error: Could not load the RML model. Please try again."
    try:
        # Prepare input and move it to the same device as the model
        inputs = _tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
        inputs = {k: v.to(_model.device) for k, v in inputs.items()}
        # Generate response; sampling flags are only passed when sampling is
        # enabled, since temperature/top_p are ignored under greedy decoding
        gen_kwargs = {
            "max_new_tokens": int(max_new_tokens),
            "do_sample": temperature > 0,
            "repetition_penalty": 1.1,
            "pad_token_id": _tokenizer.eos_token_id,
        }
        if temperature > 0:
            gen_kwargs["temperature"] = float(temperature)
            gen_kwargs["top_p"] = 0.9
        with torch.no_grad():
            outputs = _model.generate(**inputs, **gen_kwargs)
        # Decode only the newly generated tokens (everything after the prompt);
        # slicing by token count is more reliable than matching the prompt as a
        # string prefix, which can fail under tokenizer normalization
        new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
        response = _tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
        elapsed = int((time.time() - start) * 1000)
        return response + f"\n\n(⏱️ {elapsed} ms)"
    except Exception as e:
        return f"Error generating response: {str(e)}"

# Sample questions for the demo
SAMPLES = [
    "What is artificial intelligence?",
    "Explain machine learning in simple terms",
    "What is quantum computing?",
    "How does RML work?",
    "Tell me about neural networks",
]
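
# Build the Gradio UI: a prompt box, generation controls, sample questions,
# and an output panel wired to generate_response.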
with gr.Blocks(title="RML-AI Demo") as demo:
    gr.Markdown('''
# RML-AI Demo (HR Testing)

This is a lightweight demo of the RML-AI system for recruiters and stakeholders.

**Key Features:**
- Sub-50ms inference latency
- 100x memory efficiency over traditional LLMs
- 70% hallucination reduction
- Complete source attribution
- 100GB knowledge base access

**Model:** akshaynayaks9845/rml-ai-phi1_5-rml-100k

**Dataset:** 100GB RML knowledge base
''')
    with gr.Row():
        prompt = gr.Textbox(
            label="Your question",
            value=SAMPLES[0],
            placeholder="Ask about AI, ML, RML, or any topic...",
        )
    with gr.Row():
        max_new = gr.Slider(32, 256, value=128, step=16, label="Max new tokens")
        temp = gr.Slider(0.0, 1.0, value=0.2, step=0.1, label="Temperature")
    with gr.Row():
        btn = gr.Button("Generate Response", variant="primary")
    output = gr.Textbox(label="RML-AI Response", lines=10)
    with gr.Row():
        gr.Examples(SAMPLES, inputs=prompt, label="Sample Questions")

    btn.click(generate_response, [prompt, max_new, temp], output)
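
# Standard entry point: launch the app when this file is executed directly
# (a Hugging Face Space runs this module as its app entry point).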
if __name__ == "__main__":
    demo.launch()
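# To try it locally (assumes gradio, torch, and transformers are installed):
#   pip install gradio torch transformers
#   python app.py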