# Uploaded by anthonym21 via huggingface_hub (commit 55dca75, verified)
"""
HuggingFace Space - PineScript v5 Code Generator
Gradio app for the fine-tuned model
To deploy:
1. Create a new Space on HuggingFace (Gradio SDK)
2. Upload this file as app.py
3. Add requirements.txt with: gradio, transformers, torch, accelerate, peft
4. Set the model repo in the Space settings or as HF_MODEL_REPO secret
"""
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM
import os
# Configuration
MODEL_REPO = "anthonym21/pinescript-v5-instructions-merged"
USE_PEFT = False  # Merged model, no PEFT adapter loading needed

# Load model at import time so the Space is ready before the UI starts.
print(f"Loading model: {MODEL_REPO}")

# Pick the loader class once instead of duplicating the four
# USE_PEFT x GPU/CPU from_pretrained branches.
_model_cls = AutoPeftModelForCausalLM if USE_PEFT else AutoModelForCausalLM

if torch.cuda.is_available():
    # GPU available (paid Space or local): load in 4-bit NF4 to fit memory.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    model = _model_cls.from_pretrained(
        MODEL_REPO,
        quantization_config=bnb_config,
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )
else:
    # CPU fallback (free Space - will be slow); full fp32, no quantization.
    model = _model_cls.from_pretrained(
        MODEL_REPO,
        device_map="cpu",
        torch_dtype=torch.float32,
    )

tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
# Model has no dedicated pad token; reuse EOS so batching/generation works.
tokenizer.pad_token = tokenizer.eos_token
print("Model loaded!")
def generate_pinescript(
    prompt: str,
    max_tokens: int = 1024,
    temperature: float = 0.7,
    top_p: float = 0.9,
) -> str:
    """Generate PineScript code from a prompt.

    Args:
        prompt: Natural-language description of the desired indicator/strategy.
        max_tokens: Maximum number of new tokens to sample.
        temperature: Sampling temperature; higher values are more random.
        top_p: Nucleus-sampling cumulative-probability cutoff.

    Returns:
        The decoded model output with the instruction template stripped,
        leaving only the generated code.
    """
    # Format as instruction (the template used during fine-tuning).
    formatted = f"""### Instruction:
{prompt}
### Response:
"""
    inputs = tokenizer(formatted, return_tensors="pt")
    # Move inputs to wherever the model actually lives. The previous
    # hard-coded `torch.cuda.is_available()` -> "cuda" check could mismatch
    # the placement chosen by device_map="auto"; model.device is authoritative
    # on both the GPU and CPU paths.
    inputs = inputs.to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; keep only the part after the
    # response marker.
    if "### Response:" in response:
        response = response.split("### Response:")[-1].strip()
    return response
# Example prompts surfaced as one-click starters in the gr.Examples widget
# below; each inner list supplies the single `prompt` input.
EXAMPLES = [
    ["Write a PineScript v5 indicator that shows RSI with overbought/oversold zones colored on the chart"],
    ["Create a PineScript v5 strategy that buys when MACD crosses above signal and sells when it crosses below"],
    ["Write a PineScript v5 indicator that displays Bollinger Bands with squeeze detection"],
    ["Create a simple moving average crossover indicator in PineScript v5 with EMA 9 and EMA 21"],
    ["Write a PineScript v5 indicator that shows support and resistance levels based on pivot points"],
]
# Gradio interface
# NOTE(review): indentation reconstructed from a whitespace-mangled paste;
# nesting inferred from the widget flow — confirm against the deployed Space.
with gr.Blocks(title="PineScript v5 Generator", theme=gr.themes.Soft()) as demo:
    # Page header and usage tips.
    gr.Markdown("""
# 🌲 PineScript v5 Code Generator
Generate TradingView PineScript v5 code using a fine-tuned CodeGemma model.
**Tips:**
- Be specific about what you want (indicator, strategy, specific features)
- Mention inputs, colors, and plot styles if you have preferences
- Ask for alerts, labels, or tables if needed
""")
    with gr.Row():
        # Left column: prompt, sampling controls, and the generate button.
        with gr.Column(scale=2):
            prompt = gr.Textbox(
                label="What do you want to create?",
                placeholder="e.g., Write a PineScript v5 indicator that shows RSI with dynamic overbought/oversold levels",
                lines=3,
            )
            with gr.Row():
                max_tokens = gr.Slider(
                    minimum=256,
                    maximum=2048,
                    value=1024,
                    step=128,
                    label="Max Tokens",
                )
                temperature = gr.Slider(
                    minimum=0.1,
                    maximum=1.5,
                    value=0.7,
                    step=0.1,
                    label="Temperature",
                )
                top_p = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.9,
                    step=0.05,
                    label="Top P",
                )
            generate_btn = gr.Button("Generate PineScript", variant="primary")
        # Right column: generated code viewer.
        with gr.Column(scale=3):
            output = gr.Code(
                label="Generated PineScript v5 Code",
                language="javascript",  # Closest to PineScript syntax
                lines=25,
            )
    gr.Examples(
        examples=EXAMPLES,
        inputs=[prompt],
        label="Example Prompts",
    )
    # Wire the button to the generation function.
    generate_btn.click(
        fn=generate_pinescript,
        inputs=[prompt, max_tokens, temperature, top_p],
        outputs=output,
    )
    gr.Markdown("""
---
**Note:** This model was fine-tuned on the [PineScripts-Permissive](https://huggingface.co/datasets/mrmegatelo/PineScripts-Permissive) dataset.
Always review and test generated code before using in live trading.
""")

if __name__ == "__main__":
    demo.launch()