anthonym21 committed on
Commit 55dca75 · verified · 1 Parent(s): 747308c

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +188 -0
app.py ADDED
@@ -0,0 +1,188 @@
+ """
+ HuggingFace Space - PineScript v5 Code Generator
+ Gradio app for the fine-tuned model
+
+ To deploy:
+ 1. Create a new Space on HuggingFace (Gradio SDK)
+ 2. Upload this file as app.py
+ 3. Add requirements.txt with: gradio, transformers, torch, accelerate, peft, bitsandbytes
+ 4. Set the model repo in the Space settings or as the HF_MODEL_REPO secret
+ """
+
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ from peft import AutoPeftModelForCausalLM
+ import os
+
+ # Configuration (the repo can be overridden via the HF_MODEL_REPO Space secret)
+ MODEL_REPO = os.environ.get("HF_MODEL_REPO", "anthonym21/pinescript-v5-instructions-merged")
+ USE_PEFT = False  # Merged model, no PEFT adapter needed
+
+ # Load model
+ print(f"Loading model: {MODEL_REPO}")
+
+ if torch.cuda.is_available():
+     # GPU available (paid Space or local)
+     bnb_config = BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_quant_type="nf4",
+         bnb_4bit_compute_dtype=torch.bfloat16,
+     )
+
+     if USE_PEFT:
+         model = AutoPeftModelForCausalLM.from_pretrained(
+             MODEL_REPO,
+             quantization_config=bnb_config,
+             device_map="auto",
+             torch_dtype=torch.bfloat16,
+         )
+     else:
+         model = AutoModelForCausalLM.from_pretrained(
+             MODEL_REPO,
+             quantization_config=bnb_config,
+             device_map="auto",
+             torch_dtype=torch.bfloat16,
+         )
+ else:
+     # CPU fallback (free Space - will be slow)
+     if USE_PEFT:
+         model = AutoPeftModelForCausalLM.from_pretrained(
+             MODEL_REPO,
+             device_map="cpu",
+             torch_dtype=torch.float32,
+         )
+     else:
+         model = AutoModelForCausalLM.from_pretrained(
+             MODEL_REPO,
+             device_map="cpu",
+             torch_dtype=torch.float32,
+         )
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
+ tokenizer.pad_token = tokenizer.eos_token
+
+ print("Model loaded!")
+
+
+ def generate_pinescript(
+     prompt: str,
+     max_tokens: int = 1024,
+     temperature: float = 0.7,
+     top_p: float = 0.9,
+ ) -> str:
+     """Generate PineScript code from a prompt."""
+
+     # Format as instruction
+     formatted = f"""### Instruction:
+ {prompt}
+
+ ### Response:
+ """
+
+     inputs = tokenizer(formatted, return_tensors="pt")
+     if torch.cuda.is_available():
+         inputs = inputs.to("cuda")
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             do_sample=True,
+             pad_token_id=tokenizer.eos_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     # Extract just the response part
+     if "### Response:" in response:
+         response = response.split("### Response:")[-1].strip()
+
+     return response
+
+
+ # Example prompts
+ EXAMPLES = [
+     ["Write a PineScript v5 indicator that shows RSI with overbought/oversold zones colored on the chart"],
+     ["Create a PineScript v5 strategy that buys when MACD crosses above signal and sells when it crosses below"],
+     ["Write a PineScript v5 indicator that displays Bollinger Bands with squeeze detection"],
+     ["Create a simple moving average crossover indicator in PineScript v5 with EMA 9 and EMA 21"],
+     ["Write a PineScript v5 indicator that shows support and resistance levels based on pivot points"],
+ ]
+
+ # Gradio interface
+ with gr.Blocks(title="PineScript v5 Generator", theme=gr.themes.Soft()) as demo:
+     gr.Markdown("""
+     # 🌲 PineScript v5 Code Generator
+
+     Generate TradingView PineScript v5 code using a fine-tuned CodeGemma model.
+
+     **Tips:**
+     - Be specific about what you want (indicator, strategy, specific features)
+     - Mention inputs, colors, and plot styles if you have preferences
+     - Ask for alerts, labels, or tables if needed
+     """)
+
+     with gr.Row():
+         with gr.Column(scale=2):
+             prompt = gr.Textbox(
+                 label="What do you want to create?",
+                 placeholder="e.g., Write a PineScript v5 indicator that shows RSI with dynamic overbought/oversold levels",
+                 lines=3,
+             )
+
+             with gr.Row():
+                 max_tokens = gr.Slider(
+                     minimum=256,
+                     maximum=2048,
+                     value=1024,
+                     step=128,
+                     label="Max Tokens",
+                 )
+                 temperature = gr.Slider(
+                     minimum=0.1,
+                     maximum=1.5,
+                     value=0.7,
+                     step=0.1,
+                     label="Temperature",
+                 )
+                 top_p = gr.Slider(
+                     minimum=0.1,
+                     maximum=1.0,
+                     value=0.9,
+                     step=0.05,
+                     label="Top P",
+                 )
+
+             generate_btn = gr.Button("Generate PineScript", variant="primary")
+
+         with gr.Column(scale=3):
+             output = gr.Code(
+                 label="Generated PineScript v5 Code",
+                 language="javascript",  # Closest to PineScript syntax
+                 lines=25,
+             )
+
+     gr.Examples(
+         examples=EXAMPLES,
+         inputs=[prompt],
+         label="Example Prompts",
+     )
+
+     generate_btn.click(
+         fn=generate_pinescript,
+         inputs=[prompt, max_tokens, temperature, top_p],
+         outputs=output,
+     )
+
+     gr.Markdown("""
+     ---
+     **Note:** This model was fine-tuned on the [PineScripts-Permissive](https://huggingface.co/datasets/mrmegatelo/PineScripts-Permissive) dataset.
+     Always review and test generated code before using in live trading.
+     """)
+
+ if __name__ == "__main__":
+     demo.launch()
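
Before deploying, the generation path can be exercised locally with a quick smoke test. This is a minimal sketch and not part of the committed file: it assumes app.py has already been executed (so model, tokenizer, and generate_pinescript above are in scope, which requires the dependencies from step 3 of the docstring), and the prompt string is only an illustration.

# Hypothetical smoke test, run after app.py has loaded the model (slow on CPU-only machines)
sample = generate_pinescript(
    "Write a PineScript v5 indicator that plots a 20-period EMA and colors it by slope",
    max_tokens=512,
    temperature=0.7,
    top_p=0.9,
)
print(sample[:500])  # print the start of the generated script
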