cat4laugh committed · verified
Commit 23334ee · 1 Parent(s): d6a1e01

Create app.py

Files changed (1)
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+
+ # 1. Set up the model name
+ model_name = "haykgrigorian/TimeCapsuleLLM-v2-llama-1.2B"
+
+ # 2. Load the model and tokenizer
+ # The model is loaded onto the default device (CPU on a basic Space);
+ # no device_map is set here.
+ print("Loading model... this usually takes 1-2 minutes on first run.")
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # 3. Define the generation function
+ def generate_text(prompt, max_tokens=100, temperature=0.7):
+     # Tokenize the prompt
+     inputs = tokenizer(prompt, return_tensors="pt")
+
+     # Generate; gradients are disabled to save memory and speed up inference
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=int(max_tokens),
+             temperature=float(temperature),
+             do_sample=True,  # sampling so the temperature setting has an effect
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+     # Decode the result (prompt + completion) back to text
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # 4. Launch the Gradio interface
+ # This creates the UI and the API endpoint automatically
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=[
+         gr.Textbox(label="Prompt", placeholder="Enter your text here..."),
+         gr.Slider(minimum=10, maximum=300, value=100, label="Max New Tokens"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
+     ],
+     outputs="text",
+     title="TimeCapsule LLM API",
+     description="API for n8n connection."
+ )
+
+ iface.launch()
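
The description notes that this Interface doubles as an API endpoint for an n8n connection. Below is a minimal client-side sketch of calling that endpoint with gradio_client; it is not part of this commit, the Space ID and sample prompt are placeholders, and it assumes the Space is public and exposes the default "/predict" endpoint that gr.Interface creates.

# Client sketch (hypothetical; "cat4laugh/your-space-name" is a placeholder Space ID)
from gradio_client import Client

client = Client("cat4laugh/your-space-name")
result = client.predict(
    "Once upon a time in London,",  # Prompt
    150,                            # Max New Tokens
    0.7,                            # Temperature
    api_name="/predict",            # default endpoint exposed by gr.Interface
)
print(result)

An n8n workflow could reach the same endpoint through an HTTP Request node, but the exact REST route depends on the Gradio version installed in the Space, so the Python client is shown here.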