charantejapolavarapu committed on
Commit
fe9260b
·
verified ·
1 Parent(s): aef7021

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +214 -67
app.py CHANGED
@@ -1,83 +1,230 @@
1
- import gradio as gr
2
- import requests
3
  import os
 
4
  import re
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- # Model and Router Configuration
7
- MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"
8
- API_URL = f"https://router.huggingface.co/hf-inference/models/{MODEL_ID}"
 
 
 
 
9
 
10
- # This pulls the secret from your Space Settings
11
- HF_TOKEN = os.getenv("HF_TOKEN")
12
 
13
- def query_llm(prompt):
14
- # 1. Check if token exists at all
15
- if not HF_TOKEN:
16
- return "ERROR: HF_TOKEN not found in Space Secrets. Please add it in Settings."
 
 
 
 
 
17
 
18
- headers = {
19
- "Authorization": f"Bearer {HF_TOKEN.strip()}", # .strip() removes accidental spaces
20
- "Content-Type": "application/json"
21
- }
 
22
 
23
- payload = {
24
- "inputs": f"<s>[INST] {prompt} [/INST]",
25
- "parameters": {"max_new_tokens": 250, "temperature": 0.7},
26
- "options": {"wait_for_model": True}
27
- }
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  try:
30
- response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
31
-
32
- if response.status_code == 401:
33
- return "AI Error (401): The token is invalid. Please check your HF_TOKEN secret."
34
-
35
- if response.status_code != 200:
36
- return f"AI Error ({response.status_code}): {response.text}"
37
-
38
- result = response.json()
39
- if isinstance(result, list) and len(result) > 0:
40
- return result[0]["generated_text"].split("[/INST]")[-1].strip()
41
- return "AI returned an empty response."
42
-
43
  except Exception as e:
44
- return f"Connection Error: {str(e)}"
45
-
46
- # Logic functions
47
- def generate_question(role, difficulty, resume_text):
48
- if not role: return "Enter a job role."
49
- return query_llm(f"Generate one {difficulty} level interview question for a {role}. {f'Context: {resume_text}' if resume_text else ''}")
50
-
51
- def evaluate_answer(question, answer):
52
- if not answer: return "Enter an answer.", "N/A"
53
- feedback = query_llm(f"Question: {question}\nAnswer: {answer}\nGive feedback and a score out of 10.")
54
- score = re.search(r"(\d+/10)", feedback).group(1) if re.search(r"(\d+/10)", feedback) else "N/A"
55
- return feedback, score
56
-
57
- # UI
58
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
59
- gr.Markdown("# 🤖 Interview AI (Fixed Authentication)")
60
-
61
- if not HF_TOKEN:
62
- gr.Markdown("### ⚠️ WARNING: `HF_TOKEN` secret is missing! Go to Settings > Secrets to add it.")
63
 
64
- with gr.Row():
65
- role = gr.Textbox(label="Job Role")
66
- diff = gr.Dropdown(["Easy", "Medium", "Hard"], label="Difficulty", value="Medium")
67
-
68
- resume = gr.Textbox(label="Resume/Skills", lines=2)
69
- q_btn = gr.Button("Generate Question", variant="primary")
70
- question = gr.Textbox(label="Question", interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
- gr.Markdown("---")
73
- answer = gr.Textbox(label="Your Answer", lines=4)
74
- e_btn = gr.Button("Evaluate", variant="secondary")
75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  with gr.Row():
77
- feedback = gr.Textbox(label="Feedback", lines=5)
78
- score = gr.Label(label="Score")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
- q_btn.click(generate_question, [role, diff, resume], question)
81
- e_btn.click(evaluate_answer, [question, answer], [feedback, score])
 
82
 
83
- demo.launch()
 
 
1
import spaces  # NOTE: imported first — ZeroGPU Spaces require `spaces` before torch/CUDA init
import logging
import os
import random
import re
import sys
import warnings
import gradio as gr
import math
import torch
import subprocess

# Install flash-attn at startup (the Space image does not ship it pre-built).
# FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE makes pip take the prebuilt-wheel path
# instead of compiling CUDA kernels on the fly.
# BUG FIX: the previous code passed env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
# which replaces the ENTIRE child environment — wiping PATH, HOME, CUDA_* etc.
# and breaking the pip invocation. Extend the current environment instead.
subprocess.run(
    "pip install flash-attn==2.8.2 --no-build-isolation",
    env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
    check=False,  # best-effort: the app can still start if the wheel install fails
)

# Make the repo root importable so the local `modeling` package resolves.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

try:
    from modeling.t2i_pipeline import BitDanceT2IPipeline
except ImportError:
    print("Warning: Could not import BitDanceT2IPipeline. Please ensure 'modeling' folder is present.")

from huggingface_hub import snapshot_download

# Download the checkpoint into a local directory (resumable; no symlinks so the
# Space's storage holds real files). Only config/weight/doc files are fetched.
save_dir = "models/BitDance-14B-64x"
repo_id = "shallowdream204/BitDance-14B-64x"
cache_dir = save_dir + "/cache"

snapshot_download(
    cache_dir=cache_dir,
    local_dir=save_dir,
    repo_id=repo_id,
    local_dir_use_symlinks=False,  # NOTE(review): deprecated in recent huggingface_hub — confirm pin
    resume_download=True,
    allow_patterns=["*.json", "*.safetensors", "*.bin", "*.py", "*.md", "*.txt"],
)

# ==================== Environment Variables ==================================
MODEL_PATH = save_dir

# =============================================================================
warnings.filterwarnings("ignore")
logging.getLogger("transformers").setLevel(logging.ERROR)
45
+
46
# ==================== Resolution Settings ====================================
# Supported (width, height) pairs offered in the UI dropdown.
RAW_RESOLUTIONS = [
    [1024, 1024],
    [1152, 896],
    [896, 1152],
    [1280, 768],
    [768, 1280],
    [1536, 640],
    [640, 1536],
    [1920, 512],
    [512, 1920],
    [2048, 512],
    [512, 2048],
]

# Human-readable dropdown labels, e.g. "1024x1024 (1:1)": the pixel size
# followed by the aspect ratio reduced by the gcd of width and height.
RESOLUTION_CHOICES = [
    f"{w}x{h} ({w // math.gcd(w, h)}:{h // math.gcd(w, h)})"
    for w, h in RAW_RESOLUTIONS
]

# Default dropdown selection (square).
DEFAULT_RES = "1024x1024 (1:1)"
 
70
 
71
# Example prompts surfaced below the prompt box via gr.Examples.
# Mixed Chinese and English, covering cinematic portrait, game poster,
# illustration, double exposure, macro photography and poster-design styles.
# Each entry is a one-element list — the row shape gr.Examples expects for a
# single input component.
EXAMPLE_PROMPTS = [
    ["一幅具有电影感的胶片肖像,一位美丽的中国女生,凌乱的黑发在风中飘动遮住脸庞,眼神灵动地看着镜头。她在画面的左1/3处。她围着一条厚实的鲜红色针织围巾,穿着一件破旧的米色羊羔毛外套。背景是日落时分寒冷、干枯的荒野和远山。强烈的金色逆光直射镜头,产生巨大的镜头眩光和朦胧的光晕效果,空气中有尘埃感。胶片颗粒质感,浅景深,自然原始的风格。"],
    ['游戏海报,治愈系农场风,彩色小镇与田野插画,阳光明亮,大标题:『丰收日』,小字:『现已上线』,圆润粗字体,版式简洁。'],
    ["一位穿着粉色吊带罗纹长裙的亚洲少女,外搭一件米白色毛绒短开襟衫,在阳光洒落的森林小径上侧身回眸。她拥有淡粉色薰衣草发色的甜美脸庞,发间别着一朵白色小花。黄金时段的光线穿过浓密的树叶,在深绿色的背景上形成美丽的景深光斑 和柔和光晕。电影级肖像摄影,超高画质,细腻的皮肤纹理,强调少女的温柔与唯美浪漫的日系氛围。"],
    ["A surreal double exposure portrait that blends a woman’s face with a beautiful seascape. The overall mood is dreamy and mystical, with rich colors and intricate details."],
    ["A close-up, macro photography stock photo of a strawberry intricately sculpted into the shape of a hummingbird in mid-flight, its wings a blur as it sips nectar from a vibrant, tubular flower. The backdrop features a lush, colorful garden with a soft, bokeh effect, creating a dreamlike atmosphere. The image is exceptionally detailed and captured with a shallow depth of field, ensuring a razor-sharp focus on the strawberry-hummingbird and gentle fading of the background. The high resolution, professional photographers style, and soft lighting illuminate the scene in a very detailed manner, professional color grading amplifies the vibrant colors and creates an image with exceptional clarity. The depth of field makes the hummingbird and flower stand out starkly against the bokeh background."],
    ["这是一张以“阿勒泰印象”为主题的宣传海报,采用刺绣风格的插画形式,视觉上充满质感与艺术感。画面中,粉色与黑色相间的山峦巍峨矗立,山顶覆雪,山间点缀着翠绿的松树;山脚下是茂密的森林,绿色草地上散布着白色的羊群、棕色的马匹,还有一座木屋和具有新疆特色的白色毡房,几位人物在毡房附近活动,呈现出阿勒泰的游牧生活场景。文字内容方面,“XinJiang 新疆 阿勒泰印象”的标题醒目,其中“XinJiang”和“新疆”采用富有刺绣纹理的粉色艺术字体,展现了新疆阿勒泰自然风光与游牧风情,整体画面色彩柔和,充满诗意与人文气息。草地近景有一只白色绵羊与一头棕色牛悠然伫立,旁边还有一座木质小屋,整体色彩以粉、绿、黑、白为主"],
    ["室内中景人像摄影,复古胶片风格,电影叙事感画面。一位清纯气质的年轻女性,留着黑色齐刘海长直发,妆容清透伪素颜,皮肤白皙透亮。她身穿一件质地柔软、淡绿色的马海毛(Mohair)绒毛毛衣,质感毛绒蓬松,下身搭配淡青色棉麻长裙。人物慵懒地蜷缩/侧卧在沙发角落,身体姿态放松柔软,呈现自然的C型曲线。一只手轻轻拿着一颗鲜红的番茄靠近脸颊和下巴,眼神迷离、温柔且深情地直视镜头,表情处于放空与凝视之间,极具故事感。复古文艺的室内一角,沙发上铺着淡雅的复古碎花布艺沙发罩,身旁放着一盘红色的番茄作为前景点缀。背景虚化,隐约可见室内的陈设与绿植,整体环境色调偏向青绿色的胶片感。极具艺术感的局部自然光(丁达尔效应光斑)。一束明亮的午后阳光精准地照射在手部、手中的番茄以及面部一侧,形成强烈的明暗对比(Chiaroscuro)。高光部分带有光晕(Bloom),阴影部分呈现胶片特有的青蓝色调,光影层次丰富。。慵懒、静谧、梦幻、日系文艺、情绪感强、高级且富有夏末秋初的诗意。。模拟胶片相机(如Contax T3或Pentax 67)拍摄,使用50mm标准定焦镜头,大光圈(f/1.8)制造柔和的背景虚化。后期加入明显的粗颗粒胶片滤镜(Heavy Film Grain)和色彩偏移,增强模拟摄影的真实感与年代感。极度真实的皮肤质感,保留面部微小的毛孔和纹理,拒绝过度磨皮;马海毛毛衣在逆光下呈现出清晰的绒毛光晕边缘;番茄表面光滑的高光反射;碎花布料的褶皱细节;整体画面覆盖一层复古的胶片噪点。"],
]
80
 
81
def get_resolution(resolution_str):
    """Parse a "WxH (...)" label into an integer (width, height) pair.

    Accepts either "x" or "×" as the separator, with optional surrounding
    whitespace. Falls back to (1024, 1024) when no size can be parsed.
    """
    parsed = re.search(r"(\d+)\s*[×x]\s*(\d+)", resolution_str)
    if parsed is None:
        return 1024, 1024
    return int(parsed.group(1)), int(parsed.group(2))
86
 
87
def load_models(model_path):
    """Instantiate the BitDance text-to-image pipeline on CUDA.

    model_path: local directory holding the downloaded checkpoint.
    Returns a ready BitDanceT2IPipeline instance; any constructor failure
    propagates to the caller (init_app catches it).
    """
    print(f"Loading BitDance model from {model_path}...")

    # Missing path is only warned about — the pipeline constructor decides
    # whether it can still resolve the weights.
    if not os.path.exists(model_path):
        print(f"Warning: Model path {model_path} does not exist locally. Attempting to load anyway (or handle download logic here).")

    return BitDanceT2IPipeline(model_path=model_path, device="cuda")
95
+
96
def generate_image(
    pipe,
    prompt,
    resolution,
    seed=42,
    guidance_scale=7.5,
    num_inference_steps=50,
):
    """Run one text-to-image generation and return the single resulting image.

    resolution is a "WxH (...)" dropdown label; the pixel dimensions are
    parsed out of it (defaulting to 1024x1024 when unparseable) before being
    handed to the pipeline. Exactly one image is requested per call.
    """
    size = re.search(r"(\d+)\s*[×x]\s*(\d+)", resolution)
    if size is None:
        width, height = 1024, 1024
    else:
        width, height = int(size.group(1)), int(size.group(2))

    batch = pipe.generate(
        prompt=prompt,
        height=height,
        width=width,
        num_sampling_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images=1,
        seed=seed,
    )
    return batch[0]
117
+
118
# Global pipeline handle: populated once by init_app(), None while unloaded
# or when loading failed (generate() checks this before each request).
pipe = None

def init_app():
    """Load the model into the module-level `pipe`, swallowing failures.

    On any exception the error is printed and `pipe` stays None so the UI
    can still come up and report "Model not loaded." per request.
    """
    global pipe
    try:
        pipe = load_models(MODEL_PATH)
    except Exception as e:
        print(f"Error loading model: {e}")
        pipe = None
    else:
        print("Model loaded successfully.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
@spaces.GPU
def generate(
    prompt,
    resolution,
    seed=42,
    steps=50,
    guidance_scale=7.5,
    random_seed=True,
    gallery_images=None,
    progress=gr.Progress(track_tqdm=True),
):
    """Gradio click handler: generate one image and prepend it to the gallery.

    Returns (updated gallery, seed as str for display, seed as int to write
    back into the Seed number box).

    Raises gr.Error when the model is not loaded or generation fails.
    """
    # Resolve the effective seed: fresh random when the checkbox is on,
    # otherwise the user's value — with -1 meaning "pick one for me".
    if random_seed or seed == -1:
        new_seed = random.randint(1, 1000000)
    else:
        new_seed = seed

    if pipe is None:
        raise gr.Error("Model not loaded.")

    print(f"Generating: Prompt='{prompt}', Res={resolution}, Seed={new_seed}, Steps={steps}, CFG={guidance_scale}")

    try:
        image = generate_image(
            pipe=pipe,
            prompt=prompt,
            resolution=resolution,
            seed=new_seed,
            guidance_scale=guidance_scale,
            num_inference_steps=int(steps),  # sliders deliver floats
        )
    except Exception as e:
        raise gr.Error(f"Generation failed: {str(e)}")

    # Newest image first; the gallery arrives as None on the very first call.
    updated_gallery = [image] + list(gallery_images or [])
    return updated_gallery, str(new_seed), int(new_seed)
168
+
169
# Load the model eagerly at import time so the first request is fast.
init_app()

# ==================== Gradio UI ====================

with gr.Blocks(title="BitDance Demo") as demo:
    # Header with project / paper / code / model links.
    gr.Markdown(
        """<div align="center">
### BitDance: Scaling Autoregressive Generative Models with Binary Tokens
[🕸️ Project Page](https://bitdance.csuhan.com/) • [📄 Paper](https://arxiv.org/abs/2602.14041) • [💻 Code](https://github.com/shallowdream204/BitDance) • [📦 Model](https://huggingface.co/collections/shallowdream204/bitdance)
</div>""",
        elem_id="title",
    )


    with gr.Row():
        # Left column: prompt and sampling controls.
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(label="Prompt", lines=3, placeholder="Enter your prompt here...")

            resolution = gr.Dropdown(
                value=DEFAULT_RES,
                choices=RESOLUTION_CHOICES,
                label="Resolution (Width x Height)"
            )

            with gr.Row():
                seed = gr.Number(label="Seed", value=42, precision=0)  # precision=0 keeps the value integral
                random_seed = gr.Checkbox(label="Random Seed", value=True)

            with gr.Row():
                steps = gr.Slider(label="Diffusion Sampling Steps", minimum=10, maximum=100, value=50, step=1)
                guidance_scale = gr.Slider(label="CFG Guidance Scale", minimum=1.0, maximum=15.0, value=7.5, step=0.5)

            generate_btn = gr.Button("Generate", variant="primary")

            gr.Markdown("### 📝 Example Prompts")
            gr.Examples(examples=EXAMPLE_PROMPTS, inputs=prompt_input, label=None)

        # Right column: results gallery plus the seed that was actually used.
        with gr.Column(scale=1):
            output_gallery = gr.Gallery(
                label="Generated Images",
                columns=2,
                rows=2,
                height=600,
                object_fit="contain",
                format="png",
                interactive=False,
            )
            used_seed = gr.Textbox(label="Seed Used", interactive=False)

    # The gallery is both an input and an output so generate() can prepend
    # the new image to the existing ones; `seed` is also an output so the
    # effective (possibly random) seed is written back into the number box.
    generate_btn.click(
        generate,
        inputs=[prompt_input, resolution, seed, steps, guidance_scale, random_seed, output_gallery],
        outputs=[output_gallery, used_seed, seed],
        api_visibility="public",  # NOTE(review): requires a recent Gradio — confirm the Space's gradio pin
    )

# Widens the app container beyond the default max width.
css = """
.fillable{max-width: 1230px !important}
"""

if __name__ == "__main__":
    # NOTE(review): `css=` on launch() (rather than on gr.Blocks) and
    # `mcp_server=True` both require a recent Gradio version — confirm
    # against the version pinned for this Space.
    demo.launch(css=css, mcp_server=True)