Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -32,13 +32,17 @@ def initClient():
|
|
| 32 |
)
|
| 33 |
return client
|
| 34 |
|
| 35 |
-
def greet(
|
| 36 |
modelName = "chenluuli/test-text-vis"
|
| 37 |
-
text2text_generator = pipeline("text-generation", model="Qwen/Qwen2.5-0.5B-Instruct")
|
| 38 |
-
prompt = "##你是一个可视化专家,通过我提供的信息,推荐合理的图表配置##
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
response = text2text_generator(
|
| 40 |
-
|
| 41 |
-
max_length=
|
| 42 |
)
|
| 43 |
print(response, response[0]['generated_text'])
|
| 44 |
return response[0]['generated_text']
|
|
|
|
| 32 |
)
|
| 33 |
return client
|
| 34 |
|
| 35 |
+
def greet(input):
    """Recommend a chart configuration for the user-supplied data description.

    Wraps *input* in a Chinese system-style prompt that asks the model to act
    as a visualization expert and return a suitable chart type, then runs a
    Qwen chat pipeline and returns its reply.

    Args:
        input: Free-form text describing the user's data. (Parameter name kept
            for backward compatibility even though it shadows the builtin
            ``input``.)

    Returns:
        The ``'generated_text'`` field of the pipeline's first result.
    """
    # NOTE(review): the pipeline (and therefore the model weights) is rebuilt
    # on every call — hoist to module level if call latency matters.
    text2text_generator = pipeline(
        "text-generation",
        model="Qwen/Qwen2.5-0.5B-Instruct",
        torch_dtype="auto",
        device_map="auto",
    )
    prompt = "##你是一个可视化专家,通过我提供的信息,推荐合理的图表配置##请根据这些信息,返回合理的图表类型 >>我输入的数据如下:"
    messages = [{
        "role": "user",
        "content": prompt + input,
    }]
    # BUG FIX: the original diff read `text2text_generator(messages
    # max_length=128)` — the missing comma between the positional argument
    # and the keyword argument is a SyntaxError.
    response = text2text_generator(
        messages,
        max_length=128,
    )
    print(response, response[0]['generated_text'])
    return response[0]['generated_text']
|