fix import issue

project_importer.py (CHANGED, +85 -0)
@@ -174,6 +174,40 @@ class ProjectImporter:
         # Get model info
         model_info = self.api.model_info(model_id)
         pipeline_tag = getattr(model_info, "pipeline_tag", None)
+        library_name = getattr(model_info, "library_name", None)
+        tags = getattr(model_info, "tags", [])
+
+        # Check if this is an ONNX model (especially from onnx-community)
+        is_onnx_model = (
+            "onnx" in model_id.lower() or
+            "onnx" in str(library_name).lower() or
+            any("onnx" in str(tag).lower() for tag in tags) or
+            "transformers.js" in str(library_name).lower() or
+            any("transformers.js" in str(tag).lower() for tag in tags)
+        )
+
+        # For ONNX models, try to extract Transformers.js code from README first
+        if is_onnx_model:
+            try:
+                readme = self._fetch_hf_model_readme(model_id)
+                if readme:
+                    transformersjs_code = self._extract_transformersjs_code(readme, model_id)
+                    if transformersjs_code:
+                        return {
+                            "status": "success",
+                            "message": f"Successfully imported ONNX model: {model_id} (Transformers.js code)",
+                            "code": transformersjs_code,
+                            "language": "transformers.js",
+                            "url": f"https://huggingface.co/{model_id}",
+                            "metadata": {
+                                "pipeline_tag": pipeline_tag,
+                                "library_name": library_name,
+                                "code_type": "transformers.js",
+                                "is_onnx": True
+                            }
+                        }
+            except Exception as e:
+                print(f"Failed to extract Transformers.js code: {e}")
 
         # Try to get inference provider code
         inference_code = self._generate_inference_code(model_id, pipeline_tag)
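
Note: the detection heuristic in this hunk only inspects hub metadata strings; it never downloads weights. A minimal standalone sketch of the same check (the repo id, library_name, and tags values below are hypothetical, and the model_info() call is omitted):

# Sketch only, not part of the commit: the ONNX/Transformers.js detection
# heuristic applied to hypothetical hub metadata.
model_id = "onnx-community/demo-model"   # hypothetical repo id
library_name = "transformers.js"         # hypothetical library_name metadata
tags = ["onnx", "text-generation"]       # hypothetical tags

is_onnx_model = (
    "onnx" in model_id.lower()
    or "onnx" in str(library_name).lower()
    or any("onnx" in str(tag).lower() for tag in tags)
    or "transformers.js" in str(library_name).lower()
    or any("transformers.js" in str(tag).lower() for tag in tags)
)
print(is_onnx_model)  # True for these sample values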
@@ -624,6 +658,57 @@ with open("output.mp3", "wb") as f:
             return sorted_blocks[0][0] or "python", sorted_blocks[0][1]
 
         return None, None
+
+    def _extract_transformersjs_code(self, readme: str, model_id: str) -> Optional[str]:
+        """Extract Transformers.js code from README"""
+        if not readme:
+            return None
+
+        # Find all code blocks
+        code_blocks = []
+        for match in re.finditer(r"```([\w+-]+)?\s*\n([\s\S]*?)```", readme, re.IGNORECASE):
+            lang = (match.group(1) or "").lower()
+            code = match.group(2) or ""
+            code_blocks.append((lang, code.strip()))
+
+        # Look for JavaScript/TypeScript blocks with Transformers.js code
+        for lang, code in code_blocks:
+            if lang in ('js', 'javascript', 'ts', 'typescript'):
+                # Check if it contains Transformers.js imports
+                if '@huggingface/transformers' in code or '@xenova/transformers' in code:
+                    return code
+
+        # If no specific block found, generate default Transformers.js code
+        return self._generate_transformersjs_code(model_id)
+
+    def _generate_transformersjs_code(self, model_id: str) -> str:
+        """Generate default Transformers.js code for a model"""
+        return f'''import {{ pipeline, TextStreamer }} from "@huggingface/transformers";
+
+// Create a text generation pipeline
+const generator = await pipeline(
+  "text-generation",
+  "{model_id}",
+  {{ dtype: "fp32" }},
+);
+
+// Define the list of messages
+const messages = [
+  {{ role: "system", content: "You are a helpful assistant." }},
+  {{ role: "user", content: "Write a poem about machine learning." }},
+];
+
+// Generate a response
+const output = await generator(messages, {{
+  max_new_tokens: 512,
+  do_sample: false,
+  streamer: new TextStreamer(generator.tokenizer, {{
+    skip_prompt: true,
+    skip_special_tokens: true,
+    // callback_function: (text) => {{ /* Optional callback function */ }},
+  }}),
+}});
+console.log(output[0].generated_text.at(-1).content);'''
 
 
 # ==================== CLI Interface ====================
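
For reference, the README-scanning path added in the second hunk can be exercised in isolation. A minimal sketch (standalone, not part of the commit; the sample README string is invented for illustration):

import re

sample_readme = (
    "## Usage\n\n"
    "```js\n"
    'import { pipeline } from "@huggingface/transformers";\n'
    'const pipe = await pipeline("text-generation", "onnx-community/demo-model");\n'
    "```\n"
)

# Same fenced-block scan as _extract_transformersjs_code: collect (lang, code)
# pairs, then keep JS/TS blocks that import Transformers.js.
blocks = [
    ((m.group(1) or "").lower(), (m.group(2) or "").strip())
    for m in re.finditer(r"```([\w+-]+)?\s*\n([\s\S]*?)```", sample_readme, re.IGNORECASE)
]
js_blocks = [
    code
    for lang, code in blocks
    if lang in ("js", "javascript", "ts", "typescript")
    and ("@huggingface/transformers" in code or "@xenova/transformers" in code)
]
print(js_blocks[0] if js_blocks else "no Transformers.js block found")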