Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +30 -0
- LICENSE +34 -0
- OPEN SOURCE SOFTWARE NOTICE +635 -0
- README.md +109 -5
- README_EN.md +113 -0
- chat_template.json +3 -0
- config.json +69 -0
- configuration_openpangu_vl.py +148 -0
- cookbooks/assets/grounding/depth_1.jpg +3 -0
- cookbooks/assets/grounding/depth_2.jpg +3 -0
- cookbooks/assets/grounding/dinner.jpg +3 -0
- cookbooks/assets/grounding/macaron.jpg +3 -0
- cookbooks/assets/grounding/peoples.jpg +3 -0
- cookbooks/assets/grounding/test_example_point_01.png +3 -0
- cookbooks/assets/grounding/test_example_point_02.png +3 -0
- cookbooks/assets/grounding/tools.jpg +0 -0
- cookbooks/assets/ocr/example1_1.png +3 -0
- cookbooks/assets/ocr/example1_2.png +3 -0
- cookbooks/assets/ocr/example2.png +3 -0
- cookbooks/assets/ocr/example3.jpg +0 -0
- cookbooks/assets/ocr/example4.png +3 -0
- cookbooks/assets/ocr/example5.png +3 -0
- cookbooks/assets/ocr/example6.png +3 -0
- cookbooks/assets/ocr/example7.png +3 -0
- cookbooks/assets/ocr/example7_2.png +3 -0
- cookbooks/assets/reasoning/biology.png +0 -0
- cookbooks/assets/reasoning/chemistry.png +3 -0
- cookbooks/assets/reasoning/geometry.png +3 -0
- cookbooks/assets/reasoning/logical.png +0 -0
- cookbooks/assets/video/example_video_1.mp4 +3 -0
- cookbooks/assets/video/example_video_2.mp4 +3 -0
- cookbooks/assets/video/example_video_3.mp4 +3 -0
- cookbooks/assets/video/example_video_4.mp4 +3 -0
- cookbooks/assets/video/example_video_5.mp4 +3 -0
- cookbooks/assets/video/example_video_6.mp4 +3 -0
- cookbooks/assets/video/example_video_7.mp4 +3 -0
- cookbooks/assets/video/example_video_8.mp4 +3 -0
- cookbooks/assets/video/example_video_9.mp4 +3 -0
- cookbooks/grounding.ipynb +3 -0
- cookbooks/ocr.ipynb +0 -0
- cookbooks/reasoning.ipynb +0 -0
- cookbooks/video.ipynb +329 -0
- doc/technical_report.pdf +3 -0
- doc/vllm_ascend_for_openpangu_vl_7b.md +227 -0
- doc/vllm_ascend_for_openpangu_vl_7b_EN.md +225 -0
- generation_config.json +11 -0
- imageprocessor_openpangu_vl.py +418 -0
- inference/generate.py +58 -0
- inference/requirements.txt +4 -0
- inference/vllm_ascend/examples/quick_start.py +89 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,33 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
tokenizer.model filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
model.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
cookbooks/assets/grounding/depth_1.jpg filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
cookbooks/assets/grounding/depth_2.jpg filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
cookbooks/assets/grounding/dinner.jpg filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
cookbooks/assets/grounding/macaron.jpg filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
cookbooks/assets/grounding/peoples.jpg filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
cookbooks/assets/grounding/test_example_point_01.png filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
cookbooks/assets/grounding/test_example_point_02.png filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
cookbooks/assets/ocr/example1_1.png filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
cookbooks/assets/ocr/example1_2.png filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
cookbooks/assets/ocr/example2.png filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
cookbooks/assets/ocr/example4.png filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
cookbooks/assets/ocr/example5.png filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
cookbooks/assets/ocr/example6.png filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
cookbooks/assets/ocr/example7.png filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
cookbooks/assets/ocr/example7_2.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
cookbooks/assets/reasoning/chemistry.png filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
cookbooks/assets/reasoning/geometry.png filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
cookbooks/assets/video/example_video_1.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
cookbooks/assets/video/example_video_2.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
cookbooks/assets/video/example_video_3.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 58 |
+
cookbooks/assets/video/example_video_4.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
+
cookbooks/assets/video/example_video_5.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
cookbooks/assets/video/example_video_6.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
cookbooks/assets/video/example_video_7.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
cookbooks/assets/video/example_video_8.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
cookbooks/assets/video/example_video_9.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 64 |
+
cookbooks/grounding.ipynb filter=lfs diff=lfs merge=lfs -text
|
| 65 |
+
doc/technical_report.pdf filter=lfs diff=lfs merge=lfs -text
|
LICENSE
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0
|
| 2 |
+
|
| 3 |
+
This OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0 (the "Agreement") is a legal agreement between You and Huawei Technologies Co., Ltd. ("Huawei", "We" or "Us"), and it governs Your reproducing, use, modification, and distribution of openPangu as made available by Huawei under this Agreement.
|
| 4 |
+
|
| 5 |
+
By using, reproducing, modifying, distributing, performing or displaying any portion or element of openPangu, or otherwise accepting the terms of this Agreement, You agree to be bound by this Agreement.
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
1.1. “openPangu” or “Model” means openPangu large language models and software, including trained model weights, parameters (including optimizer states), accompanying source code and scripts released under this Agreement.
|
| 9 |
+
1.2. “Derivative Model” means all (1) modifications to the Model, (2) works based on the Model, and (3) any other derivative works of the Model. For clarity, information or content results from operating or otherwise using the Model is not a Derivative Model.
|
| 10 |
+
1.3. “You” or “Your” means an individual or Legal Entity exercising permissions granted by this Agreement and/or using the Model for any purpose.
|
| 11 |
+
1.4. “Third Party” or “Third Parties” means individuals or legal entities that are not under common control with Us or You.
|
| 12 |
+
|
| 13 |
+
2. License Grant. Subject to Your full compliance with the terms and conditions of this Agreement, We hereby grant to You a perpetual, worldwide, non-exclusive, non-transferable, no-charge, royalty-free license (except as stated in Section 3) to use, reproduce, modify, and distribute the Model.
|
| 14 |
+
|
| 15 |
+
3. Conditions for License Grant. You represent and warrant that You will not, access, download, install, run, deploy, integrate, modify, or otherwise use the Model, directly or indirectly, within the European Union.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
4. Redistribution.
|
| 19 |
+
4.1. If You distribute the Model or Derivative Model, You shall retain in Your distribution (1) a copy of this agreement, and (2) all copyright notices and other notices of origin included in the Model that are applicable to Your distribution.
|
| 20 |
+
4.2. Further, if You distribute or make available to Third Parties a product or service (including another AI model) based on the Model, You are required to (1) display the acknowledgement “Powered by openPangu” and (2) include a trademark notice “openPangu is a trademark of Huawei Technologies Co., Ltd.” on related webpages, user manuals, product documentations or other advertising materials mentioning features of the Model.
|
| 21 |
+
4.3. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for Derivative Model made by You as a whole, provided Your use, reproduction, and distribution of the Model otherwise complies with the terms and conditions of this Agreement.
|
| 22 |
+
|
| 23 |
+
5. Ownership. We do not claim ownership to any information or content generated using the Model or Derivative Model that are made by You. You are solely responsible for evaluating the accuracy and appropriateness of such information or content for Your use case.
|
| 24 |
+
|
| 25 |
+
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of Huawei, except as required for complying with Section 4.2.
|
| 26 |
+
|
| 27 |
+
7. Indemnity. You will indemnify and hold harmless Huawei from and against any claim by any third party arising out of or related to Your use or distribution of the Model or Derivative Model made by You (e.g. a violation against Section 3). For avoidance of doubt, “third party” in this clause include supervisory authorities.
|
| 28 |
+
|
| 29 |
+
8. THE MODEL IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NONINFRINGEMENT, ACCURACY, OR THE ABSENCE OF LATENT OR OTHER DEFECTS OR ERRORS, WHETHER OR NOT DISCOVERABLE, ALL TO THE GREATEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW.
|
| 30 |
+
|
| 31 |
+
9. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MODEL, IN WHOLE OR IN PART, NO MATTER HOW IT’S CAUSED OR THE LEGAL THEORY IT IS BASED ON, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
END OF THE TERMS AND CONDITIONS
|
OPEN SOURCE SOFTWARE NOTICE
ADDED
|
@@ -0,0 +1,635 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
OPEN SOURCE SOFTWARE NOTICE
|
| 2 |
+
|
| 3 |
+
Please note we provide an open source software notice along with this product and/or this product firmware (in the following just “this product”). The open source software licenses are granted by the respective right holders. And the open source licenses prevail all other license information with regard to the respective open source software contained in the product, including but not limited to End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country.
|
| 4 |
+
|
| 5 |
+
Warranty Disclaimer
|
| 6 |
+
THE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS.
|
| 7 |
+
|
| 8 |
+
Copyright Notice and License Texts
|
| 9 |
+
|
| 10 |
+
Software: transformers 4.52.4
|
| 11 |
+
Copyright notice:
|
| 12 |
+
Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
| 13 |
+
|
| 14 |
+
License Text:
|
| 15 |
+
----------------------------------------
|
| 16 |
+
|
| 17 |
+
Apache License
|
| 18 |
+
Version 2.0, January 2004
|
| 19 |
+
http://www.apache.org/licenses/
|
| 20 |
+
|
| 21 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 22 |
+
|
| 23 |
+
1. Definitions.
|
| 24 |
+
|
| 25 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 26 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 27 |
+
|
| 28 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 29 |
+
the copyright owner that is granting the License.
|
| 30 |
+
|
| 31 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 32 |
+
other entities that control, are controlled by, or are under common
|
| 33 |
+
control with that entity. For the purposes of this definition,
|
| 34 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 35 |
+
direction or management of such entity, whether by contract or
|
| 36 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 37 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 38 |
+
|
| 39 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 40 |
+
exercising permissions granted by this License.
|
| 41 |
+
|
| 42 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 43 |
+
including but not limited to software source code, documentation
|
| 44 |
+
source, and configuration files.
|
| 45 |
+
|
| 46 |
+
"Object" form shall mean any form resulting from mechanical
|
| 47 |
+
transformation or translation of a Source form, including but
|
| 48 |
+
not limited to compiled object code, generated documentation,
|
| 49 |
+
and conversions to other media types.
|
| 50 |
+
|
| 51 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 52 |
+
Object form, made available under the License, as indicated by a
|
| 53 |
+
copyright notice that is included in or attached to the work
|
| 54 |
+
(an example is provided in the Appendix below).
|
| 55 |
+
|
| 56 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 57 |
+
form, that is based on (or derived from) the Work and for which the
|
| 58 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 59 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 60 |
+
of this License, Derivative Works shall not include works that remain
|
| 61 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 62 |
+
the Work and Derivative Works thereof.
|
| 63 |
+
|
| 64 |
+
"Contribution" shall mean any work of authorship, including
|
| 65 |
+
the original version of the Work and any modifications or additions
|
| 66 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 67 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 68 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 69 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 70 |
+
means any form of electronic, verbal, or written communication sent
|
| 71 |
+
to the Licensor or its representatives, including but not limited to
|
| 72 |
+
communication on electronic mailing lists, source code control systems,
|
| 73 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 74 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 75 |
+
excluding communication that is conspicuously marked or otherwise
|
| 76 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 77 |
+
|
| 78 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 79 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 80 |
+
subsequently incorporated within the Work.
|
| 81 |
+
|
| 82 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 83 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 84 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 85 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 86 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 87 |
+
Work and such Derivative Works in Source or Object form.
|
| 88 |
+
|
| 89 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 90 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 91 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 92 |
+
(except as stated in this section) patent license to make, have made,
|
| 93 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 94 |
+
where such license applies only to those patent claims licensable
|
| 95 |
+
by such Contributor that are necessarily infringed by their
|
| 96 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 97 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 98 |
+
institute patent litigation against any entity (including a
|
| 99 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 100 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 101 |
+
or contributory patent infringement, then any patent licenses
|
| 102 |
+
granted to You under this License for that Work shall terminate
|
| 103 |
+
as of the date such litigation is filed.
|
| 104 |
+
|
| 105 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 106 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 107 |
+
modifications, and in Source or Object form, provided that You
|
| 108 |
+
meet the following conditions:
|
| 109 |
+
|
| 110 |
+
(a) You must give any other recipients of the Work or
|
| 111 |
+
Derivative Works a copy of this License; and
|
| 112 |
+
|
| 113 |
+
(b) You must cause any modified files to carry prominent notices
|
| 114 |
+
stating that You changed the files; and
|
| 115 |
+
|
| 116 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 117 |
+
that You distribute, all copyright, patent, trademark, and
|
| 118 |
+
attribution notices from the Source form of the Work,
|
| 119 |
+
excluding those notices that do not pertain to any part of
|
| 120 |
+
the Derivative Works; and
|
| 121 |
+
|
| 122 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 123 |
+
distribution, then any Derivative Works that You distribute must
|
| 124 |
+
include a readable copy of the attribution notices contained
|
| 125 |
+
within such NOTICE file, excluding those notices that do not
|
| 126 |
+
pertain to any part of the Derivative Works, in at least one
|
| 127 |
+
of the following places: within a NOTICE text file distributed
|
| 128 |
+
as part of the Derivative Works; within the Source form or
|
| 129 |
+
documentation, if provided along with the Derivative Works; or,
|
| 130 |
+
within a display generated by the Derivative Works, if and
|
| 131 |
+
wherever such third-party notices normally appear. The contents
|
| 132 |
+
of the NOTICE file are for informational purposes only and
|
| 133 |
+
do not modify the License. You may add Your own attribution
|
| 134 |
+
notices within Derivative Works that You distribute, alongside
|
| 135 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 136 |
+
that such additional attribution notices cannot be construed
|
| 137 |
+
as modifying the License.
|
| 138 |
+
|
| 139 |
+
You may add Your own copyright statement to Your modifications and
|
| 140 |
+
may provide additional or different license terms and conditions
|
| 141 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 142 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 143 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 144 |
+
the conditions stated in this License.
|
| 145 |
+
|
| 146 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 147 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 148 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 149 |
+
this License, without any additional terms or conditions.
|
| 150 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 151 |
+
the terms of any separate license agreement you may have executed
|
| 152 |
+
with Licensor regarding such Contributions.
|
| 153 |
+
|
| 154 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 155 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 156 |
+
except as required for reasonable and customary use in describing the
|
| 157 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 158 |
+
|
| 159 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 160 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 161 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 162 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 163 |
+
implied, including, without limitation, any warranties or conditions
|
| 164 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 165 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 166 |
+
appropriateness of using or redistributing the Work and assume any
|
| 167 |
+
risks associated with Your exercise of permissions under this License.
|
| 168 |
+
|
| 169 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 170 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 171 |
+
unless required by applicable law (such as deliberate and grossly
|
| 172 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 173 |
+
liable to You for damages, including any direct, indirect, special,
|
| 174 |
+
incidental, or consequential damages of any character arising as a
|
| 175 |
+
result of this License or out of the use or inability to use the
|
| 176 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 177 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 178 |
+
other commercial damages or losses), even if such Contributor
|
| 179 |
+
has been advised of the possibility of such damages.
|
| 180 |
+
|
| 181 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 182 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 183 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 184 |
+
or other liability obligations and/or rights consistent with this
|
| 185 |
+
License. However, in accepting such obligations, You may act only
|
| 186 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 187 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 188 |
+
defend, and hold each Contributor harmless for any liability
|
| 189 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 190 |
+
of your accepting any such warranty or additional liability.
|
| 191 |
+
|
| 192 |
+
END OF TERMS AND CONDITIONS
|
| 193 |
+
|
| 194 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 195 |
+
|
| 196 |
+
To apply the Apache License to your work, attach the following
|
| 197 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 198 |
+
replaced with your own identifying information. (Don't include
|
| 199 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 200 |
+
comment syntax for the file format. We also recommend that a
|
| 201 |
+
file or class name and description of purpose be included on the
|
| 202 |
+
same "printed page" as the copyright notice for easier
|
| 203 |
+
identification within third-party archives.
|
| 204 |
+
|
| 205 |
+
Copyright [yyyy] [name of copyright owner]
|
| 206 |
+
|
| 207 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 208 |
+
you may not use this file except in compliance with the License.
|
| 209 |
+
You may obtain a copy of the License at
|
| 210 |
+
|
| 211 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 212 |
+
|
| 213 |
+
Unless required by applicable law or agreed to in writing, software
|
| 214 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 215 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 216 |
+
See the License for the specific language governing permissions and
|
| 217 |
+
limitations under the License.
|
| 218 |
+
|
| 219 |
+
Software: vllm 0.9.1
|
| 220 |
+
Copyright notice:
|
| 221 |
+
Copyright 2025 The vLLM team.
|
| 222 |
+
|
| 223 |
+
License Text:
|
| 224 |
+
----------------------------------------
|
| 225 |
+
|
| 226 |
+
Apache License
|
| 227 |
+
Version 2.0, January 2004
|
| 228 |
+
http://www.apache.org/licenses/
|
| 229 |
+
|
| 230 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 231 |
+
|
| 232 |
+
1. Definitions.
|
| 233 |
+
|
| 234 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 235 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 236 |
+
|
| 237 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 238 |
+
the copyright owner that is granting the License.
|
| 239 |
+
|
| 240 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 241 |
+
other entities that control, are controlled by, or are under common
|
| 242 |
+
control with that entity. For the purposes of this definition,
|
| 243 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 244 |
+
direction or management of such entity, whether by contract or
|
| 245 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 246 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 247 |
+
|
| 248 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 249 |
+
exercising permissions granted by this License.
|
| 250 |
+
|
| 251 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 252 |
+
including but not limited to software source code, documentation
|
| 253 |
+
source, and configuration files.
|
| 254 |
+
|
| 255 |
+
"Object" form shall mean any form resulting from mechanical
|
| 256 |
+
transformation or translation of a Source form, including but
|
| 257 |
+
not limited to compiled object code, generated documentation,
|
| 258 |
+
and conversions to other media types.
|
| 259 |
+
|
| 260 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 261 |
+
Object form, made available under the License, as indicated by a
|
| 262 |
+
copyright notice that is included in or attached to the work
|
| 263 |
+
(an example is provided in the Appendix below).
|
| 264 |
+
|
| 265 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 266 |
+
form, that is based on (or derived from) the Work and for which the
|
| 267 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 268 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 269 |
+
of this License, Derivative Works shall not include works that remain
|
| 270 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 271 |
+
the Work and Derivative Works thereof.
|
| 272 |
+
|
| 273 |
+
"Contribution" shall mean any work of authorship, including
|
| 274 |
+
the original version of the Work and any modifications or additions
|
| 275 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 276 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 277 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 278 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 279 |
+
means any form of electronic, verbal, or written communication sent
|
| 280 |
+
to the Licensor or its representatives, including but not limited to
|
| 281 |
+
communication on electronic mailing lists, source code control systems,
|
| 282 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 283 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 284 |
+
excluding communication that is conspicuously marked or otherwise
|
| 285 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 286 |
+
|
| 287 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 288 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 289 |
+
subsequently incorporated within the Work.
|
| 290 |
+
|
| 291 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 292 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 293 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 294 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 295 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 296 |
+
Work and such Derivative Works in Source or Object form.
|
| 297 |
+
|
| 298 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 299 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 300 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 301 |
+
(except as stated in this section) patent license to make, have made,
|
| 302 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 303 |
+
where such license applies only to those patent claims licensable
|
| 304 |
+
by such Contributor that are necessarily infringed by their
|
| 305 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 306 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 307 |
+
institute patent litigation against any entity (including a
|
| 308 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 309 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 310 |
+
or contributory patent infringement, then any patent licenses
|
| 311 |
+
granted to You under this License for that Work shall terminate
|
| 312 |
+
as of the date such litigation is filed.
|
| 313 |
+
|
| 314 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 315 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 316 |
+
modifications, and in Source or Object form, provided that You
|
| 317 |
+
meet the following conditions:
|
| 318 |
+
|
| 319 |
+
(a) You must give any other recipients of the Work or
|
| 320 |
+
Derivative Works a copy of this License; and
|
| 321 |
+
|
| 322 |
+
(b) You must cause any modified files to carry prominent notices
|
| 323 |
+
stating that You changed the files; and
|
| 324 |
+
|
| 325 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 326 |
+
that You distribute, all copyright, patent, trademark, and
|
| 327 |
+
attribution notices from the Source form of the Work,
|
| 328 |
+
excluding those notices that do not pertain to any part of
|
| 329 |
+
the Derivative Works; and
|
| 330 |
+
|
| 331 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 332 |
+
distribution, then any Derivative Works that You distribute must
|
| 333 |
+
include a readable copy of the attribution notices contained
|
| 334 |
+
within such NOTICE file, excluding those notices that do not
|
| 335 |
+
pertain to any part of the Derivative Works, in at least one
|
| 336 |
+
of the following places: within a NOTICE text file distributed
|
| 337 |
+
as part of the Derivative Works; within the Source form or
|
| 338 |
+
documentation, if provided along with the Derivative Works; or,
|
| 339 |
+
within a display generated by the Derivative Works, if and
|
| 340 |
+
wherever such third-party notices normally appear. The contents
|
| 341 |
+
of the NOTICE file are for informational purposes only and
|
| 342 |
+
do not modify the License. You may add Your own attribution
|
| 343 |
+
notices within Derivative Works that You distribute, alongside
|
| 344 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 345 |
+
that such additional attribution notices cannot be construed
|
| 346 |
+
as modifying the License.
|
| 347 |
+
|
| 348 |
+
You may add Your own copyright statement to Your modifications and
|
| 349 |
+
may provide additional or different license terms and conditions
|
| 350 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 351 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 352 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 353 |
+
the conditions stated in this License.
|
| 354 |
+
|
| 355 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 356 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 357 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 358 |
+
this License, without any additional terms or conditions.
|
| 359 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 360 |
+
the terms of any separate license agreement you may have executed
|
| 361 |
+
with Licensor regarding such Contributions.
|
| 362 |
+
|
| 363 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 364 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 365 |
+
except as required for reasonable and customary use in describing the
|
| 366 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 367 |
+
|
| 368 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 369 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 370 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 371 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 372 |
+
implied, including, without limitation, any warranties or conditions
|
| 373 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 374 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 375 |
+
appropriateness of using or redistributing the Work and assume any
|
| 376 |
+
risks associated with Your exercise of permissions under this License.
|
| 377 |
+
|
| 378 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 379 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 380 |
+
unless required by applicable law (such as deliberate and grossly
|
| 381 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 382 |
+
liable to You for damages, including any direct, indirect, special,
|
| 383 |
+
incidental, or consequential damages of any character arising as a
|
| 384 |
+
result of this License or out of the use or inability to use the
|
| 385 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 386 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 387 |
+
other commercial damages or losses), even if such Contributor
|
| 388 |
+
has been advised of the possibility of such damages.
|
| 389 |
+
|
| 390 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 391 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 392 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 393 |
+
or other liability obligations and/or rights consistent with this
|
| 394 |
+
License. However, in accepting such obligations, You may act only
|
| 395 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 396 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 397 |
+
defend, and hold each Contributor harmless for any liability
|
| 398 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 399 |
+
of your accepting any such warranty or additional liability.
|
| 400 |
+
|
| 401 |
+
END OF TERMS AND CONDITIONS
|
| 402 |
+
|
| 403 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 404 |
+
|
| 405 |
+
To apply the Apache License to your work, attach the following
|
| 406 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 407 |
+
replaced with your own identifying information. (Don't include
|
| 408 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 409 |
+
comment syntax for the file format. We also recommend that a
|
| 410 |
+
file or class name and description of purpose be included on the
|
| 411 |
+
same "printed page" as the copyright notice for easier
|
| 412 |
+
identification within third-party archives.
|
| 413 |
+
|
| 414 |
+
Copyright [yyyy] [name of copyright owner]
|
| 415 |
+
|
| 416 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 417 |
+
you may not use this file except in compliance with the License.
|
| 418 |
+
You may obtain a copy of the License at
|
| 419 |
+
|
| 420 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 421 |
+
|
| 422 |
+
Unless required by applicable law or agreed to in writing, software
|
| 423 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 424 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 425 |
+
See the License for the specific language governing permissions and
|
| 426 |
+
limitations under the License.
|
| 427 |
+
|
| 428 |
+
Software: vllm-ascend 0.9.1
|
| 429 |
+
Copyright notice:
|
| 430 |
+
Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
|
| 431 |
+
|
| 432 |
+
License Text:
|
| 433 |
+
----------------------------------------
|
| 434 |
+
|
| 435 |
+
Apache License
|
| 436 |
+
Version 2.0, January 2004
|
| 437 |
+
http://www.apache.org/licenses/
|
| 438 |
+
|
| 439 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 440 |
+
|
| 441 |
+
1. Definitions.
|
| 442 |
+
|
| 443 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 444 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 445 |
+
|
| 446 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 447 |
+
the copyright owner that is granting the License.
|
| 448 |
+
|
| 449 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 450 |
+
other entities that control, are controlled by, or are under common
|
| 451 |
+
control with that entity. For the purposes of this definition,
|
| 452 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 453 |
+
direction or management of such entity, whether by contract or
|
| 454 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 455 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 456 |
+
|
| 457 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 458 |
+
exercising permissions granted by this License.
|
| 459 |
+
|
| 460 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 461 |
+
including but not limited to software source code, documentation
|
| 462 |
+
source, and configuration files.
|
| 463 |
+
|
| 464 |
+
"Object" form shall mean any form resulting from mechanical
|
| 465 |
+
transformation or translation of a Source form, including but
|
| 466 |
+
not limited to compiled object code, generated documentation,
|
| 467 |
+
and conversions to other media types.
|
| 468 |
+
|
| 469 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 470 |
+
Object form, made available under the License, as indicated by a
|
| 471 |
+
copyright notice that is included in or attached to the work
|
| 472 |
+
(an example is provided in the Appendix below).
|
| 473 |
+
|
| 474 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 475 |
+
form, that is based on (or derived from) the Work and for which the
|
| 476 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 477 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 478 |
+
of this License, Derivative Works shall not include works that remain
|
| 479 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 480 |
+
the Work and Derivative Works thereof.
|
| 481 |
+
|
| 482 |
+
"Contribution" shall mean any work of authorship, including
|
| 483 |
+
the original version of the Work and any modifications or additions
|
| 484 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 485 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 486 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 487 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 488 |
+
means any form of electronic, verbal, or written communication sent
|
| 489 |
+
to the Licensor or its representatives, including but not limited to
|
| 490 |
+
communication on electronic mailing lists, source code control systems,
|
| 491 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 492 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 493 |
+
excluding communication that is conspicuously marked or otherwise
|
| 494 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 495 |
+
|
| 496 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 497 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 498 |
+
subsequently incorporated within the Work.
|
| 499 |
+
|
| 500 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 501 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 502 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 503 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 504 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 505 |
+
Work and such Derivative Works in Source or Object form.
|
| 506 |
+
|
| 507 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 508 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 509 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 510 |
+
(except as stated in this section) patent license to make, have made,
|
| 511 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 512 |
+
where such license applies only to those patent claims licensable
|
| 513 |
+
by such Contributor that are necessarily infringed by their
|
| 514 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 515 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 516 |
+
institute patent litigation against any entity (including a
|
| 517 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 518 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 519 |
+
or contributory patent infringement, then any patent licenses
|
| 520 |
+
granted to You under this License for that Work shall terminate
|
| 521 |
+
as of the date such litigation is filed.
|
| 522 |
+
|
| 523 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 524 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 525 |
+
modifications, and in Source or Object form, provided that You
|
| 526 |
+
meet the following conditions:
|
| 527 |
+
|
| 528 |
+
(a) You must give any other recipients of the Work or
|
| 529 |
+
Derivative Works a copy of this License; and
|
| 530 |
+
|
| 531 |
+
(b) You must cause any modified files to carry prominent notices
|
| 532 |
+
stating that You changed the files; and
|
| 533 |
+
|
| 534 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 535 |
+
that You distribute, all copyright, patent, trademark, and
|
| 536 |
+
attribution notices from the Source form of the Work,
|
| 537 |
+
excluding those notices that do not pertain to any part of
|
| 538 |
+
the Derivative Works; and
|
| 539 |
+
|
| 540 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 541 |
+
distribution, then any Derivative Works that You distribute must
|
| 542 |
+
include a readable copy of the attribution notices contained
|
| 543 |
+
within such NOTICE file, excluding those notices that do not
|
| 544 |
+
pertain to any part of the Derivative Works, in at least one
|
| 545 |
+
of the following places: within a NOTICE text file distributed
|
| 546 |
+
as part of the Derivative Works; within the Source form or
|
| 547 |
+
documentation, if provided along with the Derivative Works; or,
|
| 548 |
+
within a display generated by the Derivative Works, if and
|
| 549 |
+
wherever such third-party notices normally appear. The contents
|
| 550 |
+
of the NOTICE file are for informational purposes only and
|
| 551 |
+
do not modify the License. You may add Your own attribution
|
| 552 |
+
notices within Derivative Works that You distribute, alongside
|
| 553 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 554 |
+
that such additional attribution notices cannot be construed
|
| 555 |
+
as modifying the License.
|
| 556 |
+
|
| 557 |
+
You may add Your own copyright statement to Your modifications and
|
| 558 |
+
may provide additional or different license terms and conditions
|
| 559 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 560 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 561 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 562 |
+
the conditions stated in this License.
|
| 563 |
+
|
| 564 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 565 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 566 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 567 |
+
this License, without any additional terms or conditions.
|
| 568 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 569 |
+
the terms of any separate license agreement you may have executed
|
| 570 |
+
with Licensor regarding such Contributions.
|
| 571 |
+
|
| 572 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 573 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 574 |
+
except as required for reasonable and customary use in describing the
|
| 575 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 576 |
+
|
| 577 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 578 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 579 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 580 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 581 |
+
implied, including, without limitation, any warranties or conditions
|
| 582 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 583 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 584 |
+
appropriateness of using or redistributing the Work and assume any
|
| 585 |
+
risks associated with Your exercise of permissions under this License.
|
| 586 |
+
|
| 587 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 588 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 589 |
+
unless required by applicable law (such as deliberate and grossly
|
| 590 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 591 |
+
liable to You for damages, including any direct, indirect, special,
|
| 592 |
+
incidental, or consequential damages of any character arising as a
|
| 593 |
+
result of this License or out of the use or inability to use the
|
| 594 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 595 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 596 |
+
other commercial damages or losses), even if such Contributor
|
| 597 |
+
has been advised of the possibility of such damages.
|
| 598 |
+
|
| 599 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 600 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 601 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 602 |
+
or other liability obligations and/or rights consistent with this
|
| 603 |
+
License. However, in accepting such obligations, You may act only
|
| 604 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 605 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 606 |
+
defend, and hold each Contributor harmless for any liability
|
| 607 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 608 |
+
of your accepting any such warranty or additional liability.
|
| 609 |
+
|
| 610 |
+
END OF TERMS AND CONDITIONS
|
| 611 |
+
|
| 612 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 613 |
+
|
| 614 |
+
To apply the Apache License to your work, attach the following
|
| 615 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 616 |
+
replaced with your own identifying information. (Don't include
|
| 617 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 618 |
+
comment syntax for the file format. We also recommend that a
|
| 619 |
+
file or class name and description of purpose be included on the
|
| 620 |
+
same "printed page" as the copyright notice for easier
|
| 621 |
+
identification within third-party archives.
|
| 622 |
+
|
| 623 |
+
Copyright [yyyy] [name of copyright owner]
|
| 624 |
+
|
| 625 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 626 |
+
you may not use this file except in compliance with the License.
|
| 627 |
+
You may obtain a copy of the License at
|
| 628 |
+
|
| 629 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 630 |
+
|
| 631 |
+
Unless required by applicable law or agreed to in writing, software
|
| 632 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 633 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 634 |
+
See the License for the specific language governing permissions and
|
| 635 |
+
limitations under the License.
|
README.md
CHANGED
|
@@ -1,5 +1,109 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# openPangu-VL-7B
|
| 2 |
+
中文 | [English](README_EN.md) | [技术报告](doc/technical_report.pdf)
|
| 3 |
+
|
| 4 |
+
## 1. 简介
|
| 5 |
+
|
| 6 |
+
openPangu-VL-7B 是基于昇腾 NPU ,基于openPangu-Embedded-7B-V1.1语言基模和openPangu-ViT-600M视觉编码器训练的高效多模态模型。openPangu-VL-7B 训练了约 3T tokens,具备通用视觉对话、文档理解、目标定位与计数、视频理解、视觉高阶推理等能力。该模型为快思考模型。
|
| 7 |
+
|
| 8 |
+
## 2. 模型架构
|
| 9 |
+
|
| 10 |
+
| | openPangu-VL-7B |
|
| 11 |
+
| :---------------------------: | :----------------: |
|
| 12 |
+
| **语言模型** | |
|
| 13 |
+
| Architecture | Dense |
|
| 14 |
+
| Parameters (Non-Embedding) | 7B |
|
| 15 |
+
| Number of Layers | 34 |
|
| 16 |
+
| Hidden Dimension | 12800 |
|
| 17 |
+
| Attention Mechanism | GQA |
|
| 18 |
+
| Number of Attention Heads | 32 for Q,8 for KV |
|
| 19 |
+
| Vocabulary Size | 153k |
|
| 20 |
+
| Context Length (Natively) | 128k |
|
| 21 |
+
| **视觉编码器** | |
|
| 22 |
+
| Architecture | 22 Window Attention + 4 Full Attention |
|
| 23 |
+
| Number of Layers | 26 |
|
| 24 |
+
| Attention Hidden Size | 1536 |
|
| 25 |
+
| FFN Hidden Size | 4608 |
|
| 26 |
+
| Number of Attention Heads | 16 |
|
| 27 |
+
| Parameters | 615M |
|
| 28 |
+
|
| 29 |
+
## 3. 测评结果
|
| 30 |
+
|
| 31 |
+
| 测评集 | openPangu-VL-7B |
|
| 32 |
+
| :------------: | :--------: |
|
| 33 |
+
| **通用视觉对话** | |
|
| 34 |
+
| MMBench V1.1 DEV | 86.5 |
|
| 35 |
+
| MMStar | 70.1 |
|
| 36 |
+
| RealWorldQA | 76.1 |
|
| 37 |
+
| AI2D | 84.7 |
|
| 38 |
+
| **光学符号识别/文档理解** | |
|
| 39 |
+
| OCRBench | 907 |
|
| 40 |
+
| TextVQA | 85.1 |
|
| 41 |
+
| DocVQA | 96.0 |
|
| 42 |
+
| ChartQA | 88.3 |
|
| 43 |
+
| CharXiv dq/rq | 83.9/54.3 |
|
| 44 |
+
| **视觉学科能力** | |
|
| 45 |
+
| MMMU | 65.2 |
|
| 46 |
+
| MMMU-Pro | 52.6 |
|
| 47 |
+
| MathVista | 75.0 |
|
| 48 |
+
| **目标定位/计数** | |
|
| 49 |
+
| RefCOCO-avg | 90.6 |
|
| 50 |
+
| ODinW-13 | 51.5 |
|
| 51 |
+
| CountBench | 96.1 |
|
| 52 |
+
| Point-Bench | 65.4 |
|
| 53 |
+
| **多图** | |
|
| 54 |
+
| BLINK | 63.3 |
|
| 55 |
+
| MUIRBench | 61.6 |
|
| 56 |
+
| **视频理解** | |
|
| 57 |
+
| MVBench | 74.0 |
|
| 58 |
+
| VideoMME (w/o sub) | 68.0 |
|
| 59 |
+
| MLVU | 76.9 |
|
| 60 |
+
| **语言能力** | |
|
| 61 |
+
| MMLU-Pro | 78.2 |
|
| 62 |
+
| MMLU-Redux | 87.3 |
|
| 63 |
+
| GPQA-Diamond | 65.2 |
|
| 64 |
+
| C-Eval | 83.2 |
|
| 65 |
+
| AIME25 | 36.5 |
|
| 66 |
+
| Math-500 | 89.4 |
|
| 67 |
+
| LiveCodeBenchV6 | 24.6 |
|
| 68 |
+
| MBPP+ | 68.5 |
|
| 69 |
+
| IFEval | 83.0 |
|
| 70 |
+
|
| 71 |
+
**注:** 评测使用**vllm-ascend部署推理,系统prompt为空**。一般而言,图片最小分辨率设置为2304\*28\*28能获得最优的测评效果。(OCRBench中的极小图OCR除外,建议设置为不大于64\*28\*28。)具体prompt和分辨率设置参见[技术报告](doc/technical_report.pdf)附录。
|
| 72 |
+
|
| 73 |
+
## 4. 部署和使用
|
| 74 |
+
|
| 75 |
+
### vllm-ascend部署(推荐)
|
| 76 |
+
- 使用vllm-ascend推理框架,参考[[vllm_ascend_for_openpangu_vl_7b](doc/vllm_ascend_for_openpangu_vl_7b.md)]进行服务部署。
|
| 77 |
+
|
| 78 |
+
- 完成推理服务部署后,使用此脚本测试是否部署成功。
|
| 79 |
+
```bash
|
| 80 |
+
cd inference/vllm_ascend/examples; python quick_start.py
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
### 直接推理
|
| 84 |
+
环境配置:
|
| 85 |
+
- python==3.10
|
| 86 |
+
- CANN==8.1.RC1
|
| 87 |
+
```bash
|
| 88 |
+
cd inference; pip install -r requirements.txt
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
推理:
|
| 92 |
+
```bash
|
| 93 |
+
cd inference; python generate.py
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
### 能力展示
|
| 97 |
+
- 更多推理样例和能力展示,请参见`cookbooks`。
|
| 98 |
+
|
| 99 |
+
## 5. 模型许可证
|
| 100 |
+
除文件中对开源许可证另有约定外,openPangu-VL-7B 模型根据 OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0 授权,旨在允许使用并促进人工智能技术的进一步发展。有关详细信息,请参阅模型存储库根目录中的 [LICENSE](LICENSE) 文件。
|
| 101 |
+
|
| 102 |
+
## 6. 免责声明
|
| 103 |
+
由于 openPangu-VL-7B (“模型”)所依赖的技术固有的限制,以及人工智能生成的内容是由盘古自动生成的,华为无法对以下事项做出任何保证:
|
| 104 |
+
- 该模型的输出通过AI算法自动生成,不能排除某些信息可能存在缺陷、不合理或引起不适的可能性,生成的内容不代表华为的态度或立场;
|
| 105 |
+
- 无法保证该模型100%准确、可靠、功能齐全、及时、安全、无错误、不间断、持续稳定或无任何故障;
|
| 106 |
+
- 该模型的输出内容不构成任何建议或决策,也不保证生成的内容的真实性、完整性、准确性、及时性、合法性、功能性或实用性。生成的内容不能替代医疗、法律等领域的专业人士回答您的问题。生成的内容仅供参考,不代表华为的任何态度、立场或观点。您需要根据实际情况做出独立判断,华为不承担任何责任。
|
| 107 |
+
|
| 108 |
+
## 7. 反馈
|
| 109 |
+
如果有任何意见和建议,请提交issue或联系[openPangu@huawei.com](mailto:openPangu@huawei.com)。
|
README_EN.md
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# openPangu-VL-7B
|
| 2 |
+
|
| 3 |
+
[中文](README.md) | English | [Technical Report](doc/technical_report.pdf)
|
| 4 |
+
|
| 5 |
+
## 1. Introduction
|
| 6 |
+
The openPangu-VL-7B is an efficient multimodal model based on the Ascend NPU, trained using the openPangu-Embedded-7B-V1.1 base language model and the openPangu-ViT-600M vision encoder. The openPangu-VL-7B has been trained on approximately 3T tokens and possesses capabilities such as general Visual Question Answering, chart and document understanding, object grounding and counting, video understanding, and advanced visual reasoning. This model is designed for fast thinking mode.
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
## 2. Model Architecture
|
| 10 |
+
|
| 11 |
+
| | openPangu-VL-7B |
|
| 12 |
+
| :---------------------------: | :----------------: |
|
| 13 |
+
| **LLM** | |
|
| 14 |
+
| Architecture | Dense |
|
| 15 |
+
| Parameters (Non-Embedding) | 7B |
|
| 16 |
+
| Number of Layers | 34 |
|
| 17 |
+
| Hidden Dimension | 12800 |
|
| 18 |
+
| Attention Mechanism | GQA |
|
| 19 |
+
| Number of Attention Heads | 32 for Q,8 for KV |
|
| 20 |
+
| Vocabulary Size | 153k |
|
| 21 |
+
| Context Length (Natively) | 128k |
|
| 22 |
+
| **Vision Encoder** | |
|
| 23 |
+
| Architecture | 22 Window Attention + 4 Full Attention |
|
| 24 |
+
| Number of Layers | 26 |
|
| 25 |
+
| Attention Hidden Size | 1536 |
|
| 26 |
+
| FFN Hidden Size | 4608 |
|
| 27 |
+
| Number of Attention Heads | 16 |
|
| 28 |
+
| Parameters | 615M |
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## 3. Results
|
| 32 |
+
|
| 33 |
+
| Benchmark | openPangu-VL-7B |
|
| 34 |
+
| :------------: | :--------: |
|
| 35 |
+
| **General VQA** | |
|
| 36 |
+
| MMBench V1.1 DEV | 86.5 |
|
| 37 |
+
| MMStar | 70.1 |
|
| 38 |
+
| RealWorldQA | 76.1 |
|
| 39 |
+
| AI2D | 84.7 |
|
| 40 |
+
| **OCR & Chart/Document Understanding** | |
|
| 41 |
+
| OCRBench | 907 |
|
| 42 |
+
| TextVQA | 85.1 |
|
| 43 |
+
| DocVQA | 96.0 |
|
| 44 |
+
| ChartQA | 88.3 |
|
| 45 |
+
| CharXiv dq/rq | 83.9/54.3 |
|
| 46 |
+
| **STEM** | |
|
| 47 |
+
| MMMU | 65.2 |
|
| 48 |
+
| MMMU-Pro | 52.6 |
|
| 49 |
+
| MathVista | 75.0 |
|
| 50 |
+
| **Object Grounding/Counting** | |
|
| 51 |
+
| RefCOCO-avg | 90.6 |
|
| 52 |
+
| ODinW-13 | 51.5 |
|
| 53 |
+
| CountBench | 96.1 |
|
| 54 |
+
| Point-Bench | 65.4 |
|
| 55 |
+
| **Multi-Image** | |
|
| 56 |
+
| BLINK | 63.3 |
|
| 57 |
+
| MUIRBench | 61.6 |
|
| 58 |
+
| **Video Understanding** | |
|
| 59 |
+
| MVBench | 74.0 |
|
| 60 |
+
| VideoMME (w/o sub) | 68.0 |
|
| 61 |
+
| MLVU | 76.9 |
|
| 62 |
+
| **Text-Centric Benchmark** | |
|
| 63 |
+
| MMLU-Pro | 78.2 |
|
| 64 |
+
| MMLU-Redux | 87.3 |
|
| 65 |
+
| GPQA-Diamond | 65.2 |
|
| 66 |
+
| C-Eval | 83.2 |
|
| 67 |
+
| AIME25 | 36.5 |
|
| 68 |
+
| Math-500 | 89.4 |
|
| 69 |
+
| LiveCodeBenchV6 | 24.6 |
|
| 70 |
+
| MBPP+ | 68.5 |
|
| 71 |
+
| IFEval | 83.0 |
|
| 72 |
+
|
| 73 |
+
**Note:** The evaluation is conducted with **vllm-ascend deploy** and **the system prompt remains empty**. Generally, setting the minimum resolution to 2304\*28\*28 can yield the best evaluation results. (Except for the extremely small image OCR in OCRBench, it is recommended to set the resolution to no more than 64\*28\*28.) Detailed settings for different benchmarks can be found in [Technical Report](doc/technical_report.pdf).
|
| 74 |
+
|
| 75 |
+
## 4. Deployment
|
| 76 |
+
|
| 77 |
+
### vllm-ascend deploy (recommended)
|
| 78 |
+
- vllm-ascend: please refer to [[vllm_ascend_for_openpangu_vl_7b](doc/vllm_ascend_for_openpangu_vl_7b_EN.md)] to deploy the inference serving.
|
| 79 |
+
|
| 80 |
+
- After finishing deployment, you can test the API with the following script.
|
| 81 |
+
```bash
|
| 82 |
+
cd inference/vllm_ascend/examples; python quick_start.py
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
### Direct inference
|
| 86 |
+
|
| 87 |
+
Environment:
|
| 88 |
+
- python==3.10
|
| 89 |
+
- CANN==8.1.RC1
|
| 90 |
+
```bash
|
| 91 |
+
cd inference; pip install -r requirements.txt
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
Inference:
|
| 95 |
+
```bash
|
| 96 |
+
cd inference; python generate.py
|
| 97 |
+
```
|
| 98 |
+
|
| 99 |
+
### Model abilities
|
| 100 |
+
- For more examples and demonstrations of model abilities, please refer to `cookbooks`.
|
| 101 |
+
|
| 102 |
+
## 5. Model License
|
| 103 |
+
Unless otherwise noted, the openPangu-VL-7B model is licensed under the terms and conditions of OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0, which is intended to be used permissively and enable the further development of artificial intelligence technologies. Please refer to the [LICENSE](LICENSE) file located in the root directory of the model repository for details.
|
| 104 |
+
|
| 105 |
+
## 6. Disclaimer
|
| 106 |
+
Due to the technical limitations inherent in the technology on which the openPangu-VL-7B (“Model”) relies and the fact that the artificial intelligence generated content is automatically produced by Model, Huawei cannot make any guarantees regarding the following matters:
|
| 107 |
+
|
| 108 |
+
- The output of this Model is automatically generated via AI algorithms, it does not rule out the possibility that some of the information may be flawed, unreasonable, or cause discomfort, and the generated content does not represent Huawei's attitude or standpoint;
|
| 109 |
+
- There is no guarantee that this Model is 100% accurate, reliable, functional, timely, secure, safe, error-free, uninterrupted, continuously stable, or free of any faults;
|
| 110 |
+
- The output of this Model does not constitute any advices or decisions for you, and it does not guarantee the authenticity, completeness, accuracy, timeliness, legality, functionality, or practicality of the generated content. The generated content cannot replace professionals in medical, legal, and other fields in answering your questions. The generated content is for your reference only and does not represent any attitude, standpoint, or position of Huawei. You need to make independent judgments based on your actual situation, and Huawei does not assume any responsibilities.
|
| 111 |
+
|
| 112 |
+
## 7. Contact
|
| 113 |
+
If you have any questions, please raise an issue or contact us at [openPangu@huawei.com](mailto:openPangu@huawei.com).
|
chat_template.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"chat_template": "{% macro role_name(role) -%}{%- if role == 'assistant' -%}助手{%- elif role == 'system' -%}系统{%- elif role == 'user' -%}用户{%- elif role == 'tool' -%}工具{%- elif role == 'function' -%}方法{%- else -%}{{ role }}{%- endif -%}{%- endmacro %}{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message.role != 'system' %}<s>[unused9]系统:[unused10]{% endif %}{% if loop.first and message.role == 'system' %}<s>{% endif %}[unused9]{{ role_name(message.role) }}:{% if message.content is string %}{{ message.content }}[unused10]{% else %}{% for content in message.content %}{% set ctype = content.type|default('') %}{% set is_img = (ctype == 'image') or ('image' in content) or ('image_url' in content) %}{% set is_vid = (ctype == 'video') or ('video' in content) %}{% if is_img %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}图片 {{ image_count.value }}: {% endif %}[unused18][unused19][unused20]{% elif is_vid %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}视频 {{ video_count.value }}: {% endif %}[unused18][unused32][unused20]{% elif content.text is defined %}{{ content.text }}{% endif %}{% endfor %}[unused10]{% endif %}{% endfor %}{% if add_generation_prompt %}[unused9]助手:{% endif %}"
|
| 3 |
+
}
|
config.json
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"OpenPanguVLForConditionalGeneration"
|
| 4 |
+
],
|
| 5 |
+
"bias": true,
|
| 6 |
+
"auto_map": {
|
| 7 |
+
"AutoConfig": "configuration_openpangu_vl.OpenPanguVLConfig",
|
| 8 |
+
"AutoModel": "modeling_openpangu_vl.OpenPanguVLModel",
|
| 9 |
+
"AutoModelForCausalLM": "modeling_openpangu_vl.OpenPanguVL"
|
| 10 |
+
},
|
| 11 |
+
"attention_dropout": 0.0,
|
| 12 |
+
"bos_token_id": 1,
|
| 13 |
+
"eos_token_id": 45892,
|
| 14 |
+
"vision_start_token_id": 45986,
|
| 15 |
+
"vision_end_token_id": 46007,
|
| 16 |
+
"image_token_id": 46005,
|
| 17 |
+
"video_token_id": 144208,
|
| 18 |
+
"hidden_act": "silu",
|
| 19 |
+
"hidden_size": 4096,
|
| 20 |
+
"initializer_range": 0.02,
|
| 21 |
+
"intermediate_size": 12800,
|
| 22 |
+
"max_position_embeddings": 131072,
|
| 23 |
+
"max_window_layers": 34,
|
| 24 |
+
"model_type": "openpangu_vl",
|
| 25 |
+
"num_attention_heads": 32,
|
| 26 |
+
"num_hidden_layers": 34,
|
| 27 |
+
"num_key_value_heads": 8,
|
| 28 |
+
"rms_norm_eps": 1e-05,
|
| 29 |
+
"rope_theta": 64000000.0,
|
| 30 |
+
|
| 31 |
+
"tie_word_embeddings": false,
|
| 32 |
+
"torch_dtype": "bfloat16",
|
| 33 |
+
"transformers_version": "4.52.4",
|
| 34 |
+
"use_cache": true,
|
| 35 |
+
"use_sliding_window": false,
|
| 36 |
+
"vision_config": {
|
| 37 |
+
"rms_norm_eps": 1e-06,
|
| 38 |
+
"depth": 26,
|
| 39 |
+
"hidden_act": "gelu",
|
| 40 |
+
"hidden_size": 1536,
|
| 41 |
+
"intermediate_size": 4608,
|
| 42 |
+
"num_heads": 16,
|
| 43 |
+
"in_chans": 3,
|
| 44 |
+
"out_hidden_size": 3584,
|
| 45 |
+
"patch_size": 14,
|
| 46 |
+
"spatial_merge_size": 2,
|
| 47 |
+
"spatial_patch_size": 14,
|
| 48 |
+
"window_size": 112,
|
| 49 |
+
"fullatt_block_indexes": [
|
| 50 |
+
5,
|
| 51 |
+
12,
|
| 52 |
+
19,
|
| 53 |
+
25
|
| 54 |
+
],
|
| 55 |
+
"tokens_per_second": 2,
|
| 56 |
+
"temporal_patch_size": 2
|
| 57 |
+
},
|
| 58 |
+
"rope_scaling": {
|
| 59 |
+
"rope_type": "default",
|
| 60 |
+
"type": "default",
|
| 61 |
+
"mrope_section": [
|
| 62 |
+
10,
|
| 63 |
+
27,
|
| 64 |
+
27
|
| 65 |
+
],
|
| 66 |
+
"mrope_interleaved": true
|
| 67 |
+
},
|
| 68 |
+
"vocab_size": 153376
|
| 69 |
+
}
|
configuration_openpangu_vl.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
|
| 3 |
+
# Copyright 2025 The HuggingFace Inc. team.
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 17 |
+
from transformers.modeling_rope_utils import rope_config_validation
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class OpenPanguVLVisionConfig(PretrainedConfig):
    """Configuration for the openPangu-VL vision encoder.

    Mirrors the ``vision_config`` section of ``config.json``; the defaults
    match the released openPangu-VL-7B checkpoint.

    Args:
        depth: Number of transformer blocks in the vision tower.
        num_heads: Attention heads per block.
        rms_norm_eps: Epsilon used by the RMSNorm layers.
        hidden_size: Width of the vision transformer.
        hidden_act: Activation function name (e.g. ``"gelu"``).
        intermediate_size: MLP hidden width.
        out_hidden_size: Width of the projected visual features handed to the
            language model.
        in_chans: Number of input image channels (stored as ``in_channels``).
        patch_size: Spatial patch size of the patch embedding.
        spatial_merge_size: Merge factor applied to adjacent patches.
        window_size: Window size for windowed-attention blocks.
        fullatt_block_indexes: Indexes of blocks that use full (non-windowed)
            attention; defaults to ``[5, 12, 19, 25]`` when ``None``.
        tokens_per_second: Temporal token rate used for video inputs.
        temporal_patch_size: Number of frames merged into one temporal patch.
    """

    model_type = "openpangu_vl"
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=26,
        num_heads=16,
        rms_norm_eps=1e-06,
        hidden_size=1536,
        hidden_act="gelu",
        intermediate_size=4608,
        out_hidden_size=3584,
        in_chans=3,
        patch_size=14,
        spatial_merge_size=2,
        window_size=112,
        fullatt_block_indexes=None,
        tokens_per_second=2,
        temporal_patch_size=2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.depth = depth
        self.num_heads = num_heads
        self.rms_norm_eps = rms_norm_eps
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.out_hidden_size = out_hidden_size
        # NOTE: attribute is ``in_channels`` even though the keyword (and the
        # config.json key) is ``in_chans``.
        self.in_channels = in_chans
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.window_size = window_size
        # Build a fresh list per instance instead of using a mutable default
        # argument (``fullatt_block_indexes=[5, 12, 19, 25]``), which would be
        # shared across every instance constructed without the argument.
        self.fullatt_block_indexes = (
            [5, 12, 19, 25] if fullatt_block_indexes is None else fullatt_block_indexes
        )
        self.tokens_per_second = tokens_per_second
        self.temporal_patch_size = temporal_patch_size
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class OpenPanguVLTextConfig(PretrainedConfig):
    """Configuration for the openPangu-VL language (text) decoder.

    Mirrors the ``text_config`` section of ``config.json``. Defaults describe
    the 7B text backbone; ``rope_scaling`` follows the Transformers rope
    convention and may carry the extra ``mrope_section`` /
    ``mrope_interleaved`` keys used by multimodal rotary embeddings.
    """

    model_type = "openpangu_vl_text"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        num_hidden_layers=34,
        num_attention_heads=32,
        num_key_value_heads=8,
        rms_norm_eps=1e-05,
        hidden_size=4096,
        hidden_act="silu",
        intermediate_size=12800,
        initializer_range=0.02,
        tie_word_embeddings=False,
        use_sliding_window=False,
        sliding_window=None,
        max_window_layers=80,
        vocab_size=153376,
        max_position_embeddings=32768,
        use_cache=True,
        rope_theta=64000000.0,
        attention_dropout=0.0,
        rope_scaling=None,
        image_token_id=None,
        video_token_id=None,
        **kwargs,
    ):
        # Transformer topology.
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range
        # Sliding-window attention settings.
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers
        # Vocabulary / sequence-length settings.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.use_cache = use_cache
        # Rotary embedding and regularization settings.
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.rope_scaling = rope_scaling

        # Legacy checkpoints store the multimodal rope variant as
        # ``type: "mrope"``; normalize it to "default" and mirror the value
        # under the modern ``rope_type`` key before validating. The dict is
        # mutated in place so the caller's object stays in sync.
        rope_params = self.rope_scaling
        if rope_params is not None and "type" in rope_params:
            if rope_params["type"] == "mrope":
                rope_params["type"] = "default"
            rope_params["rope_type"] = rope_params["type"]
        rope_config_validation(self, ignore_keys={"mrope_section", "mrope_interleaved"})

        # Placeholder-token ids marking where visual features are spliced
        # into the token stream.
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class OpenPanguVLConfig(PretrainedConfig):
    """Top-level configuration composing the vision and text sub-configs.

    Accepts each sub-config as a dict (parsed from ``config.json``), as an
    already-instantiated config object, or ``None`` for defaults. When
    ``text_config`` is ``None``, the remaining ``kwargs`` are forwarded to the
    text sub-config to support the flat (legacy) config layout where text
    fields live at the top level.

    Args:
        text_config: Dict, ``OpenPanguVLTextConfig`` instance, or ``None``.
        vision_config: Dict, ``OpenPanguVLVisionConfig`` instance, or ``None``.
        image_token_id: Id of the image placeholder token.
        video_token_id: Id of the video placeholder token.
    """

    model_type = "openpangu_vl"
    sub_configs = {"vision_config": OpenPanguVLVisionConfig, "text_config": OpenPanguVLTextConfig}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=46005,
        video_token_id=144208,
        **kwargs,
    ):
        if isinstance(vision_config, dict):
            self.vision_config = self.sub_configs["vision_config"](**vision_config)
        elif vision_config is None:
            self.vision_config = self.sub_configs["vision_config"]()
        else:
            # Fix: an already-instantiated sub-config was previously dropped,
            # leaving ``self.vision_config`` unset and breaking any later
            # attribute access.
            self.vision_config = vision_config

        if isinstance(text_config, dict):
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            # Flat layout: forward top-level kwargs to the text sub-config.
            self.text_config = self.sub_configs["text_config"](**kwargs)
        else:
            # Same fix as above for an instantiated text sub-config.
            self.text_config = text_config

        # Placeholder-token ids marking where image/video features are
        # inserted into the token stream.
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id

        super().__init__(**kwargs)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
__all__ = ["OpenPanguVLConfig", "OpenPanguVLTextConfig"]
|
cookbooks/assets/grounding/depth_1.jpg
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/depth_2.jpg
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/dinner.jpg
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/macaron.jpg
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/peoples.jpg
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/test_example_point_01.png
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/test_example_point_02.png
ADDED
|
Git LFS Details
|
cookbooks/assets/grounding/tools.jpg
ADDED
|
cookbooks/assets/ocr/example1_1.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example1_2.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example2.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example3.jpg
ADDED
|
cookbooks/assets/ocr/example4.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example5.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example6.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example7.png
ADDED
|
Git LFS Details
|
cookbooks/assets/ocr/example7_2.png
ADDED
|
Git LFS Details
|
cookbooks/assets/reasoning/biology.png
ADDED
|
cookbooks/assets/reasoning/chemistry.png
ADDED
|
Git LFS Details
|
cookbooks/assets/reasoning/geometry.png
ADDED
|
Git LFS Details
|
cookbooks/assets/reasoning/logical.png
ADDED
|
cookbooks/assets/video/example_video_1.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7a39adc894b3e3b3a2031ea2d0f957ddfad34362b76397321b11bfefa62c40de
|
| 3 |
+
size 12700201
|
cookbooks/assets/video/example_video_2.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2a90a370b6a768b10c3b003faf2b36c0e3fb6a6c6afecd2de7f60678351d7bed
|
| 3 |
+
size 14892846
|
cookbooks/assets/video/example_video_3.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a005fdc45c1ea3e8fccb151a4275bb9bde53a0ef7034ef3f52688dbc22b782d3
|
| 3 |
+
size 26385309
|
cookbooks/assets/video/example_video_4.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cc2a87bf37fee1c608eb81856d0201ff3716de4e6ce972c8dac3c468d46f8a5c
|
| 3 |
+
size 8082173
|
cookbooks/assets/video/example_video_5.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0613a6189fb546dbc35085fb48efb909a30898bb674476391a6179a202dbeef7
|
| 3 |
+
size 4770828
|
cookbooks/assets/video/example_video_6.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2bd56ba8ef4d1eb1e9d3744bbe2d97ff962126cfc9586796a0447e54085a00ca
|
| 3 |
+
size 19854080
|
cookbooks/assets/video/example_video_7.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f3020815b636cbffde7a3504d6f9d8589d79e80a5f839dfb6e65a4a76422c7cc
|
| 3 |
+
size 23466837
|
cookbooks/assets/video/example_video_8.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cf41e2c4a51111ddcc0ac04c46804932d9ae0c35d0bdd37c64233e9c171cf961
|
| 3 |
+
size 25891046
|
cookbooks/assets/video/example_video_9.mp4
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:121f77f79e2898bba71fa0a51810567e9ea4f86364d71acd7ed3c3672dcd0ff4
|
| 3 |
+
size 16076994
|
cookbooks/grounding.ipynb
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:68a1d3e9b01755f894b65b86393200858304fea466a1ada8ac09cd11ec1414b8
|
| 3 |
+
size 10670893
|
cookbooks/ocr.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
cookbooks/reasoning.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
cookbooks/video.ipynb
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "96bbbe85-9ff1-436c-b3ed-bf01b891c3e8",
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"source": [
|
| 8 |
+
"### Video understanding with openPangu-VL-7B"
|
| 9 |
+
]
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"cell_type": "markdown",
|
| 13 |
+
"id": "0dc96c01-6327-4495-9c2d-02b525a84cf2",
|
| 14 |
+
"metadata": {},
|
| 15 |
+
"source": [
|
| 16 |
+
"#### Inference with api"
|
| 17 |
+
]
|
| 18 |
+
},
|
| 19 |
+
{
|
| 20 |
+
"cell_type": "code",
|
| 21 |
+
"execution_count": 1,
|
| 22 |
+
"id": "d6a397f0-8ca9-4ad3-b2c7-30c58a52350e",
|
| 23 |
+
"metadata": {},
|
| 24 |
+
"outputs": [],
|
| 25 |
+
"source": [
|
| 26 |
+
"import json\n",
|
| 27 |
+
"import base64\n",
|
| 28 |
+
"import os\n",
|
| 29 |
+
"import requests\n",
|
| 30 |
+
"import json\n",
|
| 31 |
+
"from PIL import Image, ImageDraw, ImageFont\n",
|
| 32 |
+
"import ast\n",
|
| 33 |
+
"import re\n",
|
| 34 |
+
"def encode_image_to_base64(img_path):\n",
|
| 35 |
+
" \"\"\"\n",
|
| 36 |
+
" :param img_path: image path\n",
|
| 37 |
+
" :return: Base64 encoding\n",
|
| 38 |
+
" \"\"\"\n",
|
| 39 |
+
" try:\n",
|
| 40 |
+
" with open(img_path, 'rb') as img_file:\n",
|
| 41 |
+
" img_data = img_file.read()\n",
|
| 42 |
+
" base64_str = base64.b64encode(img_data).decode('utf-8')\n",
|
| 43 |
+
" return base64_str\n",
|
| 44 |
+
" except Exception as e:\n",
|
| 45 |
+
" print(f\"Encoding Error: {e}\")\n",
|
| 46 |
+
" return None\n",
|
| 47 |
+
"\n",
|
| 48 |
+
"def encode_video_to_base64(video_path):\n",
|
| 49 |
+
" \"\"\"\n",
|
| 50 |
+
" :param video_path: video path\n",
|
| 51 |
+
" :return: Base64 encoding \n",
|
| 52 |
+
" \"\"\"\n",
|
| 53 |
+
" try:\n",
|
| 54 |
+
" with open(video_path, 'rb') as video_file:\n",
|
| 55 |
+
" video_data = video_file.read()\n",
|
| 56 |
+
" base64_str = base64.b64encode(video_data).decode('utf-8')\n",
|
| 57 |
+
" return base64_str\n",
|
| 58 |
+
" except Exception as e:\n",
|
| 59 |
+
" print(f\"Encoding Error: {e}\")\n",
|
| 60 |
+
" return None\n",
|
| 61 |
+
" \n",
|
| 62 |
+
"def replace_grouding_tokens(response):\n",
|
| 63 |
+
" return response.replace(\"[unused23]\",\"\").replace(\"[unused24]\",\":\").replace(\"[unused21]\",\"\").replace(\"[unused22]\",\"\\n\")\n",
|
| 64 |
+
"\n",
|
| 65 |
+
"def extract_with_regex(data):\n",
|
| 66 |
+
" text_list = []\n",
|
| 67 |
+
" coordinates_list = []\n",
|
| 68 |
+
" \n",
|
| 69 |
+
" # 使用正则表达式匹配模式\n",
|
| 70 |
+
" pattern = r'([^:]+):\\((\\d+),(\\d+)\\),\\((\\d+),(\\d+)\\)'\n",
|
| 71 |
+
" matches = re.findall(pattern, data)\n",
|
| 72 |
+
" \n",
|
| 73 |
+
" for match in matches:\n",
|
| 74 |
+
" text_list.append(match[0])\n",
|
| 75 |
+
" coord1 = (int(match[1]), int(match[2]))\n",
|
| 76 |
+
" coord2 = (int(match[3]), int(match[4]))\n",
|
| 77 |
+
" coordinates_list.append([coord1, coord2])\n",
|
| 78 |
+
" \n",
|
| 79 |
+
" return text_list, coordinates_list\n",
|
| 80 |
+
"\n",
|
| 81 |
+
"def plot_text_bounding_boxes(image_path, response):\n",
|
| 82 |
+
" \"\"\"\n",
|
| 83 |
+
" Plots bounding boxes on an image.\n",
|
| 84 |
+
"\n",
|
| 85 |
+
" Args:\n",
|
| 86 |
+
" image_path: The path to the image file.\n",
|
| 87 |
+
" response: model response without grouding tokens.\n",
|
| 88 |
+
" \"\"\"\n",
|
| 89 |
+
" \n",
|
| 90 |
+
" text_list, coordinates_list = extract_with_regex(response)\n",
|
| 91 |
+
" img = Image.open(image_path)\n",
|
| 92 |
+
" width, height = img.size\n",
|
| 93 |
+
" draw = ImageDraw.Draw(img)\n",
|
| 94 |
+
" font = ImageFont.truetype(\"NotoSansCJK-Regular.ttc\", size=10)\n",
|
| 95 |
+
"\n",
|
| 96 |
+
" for text, coord in zip(text_list,coordinates_list):\n",
|
| 97 |
+
" color = 'green'\n",
|
| 98 |
+
"\n",
|
| 99 |
+
" abs_y1 = int(coord[0][1]/999 * height)\n",
|
| 100 |
+
" abs_x1 = int(coord[0][0]/999 * width)\n",
|
| 101 |
+
" abs_y2 = int(coord[1][1]/999 * height)\n",
|
| 102 |
+
" abs_x2 = int(coord[1][0]/999 * width)\n",
|
| 103 |
+
"\n",
|
| 104 |
+
" if abs_x1 > abs_x2:\n",
|
| 105 |
+
" abs_x1, abs_x2 = abs_x2, abs_x1\n",
|
| 106 |
+
"\n",
|
| 107 |
+
" if abs_y1 > abs_y2:\n",
|
| 108 |
+
" abs_y1, abs_y2 = abs_y2, abs_y1\n",
|
| 109 |
+
"\n",
|
| 110 |
+
" draw.rectangle(\n",
|
| 111 |
+
" ((abs_x1, abs_y1), (abs_x2, abs_y2)), outline=color, width=1\n",
|
| 112 |
+
" )\n",
|
| 113 |
+
" draw.text((abs_x1, abs_y2), text, fill=color, font=font)\n",
|
| 114 |
+
" img.show()\n",
|
| 115 |
+
"\n",
|
| 116 |
+
"\n",
|
| 117 |
+
"def infer_image_with_api(image_path, prompt):\n",
|
| 118 |
+
" url = \"http://127.0.0.1:8000/v1/chat/completions\"\n",
|
| 119 |
+
" base64_image = encode_image_to_base64(image_path)\n",
|
| 120 |
+
" payload = json.dumps({\n",
|
| 121 |
+
" \"messages\": [\n",
|
| 122 |
+
" {\n",
|
| 123 |
+
" \"role\": \"user\",\n",
|
| 124 |
+
" \"content\": [\n",
|
| 125 |
+
" {\"type\": \"image_url\", \"image_url\": {\"url\": f\"data:image/jpg;base64,{base64_image }\"}},\n",
|
| 126 |
+
" {\"type\": \"text\", \"text\": prompt}, \n",
|
| 127 |
+
" ]\n",
|
| 128 |
+
" }\n",
|
| 129 |
+
" ], \n",
|
| 130 |
+
" \"model\": \"pangu_vl\",\n",
|
| 131 |
+
" \"max_tokens\": 2048,\n",
|
| 132 |
+
" \"temperature\": 0,\n",
|
| 133 |
+
" \"stream\": False,\n",
|
| 134 |
+
"\n",
|
| 135 |
+
" })\n",
|
| 136 |
+
"\n",
|
| 137 |
+
" headers = {\n",
|
| 138 |
+
" 'Content-Type': 'application/json'\n",
|
| 139 |
+
" }\n",
|
| 140 |
+
"\n",
|
| 141 |
+
" response = requests.request(\"POST\", url, headers=headers, data=payload)\n",
|
| 142 |
+
" return json.loads(response.text)[\"choices\"][0][\"message\"][\"content\"]\n",
|
| 143 |
+
"\n",
|
| 144 |
+
"def infer_video_with_api(video_path, prompt):\n",
|
| 145 |
+
" url = \"http://127.0.0.1:8000/v1/chat/completions\"\n",
|
| 146 |
+
" base64_str_video = encode_video_to_base64(video_path)\n",
|
| 147 |
+
" payload = json.dumps({\n",
|
| 148 |
+
" \"messages\": [\n",
|
| 149 |
+
" {\n",
|
| 150 |
+
" \"role\": \"user\",\n",
|
| 151 |
+
" \"content\": [\n",
|
| 152 |
+
" {\"type\": \"video_url\", \"video_url\": {\"url\": f\"data:video/mp4;base64,{base64_str_video }\"}},\n",
|
| 153 |
+
" {\"type\": \"text\", \"text\": prompt}, \n",
|
| 154 |
+
" ]\n",
|
| 155 |
+
" }\n",
|
| 156 |
+
" ], \n",
|
| 157 |
+
" \"model\": \"pangu_vl\",\n",
|
| 158 |
+
" \"max_tokens\": 2048,\n",
|
| 159 |
+
" \"temperature\": 0,\n",
|
| 160 |
+
" \"stream\": False,\n",
|
| 161 |
+
"\n",
|
| 162 |
+
" })\n",
|
| 163 |
+
"\n",
|
| 164 |
+
" headers = {\n",
|
| 165 |
+
" 'Content-Type': 'application/json'\n",
|
| 166 |
+
" }\n",
|
| 167 |
+
"\n",
|
| 168 |
+
" response = requests.request(\"POST\", url, headers=headers, data=payload)\n",
|
| 169 |
+
" return json.loads(response.text)[\"choices\"][0][\"message\"][\"content\"]\n"
|
| 170 |
+
]
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"cell_type": "markdown",
|
| 174 |
+
"id": "4f9a1b5c",
|
| 175 |
+
"metadata": {},
|
| 176 |
+
"source": [
|
| 177 |
+
"#### Video description"
|
| 178 |
+
]
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"cell_type": "code",
|
| 182 |
+
"execution_count": 2,
|
| 183 |
+
"id": "54f4630a",
|
| 184 |
+
"metadata": {},
|
| 185 |
+
"outputs": [
|
| 186 |
+
{
|
| 187 |
+
"output_type": "stream",
|
| 188 |
+
"name": "stdout",
|
| 189 |
+
"text": [
|
| 190 |
+
"The video begins with a person standing next to a small wooden table. On the table, there is a black and silver electric kettle and a pink mug with a white bottle inside it. The person is wearing a pink and white striped crop top and black pants with white stripes on the sides. They are holding a black laptop in their hands, showing the keyboard side to the camera. The person then closes the laptop and sets it aside. Next, they pick up a smartphone, showing the back side to the camera, and then the front side. After that, they pick up a stainless steel pot with a lid, showing it to the camera. They then set the pot aside and pick up a large kitchen knife, showing it to the camera. Finally, they pick up a pink garment, possibly a dress or a shirt, and show it to the camera. Throughout the video, the person is standing in the same position, and the background remains consistent with a white wall and a window with sheer curtains.\n",
|
| 191 |
+
"\n",
|
| 192 |
+
"\n",
|
| 193 |
+
"视频开始时,画面展示了一张带有黄色和灰色花纹的桌布,上面放置着一个透明的玻璃杯,杯中装有深色液体,旁边是一个黑色的遥控器。画面中央有一只手正在用黑色的笔在一张白色的便签纸上写字。便签纸上有水平的蓝色线条。手正在书写字母“a”,随后继续书写,最终写出了“art”这个词。写完“art”后,手将笔放下,便签纸被拿起并展示给镜头,可以看到“art”这个词清晰地写在纸上。接着,手将便签纸放回原位,然后画面中出现了一个绿色的字母“t”形状的纸片,被放置在“art”这个词的下方。随后,一个蓝色的字母“a”形状的纸片被放置在绿色“t”旁边,最后,一个红色的字母“c”形状的纸片被放置在蓝色“a”旁边,形成了“t a c”这三个字母。整个过程中,背景和物品的位置没有发生变化。\n"
|
| 194 |
+
]
|
| 195 |
+
}
|
| 196 |
+
],
|
| 197 |
+
"source": [
|
| 198 |
+
"video_path = \"assets/video/example_video_1.mp4\"\n",
|
| 199 |
+
"prompt = \"Please describe the video in detail.\"\n",
|
| 200 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 201 |
+
"print(response)\n",
|
| 202 |
+
"\n",
|
| 203 |
+
"print(\"\")\n",
|
| 204 |
+
"print(\"\")\n",
|
| 205 |
+
"\n",
|
| 206 |
+
"video_path = \"assets/video/example_video_2.mp4\"\n",
|
| 207 |
+
"prompt = \"请使用中文详细描述该视频。\"\n",
|
| 208 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 209 |
+
"print(response)"
|
| 210 |
+
]
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"cell_type": "markdown",
|
| 214 |
+
"id": "594ab293",
|
| 215 |
+
"metadata": {},
|
| 216 |
+
"source": [
|
| 217 |
+
"#### Temporal Action Localization"
|
| 218 |
+
]
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"cell_type": "code",
|
| 222 |
+
"execution_count": 3,
|
| 223 |
+
"id": "c177b6f1",
|
| 224 |
+
"metadata": {},
|
| 225 |
+
"outputs": [
|
| 226 |
+
{
|
| 227 |
+
"output_type": "stream",
|
| 228 |
+
"name": "stdout",
|
| 229 |
+
"text": [
|
| 230 |
+
"[unused14]0.0 - 4.2 seconds[unused15], A person is putting objects on the floor.[unused14]4.2 - 9.2 seconds[unused15], They put a ball, a cup, and a box on the floor.[unused14]9.2 - 20.4 seconds[unused15], They close the door.\n"
|
| 231 |
+
]
|
| 232 |
+
}
|
| 233 |
+
],
|
| 234 |
+
"source": [
|
| 235 |
+
"video_path = \"assets/video/example_video_3.mp4\"\n",
|
| 236 |
+
"prompt = \"Please locate all the actions in the video and output the corresponding start and end timestamps.\"\n",
|
| 237 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 238 |
+
"print(response)"
|
| 239 |
+
]
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"cell_type": "markdown",
|
| 243 |
+
"id": "ba6ecf29",
|
| 244 |
+
"metadata": {},
|
| 245 |
+
"source": [
|
| 246 |
+
"#### Video General QA"
|
| 247 |
+
]
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"cell_type": "code",
|
| 251 |
+
"execution_count": 4,
|
| 252 |
+
"id": "66aef615",
|
| 253 |
+
"metadata": {},
|
| 254 |
+
"outputs": [
|
| 255 |
+
{
|
| 256 |
+
"output_type": "stream",
|
| 257 |
+
"name": "stdout",
|
| 258 |
+
"text": [
|
| 259 |
+
"视频中出现了两次五角星。第一次是在视频开始时,双手合拢后张开手掌,可以看到左手掌心有一个五角星图案。第二次是在视频接近尾声时,双手再次合拢后张开手掌,可以看到左手掌心又出现了一个五角星图案。\n",
|
| 260 |
+
"\n",
|
| 261 |
+
"\n",
|
| 262 |
+
"他穿着短袖。\n",
|
| 263 |
+
"\n",
|
| 264 |
+
"\n",
|
| 265 |
+
"The second item placed on the table is a clear glass mug with floral designs.\n",
|
| 266 |
+
"\n",
|
| 267 |
+
"\n",
|
| 268 |
+
"The word 'SUGAR' is written on the bottle on the left.\n"
|
| 269 |
+
]
|
| 270 |
+
}
|
| 271 |
+
],
|
| 272 |
+
"source": [
|
| 273 |
+
"\n",
|
| 274 |
+
"video_path = \"assets/video/example_video_4.mp4\"\n",
|
| 275 |
+
"prompt = \"视频中出现了几次五角星?在什么位置?\"\n",
|
| 276 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 277 |
+
"print(response)\n",
|
| 278 |
+
"\n",
|
| 279 |
+
"print(\"\")\n",
|
| 280 |
+
"print(\"\")\n",
|
| 281 |
+
"\n",
|
| 282 |
+
"video_path = \"assets/video/example_video_5.mp4\"\n",
|
| 283 |
+
"prompt = \"他穿着长袖还是短袖?\"\n",
|
| 284 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 285 |
+
"print(response)\n",
|
| 286 |
+
"\n",
|
| 287 |
+
"print(\"\")\n",
|
| 288 |
+
"print(\"\")\n",
|
| 289 |
+
"\n",
|
| 290 |
+
"video_path = \"assets/video/example_video_6.mp4\"\n",
|
| 291 |
+
"prompt = \"What is the second item placed on the table by the person in the video?\"\n",
|
| 292 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 293 |
+
"print(response)\n",
|
| 294 |
+
"\n",
|
| 295 |
+
"print(\"\")\n",
|
| 296 |
+
"print(\"\")\n",
|
| 297 |
+
"\n",
|
| 298 |
+
"video_path = \"assets/video/example_video_7.mp4\"\n",
|
| 299 |
+
"prompt = \"What words are written on the bottle on the left?\"\n",
|
| 300 |
+
"response = infer_video_with_api(video_path, prompt)\n",
|
| 301 |
+
"print(response)\n",
|
| 302 |
+
"\n",
|
| 303 |
+
"\n"
|
| 304 |
+
]
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"cell_type": "code",
|
| 308 |
+
"execution_count": null,
|
| 309 |
+
"metadata": {},
|
| 310 |
+
"outputs": [],
|
| 311 |
+
"source": []
|
| 312 |
+
}
|
| 313 |
+
],
|
| 314 |
+
"metadata": {
|
| 315 |
+
"kernelspec": {
|
| 316 |
+
"name": "python3",
|
| 317 |
+
"display_name": "Python 3.11.13 64-bit"
|
| 318 |
+
},
|
| 319 |
+
"language_info": {
|
| 320 |
+
"name": "",
|
| 321 |
+
"version": "3.11.13"
|
| 322 |
+
},
|
| 323 |
+
"interpreter": {
|
| 324 |
+
"hash": "32cd74d773da33073fa79be3834e1375de230f7ebe90dceaad933455c7929c10"
|
| 325 |
+
}
|
| 326 |
+
},
|
| 327 |
+
"nbformat": 4,
|
| 328 |
+
"nbformat_minor": 5
|
| 329 |
+
}
|
doc/technical_report.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3d9b7f7fd8719aafb014cda4ca4358fd1bb778ce08501182c2c11bb084a0b2bd
|
| 3 |
+
size 1255829
|
doc/vllm_ascend_for_openpangu_vl_7b.md
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## openPangu-VL-7B 在[vllm-ascend](https://github.com/vllm-project/vllm-ascend)部署指导文档
|
| 2 |
+
|
| 3 |
+
### 部署环境要求
|
| 4 |
+
|
| 5 |
+
Atlas 800T A2(64GB) 1、2、4、8卡均可部署openPangu-VL-7B
|
| 6 |
+
|
| 7 |
+
### 镜像构建和启动
|
| 8 |
+
|
| 9 |
+
选用vllm-ascend社区镜像v0.9.1
|
| 10 |
+
|
| 11 |
+
拉取方式如下:
|
| 12 |
+
|
| 13 |
+
```bash
|
| 14 |
+
docker pull quay.io/ascend/vllm-ascend:v0.9.1
|
| 15 |
+
```
|
| 16 |
+
|
| 17 |
+
以下操作需在每个节点都执行。
|
| 18 |
+
启动镜像。
|
| 19 |
+
|
| 20 |
+
```bash
|
| 21 |
+
# Update the vllm-ascend image
|
| 22 |
+
export IMAGE=quay.io/ascend/vllm-ascend:v0.9.1 # Use correct image id
|
| 23 |
+
export NAME=vllm-ascend # Custom docker name
|
| 24 |
+
|
| 25 |
+
# Run the container using the defined variables
|
| 26 |
+
# Note if you are running bridge network with docker, Please expose available ports for multiple nodes communication in advance
|
| 27 |
+
# To prevent device interference from other docker containers, add the argument "--privileged"
|
| 28 |
+
docker run --rm \
|
| 29 |
+
--name $NAME \
|
| 30 |
+
--network host \
|
| 31 |
+
--ipc=host \
|
| 32 |
+
--device /dev/davinci0 \
|
| 33 |
+
--device /dev/davinci1 \
|
| 34 |
+
--device /dev/davinci2 \
|
| 35 |
+
--device /dev/davinci3 \
|
| 36 |
+
--device /dev/davinci4 \
|
| 37 |
+
--device /dev/davinci5 \
|
| 38 |
+
--device /dev/davinci6 \
|
| 39 |
+
--device /dev/davinci7 \
|
| 40 |
+
--device /dev/davinci_manager \
|
| 41 |
+
--device /dev/devmm_svm \
|
| 42 |
+
--device /dev/hisi_hdc \
|
| 43 |
+
-v /usr/local/dcmi:/usr/local/dcmi \
|
| 44 |
+
-v /usr/local/Ascend/driver/tools/hccn_tool:/usr/local/Ascend/driver/tools/hccn_tool \
|
| 45 |
+
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
| 46 |
+
-v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
|
| 47 |
+
-v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
|
| 48 |
+
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
| 49 |
+
-v /mnt/sfs_turbo/.cache:/root/.cache \
|
| 50 |
+
-it $IMAGE bash
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
需要保证模型权重和本项目代码可在容器中访问。如果未进入容器,需以root用户进入容器。
|
| 54 |
+
```bash
|
| 55 |
+
docker exec -itu root $NAME /bin/bash
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
### PD混部推理
|
| 59 |
+
|
| 60 |
+
示例启动脚本:`LOAD_CKPT_DIR=xxx bash examples/start_serving_openpangu_vl_7b.sh`。该启动脚本为8卡推理(变量TENSOR_PARALLEL_SIZE_LOCAL=8)。拉起服务后,可向首节点(主节点)发送请求。
|
| 61 |
+
|
| 62 |
+
### 发请求测试
|
| 63 |
+
|
| 64 |
+
服务启动后,可发送测试请求。推荐使用示例中的system prompt。
|
| 65 |
+
|
| 66 |
+
推理示例:图片+文字
|
| 67 |
+
|
| 68 |
+
```python
|
| 69 |
+
import json
|
| 70 |
+
import base64
|
| 71 |
+
import os
|
| 72 |
+
import requests
|
| 73 |
+
import json
|
| 74 |
+
|
| 75 |
+
def encode_image_to_base64(img_path, img_name):
|
| 76 |
+
#load image to base64
|
| 77 |
+
try:
|
| 78 |
+
with open(os.path.join(img_path, img_name), 'rb') as img_file:
|
| 79 |
+
img_data = img_file.read()
|
| 80 |
+
base64_str = base64.b64encode(img_data).decode('utf-8')
|
| 81 |
+
return base64_str
|
| 82 |
+
except Exception as e:
|
| 83 |
+
print(f"image load failed: {e}")
|
| 84 |
+
return None
|
| 85 |
+
|
| 86 |
+
base64_image = encode_image_to_base64("/image_path", "image_name.jpg")
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
payload_image_example = json.dumps({
|
| 90 |
+
"messages": [
|
| 91 |
+
{
|
| 92 |
+
"role": "system",
|
| 93 |
+
"content": [
|
| 94 |
+
{"type": "text", "text": "你是华为公司开发的多模态大模型,名字是openPangu-VL-7B。你能够处理文本和视觉模态的输入,并给出文本输出。"},
|
| 95 |
+
]
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"role": "user",
|
| 99 |
+
"content": [
|
| 100 |
+
{"type": "image_url", "image_url": {"url": f"data:image/jpg;base64,{base64_image}"}},
|
| 101 |
+
{"type": "text", "text": "Please describe this picture."},
|
| 102 |
+
]
|
| 103 |
+
}
|
| 104 |
+
],
|
| 105 |
+
"model": "pangu_vl",
|
| 106 |
+
"max_tokens": 500,
|
| 107 |
+
"temperature": 1.0,
|
| 108 |
+
"stream": False,
|
| 109 |
+
})
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
url = "http://127.0.0.1:8000/v1/chat/completions"
|
| 113 |
+
headers = {
|
| 114 |
+
'Content-Type': 'application/json'
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
response_image_example = requests.request("POST", url, headers=headers, data=payload_image_example)
|
| 118 |
+
print(f"the response of image example is {response_image_example.text}")
|
| 119 |
+
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
推理示例:视频+文字
|
| 124 |
+
|
| 125 |
+
```python
|
| 126 |
+
import json
|
| 127 |
+
import base64
|
| 128 |
+
import os
|
| 129 |
+
import requests
|
| 130 |
+
import json
|
| 131 |
+
|
| 132 |
+
def encode_video_to_base64(video_path, video_name):
|
| 133 |
+
#load video to base64
|
| 134 |
+
try:
|
| 135 |
+
with open(os.path.join(video_path, video_name), 'rb') as video_file:
|
| 136 |
+
video_data = video_file.read()
|
| 137 |
+
base64_str = base64.b64encode(video_data).decode('utf-8')
|
| 138 |
+
return base64_str
|
| 139 |
+
except Exception as e:
|
| 140 |
+
print(f"video load failed: {e}")
|
| 141 |
+
return None
|
| 142 |
+
|
| 143 |
+
base64_video = encode_video_to_base64("/video_path", "video_name.mp4")
|
| 144 |
+
|
| 145 |
+
payload_video_example = json.dumps({
|
| 146 |
+
"messages": [
|
| 147 |
+
{
|
| 148 |
+
"role": "system",
|
| 149 |
+
"content": [
|
| 150 |
+
{"type": "text", "text": "你是华为公司开发的多模态大模型,名字是openPangu-VL-7B。你能够处理文本和视觉模态的输入,并给出文本输出。"},
|
| 151 |
+
]
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"role": "user",
|
| 155 |
+
"content": [
|
| 156 |
+
{"type": "video_url", "video_url": {"url": f"data:video/mp4;base64,{base64_video}"}},
|
| 157 |
+
{"type": "text", "text": "Please describe this video."},
|
| 158 |
+
]
|
| 159 |
+
}
|
| 160 |
+
],
|
| 161 |
+
"model": "pangu_vl",
|
| 162 |
+
"max_tokens": 500,
|
| 163 |
+
"temperature": 1.0,
|
| 164 |
+
"stream": False,
|
| 165 |
+
})
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
url = "http://127.0.0.1:8000/v1/chat/completions"
|
| 169 |
+
headers = {
|
| 170 |
+
'Content-Type': 'application/json'
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
response_video_example = requests.request("POST", url, headers=headers, data=payload_video_example)
|
| 174 |
+
print(f"the response of video example is {response_video_example.text}")
|
| 175 |
+
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
### 128k 视频长序列推理
|
| 179 |
+
在/preprocessor_config.json中添加字段,输入视频会被抽取为768帧
|
| 180 |
+
```
|
| 181 |
+
"num_frames": 768,
|
| 182 |
+
"sample_fps": -1.0
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
启动脚本(/inference/vllm_ascend/examples/start_serving_openpangu_vl_7b.sh)内设置参数:
|
| 187 |
+
```
|
| 188 |
+
export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
|
| 189 |
+
MAX_MODEL_LEN=128000
|
| 190 |
+
MAX_NUM_BATCHED_TOKENS=100000
|
| 191 |
+
GPU_MEMORY_UTILIZATION=0.7
|
| 192 |
+
|
| 193 |
+
--no-enable-chunked-prefill \
|
| 194 |
+
--no-enable-prefix-caching \
|
| 195 |
+
```
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
### Int8推理
|
| 199 |
+
|
| 200 |
+
#### ModelSlim量化
|
| 201 |
+
|
| 202 |
+
openPangu-VL-7B模型支持使用开源量化框架ModelSlim,参考[[ModelSlim_openPangu-VL-7B]](https://gitcode.com/Ascend/msit/blob/msModelslim_Pangu_VL/msmodelslim/example/multimodal_vlm/openPangu-VL/ReadMe.md),当前模型支持W8A8权重激活量化。
|
| 203 |
+
|
| 204 |
+
##### openPangu-VL-7B W8A8 动态量化
|
| 205 |
+
|
| 206 |
+
```bash
|
| 207 |
+
export QUANT_PATH=your_quant_save_dir
|
| 208 |
+
export MODEL_PATH=your_model_ckpt_dir
|
| 209 |
+
export CALI_DATASET=your_cali_dataset_dir
|
| 210 |
+
python quant_pangu_vl.py \
|
| 211 |
+
--model_path $MODEL_PATH --calib_images $CALI_DATASET \
|
| 212 |
+
--save_directory $QUANT_PATH --w_bit 8 --a_bit 8 --device_type npu \
|
| 213 |
+
--trust_remote_code True --anti_method m2 --act_method 3 --is_dynamic True
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
相较于BF16模型,int8量化模型的config.json增加以下字段:
|
| 217 |
+
```
|
| 218 |
+
"quantize": "w8a8_dynamic",
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
ModelSlim量化脚本生成量化模型后会自动追加上述字段到config.json中。
|
| 222 |
+
|
| 223 |
+
#### Int8推理
|
| 224 |
+
|
| 225 |
+
相较于BF16模型推理,int8量化模型推理可使用同样的启动脚本,仅需:
|
| 226 |
+
* 减少节点数、卡数;
|
| 227 |
+
* 修改模型Checkpoint路径。
|
doc/vllm_ascend_for_openpangu_vl_7b_EN.md
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Deployment Guide of openPangu-VL-7B Based on [vllm-ascend](https://github.com/vllm-project/vllm-ascend)
|
| 2 |
+
|
| 3 |
+
### Deployment Environment Description
|
| 4 |
+
|
| 5 |
+
Atlas 800T A2(64GB), openPangu-VL-7B can be deployed on 1、2、4 or 8 cards.
|
| 6 |
+
|
| 7 |
+
### Docker Boot and Inference Code
|
| 8 |
+
|
| 9 |
+
Use the vllm-ascend community image v0.9.1.
|
| 10 |
+
|
| 11 |
+
Pull the image with the following command:
|
| 12 |
+
|
| 13 |
+
```bash
|
| 14 |
+
docker pull quay.io/ascend/vllm-ascend:v0.9.1
|
| 15 |
+
```
|
| 16 |
+
|
| 17 |
+
The following operations need to be performed on each node.
|
| 18 |
+
Start the container.
|
| 19 |
+
|
| 20 |
+
```bash
|
| 21 |
+
# Update the vllm-ascend image
|
| 22 |
+
export IMAGE=quay.io/ascend/vllm-ascend:v0.9.1 # Use correct image id
|
| 23 |
+
export NAME=vllm-ascend # Custom docker name
|
| 24 |
+
|
| 25 |
+
# Run the container using the defined variables
|
| 26 |
+
# Note if you are running bridge network with docker, Please expose available ports for multiple nodes communication in advance
|
| 27 |
+
# To prevent device interference from other docker containers, add the argument "--privileged"
|
| 28 |
+
docker run --rm \
|
| 29 |
+
--name $NAME \
|
| 30 |
+
--network host \
|
| 31 |
+
--ipc=host \
|
| 32 |
+
--device /dev/davinci0 \
|
| 33 |
+
--device /dev/davinci1 \
|
| 34 |
+
--device /dev/davinci2 \
|
| 35 |
+
--device /dev/davinci3 \
|
| 36 |
+
--device /dev/davinci4 \
|
| 37 |
+
--device /dev/davinci5 \
|
| 38 |
+
--device /dev/davinci6 \
|
| 39 |
+
--device /dev/davinci7 \
|
| 40 |
+
--device /dev/davinci_manager \
|
| 41 |
+
--device /dev/devmm_svm \
|
| 42 |
+
--device /dev/hisi_hdc \
|
| 43 |
+
-v /usr/local/dcmi:/usr/local/dcmi \
|
| 44 |
+
-v /usr/local/Ascend/driver/tools/hccn_tool:/usr/local/Ascend/driver/tools/hccn_tool \
|
| 45 |
+
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
| 46 |
+
-v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
|
| 47 |
+
-v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
|
| 48 |
+
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
| 49 |
+
-v /mnt/sfs_turbo/.cache:/root/.cache \
|
| 50 |
+
-it $IMAGE bash
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
Ensure that the model checkpoint and the project code are accessible within the container. If not inside the container, enter the container as the root user:
|
| 54 |
+
```bash
|
| 55 |
+
docker exec -itu root $NAME /bin/bash
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
### PD Aggregation Inference
|
| 59 |
+
|
| 60 |
+
Example startup script: `LOAD_CKPT_DIR=xxx bash examples/start_serving_openpangu_vl_7b.sh`. This example script uses 8 cards (TENSOR_PARALLEL_SIZE_LOCAL=8) to deploy the openPangu-VL-7B model. After starting the service, requests can be sent to the first node (master node).
|
| 61 |
+
|
| 62 |
+
### Send Testing Requests
|
| 63 |
+
|
| 64 |
+
After the service is started, we can send testing requests. It is recommended to use the system prompt provided in the examples.
|
| 65 |
+
|
| 66 |
+
Example: image + text
|
| 67 |
+
|
| 68 |
+
```python
|
| 69 |
+
import json
|
| 70 |
+
import base64
|
| 71 |
+
import os
|
| 72 |
+
import requests
|
| 73 |
+
import json
|
| 74 |
+
|
| 75 |
+
def encode_image_to_base64(img_path, img_name):
|
| 76 |
+
#load image to base64
|
| 77 |
+
try:
|
| 78 |
+
with open(os.path.join(img_path, img_name), 'rb') as img_file:
|
| 79 |
+
img_data = img_file.read()
|
| 80 |
+
base64_str = base64.b64encode(img_data).decode('utf-8')
|
| 81 |
+
return base64_str
|
| 82 |
+
except Exception as e:
|
| 83 |
+
print(f"image load failed: {e}")
|
| 84 |
+
return None
|
| 85 |
+
|
| 86 |
+
base64_image = encode_image_to_base64("/image_path", "image_name.jpg")
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
payload_image_example = json.dumps({
|
| 90 |
+
"messages": [
|
| 91 |
+
{
|
| 92 |
+
"role": "system",
|
| 93 |
+
"content": [
|
| 94 |
+
{"type": "text", "text": "You are a multimodal large model developed by Huawei, named openPangu-VL-7B. You can process both text and visual inputs and generate text outputs."},
|
| 95 |
+
]
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"role": "user",
|
| 99 |
+
"content": [
|
| 100 |
+
{"type": "image_url", "image_url": {"url": f"data:image/jpg;base64,{base64_image}"}},
|
| 101 |
+
{"type": "text", "text": "Please describe this picture."},
|
| 102 |
+
]
|
| 103 |
+
}
|
| 104 |
+
],
|
| 105 |
+
"model": "pangu_vl",
|
| 106 |
+
"max_tokens": 500,
|
| 107 |
+
"temperature": 1.0,
|
| 108 |
+
"stream": False,
|
| 109 |
+
})
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
url = "http://127.0.0.1:8000/v1/chat/completions"
|
| 113 |
+
headers = {
|
| 114 |
+
'Content-Type': 'application/json'
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
response_image_example = requests.request("POST", url, headers=headers, data=payload_image_example)
|
| 118 |
+
print(f"the response of image example is {response_image_example.text}")
|
| 119 |
+
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
Example: video + text
|
| 124 |
+
|
| 125 |
+
```python
|
| 126 |
+
import json
|
| 127 |
+
import base64
|
| 128 |
+
import os
|
| 129 |
+
import requests
|
| 130 |
+
import json
|
| 131 |
+
|
| 132 |
+
def encode_video_to_base64(video_path, video_name):
|
| 133 |
+
#load video to base64
|
| 134 |
+
try:
|
| 135 |
+
with open(os.path.join(video_path, video_name), 'rb') as video_file:
|
| 136 |
+
video_data = video_file.read()
|
| 137 |
+
base64_str = base64.b64encode(video_data).decode('utf-8')
|
| 138 |
+
return base64_str
|
| 139 |
+
except Exception as e:
|
| 140 |
+
print(f"video load failed: {e}")
|
| 141 |
+
return None
|
| 142 |
+
|
| 143 |
+
base64_video = encode_video_to_base64("/video_path", "video_name.mp4")
|
| 144 |
+
|
| 145 |
+
payload_video_example = json.dumps({
|
| 146 |
+
"messages": [
|
| 147 |
+
{
|
| 148 |
+
"role": "system",
|
| 149 |
+
"content": [
|
| 150 |
+
{"type": "text", "text": "You are a multimodal large model developed by Huawei, named openPangu-VL-7B. You can process both text and visual inputs and generate text outputs."},
|
| 151 |
+
]
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"role": "user",
|
| 155 |
+
"content": [
|
| 156 |
+
{"type": "video_url", "video_url": {"url": f"data:video/mp4;base64,{base64_video}"}},
|
| 157 |
+
{"type": "text", "text": "Please describe this video."},
|
| 158 |
+
]
|
| 159 |
+
}
|
| 160 |
+
],
|
| 161 |
+
"model": "pangu_vl",
|
| 162 |
+
"max_tokens": 500,
|
| 163 |
+
"temperature": 1.0,
|
| 164 |
+
"stream": False,
|
| 165 |
+
})
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
url = "http://127.0.0.1:8000/v1/chat/completions"
|
| 169 |
+
headers = {
|
| 170 |
+
'Content-Type': 'application/json'
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
response_video_example = requests.request("POST", url, headers=headers, data=payload_video_example)
|
| 174 |
+
print(f"the response of video example is {response_video_example.text}")
|
| 175 |
+
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
### 128k Sequence Video Inference
|
| 179 |
+
Add the following fields to /preprocessor_config.json; the input video will then be sampled into 768 frames.
|
| 180 |
+
```
|
| 181 |
+
"num_frames": 768,
|
| 182 |
+
"sample_fps": -1.0
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
In the startup script (/inference/vllm_ascend/examples/start_serving_openpangu_vl_7b.sh), set the parameters as follows:
|
| 186 |
+
```
|
| 187 |
+
export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
|
| 188 |
+
MAX_MODEL_LEN=128000
|
| 189 |
+
MAX_NUM_BATCHED_TOKENS=100000
|
| 190 |
+
GPU_MEMORY_UTILIZATION=0.7
|
| 191 |
+
|
| 192 |
+
--no-enable-chunked-prefill \
|
| 193 |
+
--no-enable-prefix-caching \
|
| 194 |
+
```
|
| 195 |
+
|
| 196 |
+
### Int8 Inference
|
| 197 |
+
|
| 198 |
+
#### ModelSlim Quantization
|
| 199 |
+
|
| 200 |
+
The openPangu-VL-7B model supports quantization using the open-source quantization framework ModelSlim. Please refer to [[ModelSlim_openPangu-VL-7B_README]](https://gitcode.com/Ascend/msit/blob/msModelslim_Pangu_VL/msmodelslim/example/multimodal_vlm/openPangu-VL/ReadMe.md). The current model supports W8A8 weight-activation quantization.
|
| 201 |
+
|
| 202 |
+
##### openPangu-VL-7B W8A8 Dynamic quantization
|
| 203 |
+
|
| 204 |
+
```bash
|
| 205 |
+
export QUANT_PATH=your_quant_save_dir
|
| 206 |
+
export MODEL_PATH=your_model_ckpt_dir
|
| 207 |
+
export CALI_DATASET=your_cali_dataset_dir
|
| 208 |
+
python quant_pangu_vl.py \
|
| 209 |
+
--model_path $MODEL_PATH --calib_images $CALI_DATASET \
|
| 210 |
+
--save_directory $QUANT_PATH --w_bit 8 --a_bit 8 --device_type npu \
|
| 211 |
+
--trust_remote_code True --anti_method m2 --act_method 3 --is_dynamic True
|
| 212 |
+
```
|
| 213 |
+
|
| 214 |
+
Compared with the BF16 model, the following fields are added to the config.json file of the int8 quantization model:
|
| 215 |
+
```
|
| 216 |
+
"quantize": "w8a8_dynamic",
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
After the ModelSlim quantization script generates a quantization model, the preceding fields are automatically added to the config.json file.
|
| 220 |
+
|
| 221 |
+
#### Int8 Inference
|
| 222 |
+
|
| 223 |
+
Compared to BF16 model inference, the int8 quantized model inference uses the same startup script, requiring only:
|
| 224 |
+
* Reducing the number of nodes and NPUs;
|
| 225 |
+
* Modifying the model checkpoint path.
|
generation_config.json
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bos_token_id": 1,
|
| 3 |
+
"pad_token_id": 2,
|
| 4 |
+
"do_sample": true,
|
| 5 |
+
"eos_token_id": [
|
| 6 |
+
45892
|
| 7 |
+
],
|
| 8 |
+
"temperature": 0.000001,
|
| 9 |
+
"top_k": 1,
|
| 10 |
+
"transformers_version": "4.53.2"
|
| 11 |
+
}
|
imageprocessor_openpangu_vl.py
ADDED
|
@@ -0,0 +1,418 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
|
| 3 |
+
# Copyright 2025 The HuggingFace Inc. team.
|
| 4 |
+
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
|
| 5 |
+
# Adapted from transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py
|
| 6 |
+
|
| 7 |
+
#
|
| 8 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
| 9 |
+
# and OPT implementations in this library. It has been modified from its
|
| 10 |
+
# original forms to accommodate minor architectural differences compared
|
| 11 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
| 12 |
+
#
|
| 13 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 14 |
+
# you may not use this file except in compliance with the License.
|
| 15 |
+
# You may obtain a copy of the License at
|
| 16 |
+
#
|
| 17 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 18 |
+
#
|
| 19 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 20 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 21 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 22 |
+
# See the License for the specific language governing permissions and
|
| 23 |
+
# limitations under the License.
|
| 24 |
+
|
| 25 |
+
from typing import Optional, Union
|
| 26 |
+
from types import SimpleNamespace
|
| 27 |
+
from transformers.models.qwen2_vl.image_processing_qwen2_vl_fast import Qwen2VLImageProcessorFast
|
| 28 |
+
from functools import partial, lru_cache
|
| 29 |
+
from transformers.image_processing_utils import BatchFeature
|
| 30 |
+
from transformers.image_utils import (
|
| 31 |
+
ChannelDimension,
|
| 32 |
+
SizeDict,
|
| 33 |
+
make_flat_list_of_images,
|
| 34 |
+
valid_images,
|
| 35 |
+
pil_torch_interpolation_mapping,
|
| 36 |
+
)
|
| 37 |
+
from torchvision.transforms.v2 import functional as F
|
| 38 |
+
import torch
|
| 39 |
+
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
|
| 40 |
+
# from transformers.image_processing_utils_fast import (
|
| 41 |
+
# group_images_by_shape,
|
| 42 |
+
# reorder_images,
|
| 43 |
+
# )
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def rescale(image, scale):
    """Return ``image`` with every pixel value multiplied by ``scale``."""
    scaled = image * scale
    return scaled
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def normalize(image, mean, std):
    """Channel-wise standardize ``image``: ``(image - mean) / std``.

    Thin wrapper over ``torchvision.transforms.v2.functional.normalize``.
    """
    return F.normalize(image, mean, std)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@lru_cache(maxsize=10)
|
| 55 |
+
def _fuse_mean_std_and_rescale_factor(
|
| 56 |
+
do_normalize: Optional[bool] = None,
|
| 57 |
+
image_mean: Optional[Union[float, list[float]]] = None,
|
| 58 |
+
image_std: Optional[Union[float, list[float]]] = None,
|
| 59 |
+
do_rescale: Optional[bool] = None,
|
| 60 |
+
rescale_factor: Optional[float] = None,
|
| 61 |
+
device: Optional["torch.device"] = None,
|
| 62 |
+
) -> tuple:
|
| 63 |
+
if do_rescale and do_normalize:
|
| 64 |
+
# Fused rescale and normalize
|
| 65 |
+
image_mean = torch.tensor(image_mean, device=device) * (1.0 / rescale_factor)
|
| 66 |
+
image_std = torch.tensor(image_std, device=device) * (1.0 / rescale_factor)
|
| 67 |
+
do_rescale = False
|
| 68 |
+
return image_mean, image_std, do_rescale
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def rescale_and_normalize(
    images: "torch.Tensor",
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    image_mean: Union[float, list[float]],
    image_std: Union[float, list[float]],
) -> "torch.Tensor":
    """
    Rescale and normalize images.

    When both ``do_rescale`` and ``do_normalize`` are True, the rescale factor
    is fused into the mean/std constants (see
    ``_fuse_mean_std_and_rescale_factor``) so only one pass over the pixels is
    needed. The result is cast to the processor's working dtype
    (``OpenPanguVLImageProcessorFast.dtype``, bfloat16).
    """
    image_mean, image_std, do_rescale = _fuse_mean_std_and_rescale_factor(
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        device=images.device,
    )
    # if/elif on purpose: after fusing, do_rescale is False whenever both
    # steps were requested, so normalization alone performs both operations.
    if do_normalize:
        # normalize in float32 for numerical stability before the bf16 cast
        images = normalize(images.to(dtype=torch.float32), image_mean, image_std)
    elif do_rescale:
        images = rescale(images, rescale_factor)
    images = images.to(OpenPanguVLImageProcessorFast.dtype)

    return images
|
| 98 |
+
|
| 99 |
+
# This part will be removed in the future.
|
| 100 |
+
from collections import defaultdict
|
| 101 |
+
def _group_images_by_shape(nested_images, is_nested: bool = False):
|
| 102 |
+
"""Helper function to flatten a single level of nested image structures and group by shape."""
|
| 103 |
+
grouped_images = defaultdict(list)
|
| 104 |
+
grouped_images_index = {}
|
| 105 |
+
nested_images = [nested_images] if not is_nested else nested_images
|
| 106 |
+
for i, sublist in enumerate(nested_images):
|
| 107 |
+
for j, image in enumerate(sublist):
|
| 108 |
+
key = (i, j) if is_nested else j
|
| 109 |
+
shape = image.shape[1:]
|
| 110 |
+
grouped_images[shape].append(image)
|
| 111 |
+
grouped_images_index[key] = (shape, len(grouped_images[shape]) - 1)
|
| 112 |
+
|
| 113 |
+
return grouped_images, grouped_images_index
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _reconstruct_nested_structure(indices, processed_images):
|
| 117 |
+
"""Helper function to reconstruct a single level nested structure."""
|
| 118 |
+
# Find the maximum outer index
|
| 119 |
+
max_outer_idx = max(idx[0] for idx in indices.keys())
|
| 120 |
+
|
| 121 |
+
# Create the outer list
|
| 122 |
+
result = [None] * (max_outer_idx + 1)
|
| 123 |
+
|
| 124 |
+
# Group indices by outer index
|
| 125 |
+
nested_indices = defaultdict(list)
|
| 126 |
+
for i, j in indices.keys():
|
| 127 |
+
nested_indices[i].append(j)
|
| 128 |
+
|
| 129 |
+
for i in range(max_outer_idx + 1):
|
| 130 |
+
if i in nested_indices:
|
| 131 |
+
inner_max_idx = max(nested_indices[i])
|
| 132 |
+
inner_list = [None] * (inner_max_idx + 1)
|
| 133 |
+
for j in range(inner_max_idx + 1):
|
| 134 |
+
if (i, j) in indices:
|
| 135 |
+
shape, idx = indices[(i, j)]
|
| 136 |
+
inner_list[j] = processed_images[shape][idx]
|
| 137 |
+
result[i] = inner_list
|
| 138 |
+
|
| 139 |
+
return result
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def group_images_by_shape(
    images: Union[list["torch.Tensor"], "torch.Tensor"],
    disable_grouping: bool,
    is_nested: bool = False,
) -> tuple[
    dict[tuple[int, int], list["torch.Tensor"]], dict[Union[int, tuple[int, int]], tuple[tuple[int, int], int]]
]:
    """Batch same-shaped images together for vectorized processing.

    Returns a mapping from group key to a stacked (or single-image) batch
    tensor, plus an index mapping original positions back into those batches
    (consumed by ``reorder_images``). With ``disable_grouping`` every image
    becomes its own batch of size one.
    """
    if disable_grouping is None:
        # Default heuristic: grouping pays off on accelerators, not on CPU.
        probe = images[0][0] if is_nested else images[0]
        disable_grouping = probe.device == "cpu"

    if disable_grouping:
        if is_nested:
            singles = {}
            index = {}
            for i, sublist in enumerate(images):
                for j, image in enumerate(sublist):
                    singles[(i, j)] = image.unsqueeze(0)
                    index[(i, j)] = ((i, j), 0)
            return singles, index
        singles = {pos: image.unsqueeze(0) for pos, image in enumerate(images)}
        return singles, {pos: (pos, 0) for pos in singles}

    # Bucket by shape (single nesting level), then stack each bucket.
    grouped, index = _group_images_by_shape(images, is_nested)
    stacked = {shape: torch.stack(same_shape, dim=0) for shape, same_shape in grouped.items()}

    return stacked, index
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def reorder_images(
    processed_images: dict[tuple[int, int], "torch.Tensor"],
    grouped_images_index: dict[Union[int, tuple[int, int]], tuple[tuple[int, int], int]],
    is_nested: bool = False,
) -> Union[list["torch.Tensor"], "torch.Tensor"]:
    """Restore the original image order after shape-grouped processing.

    ``grouped_images_index`` (from ``group_images_by_shape``) maps each
    original position to ``(group_key, position_in_group)``; this inverts the
    grouping, returning a flat list or a nested list-of-lists.
    """
    if is_nested:
        return _reconstruct_nested_structure(grouped_images_index, processed_images)

    ordered = []
    for flat_idx in range(len(grouped_images_index)):
        group_key, position = grouped_images_index[flat_idx]
        ordered.append(processed_images[group_key][position])
    return ordered
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class OpenPanguVLImageProcessorFast(Qwen2VLImageProcessorFast):
    """Fast image processor for OpenPangu-VL.

    Subclasses the Qwen2-VL fast image processor but:
    * uses a temporal patch size of 1 (each image is a single temporal frame),
    * upscales very small images so their shorter edge reaches ``min_edge``
      before patchification (see ``_prepare_input_images``).
    """

    # One frame per temporal patch; the temporal padding branch in
    # _preprocess is therefore never taken (shape[1] % 1 == 0 always).
    temporal_patch_size = 1
    # Images with width or height <= min_pxl are treated as "too small"
    # and upscaled in _prepare_input_images.
    min_pxl = 28
    # Target size of the shorter edge when upscaling a too-small image.
    min_edge = 56
    dtype = torch.bfloat16

    def _prepare_input_images(
        self,
        images,
        do_convert_rgb,
        input_data_format,
        device,
    ) -> list["torch.Tensor"]:
        """
        Prepare the input images for processing.

        Upscales any image whose width or height is <= ``min_pxl`` so that its
        shorter side becomes ``min_edge`` (aspect ratio preserved), then runs
        the inherited per-image conversion (``self._process_image``).
        """
        images = self._prepare_images_structure(images)
        process_image_fn = partial(
            self._process_image,
            do_convert_rgb=do_convert_rgb,
            input_data_format=input_data_format,
            device=device,
        )

        processed_images = []
        for image in images:
            # NOTE(review): image.size[0]/[1] is indexed as (width, height),
            # which matches PIL images — confirm inputs here are PIL.
            if image.size[0] <= OpenPanguVLImageProcessorFast.min_pxl or image.size[1] <= OpenPanguVLImageProcessorFast.min_pxl:

                if image.size[0] >= image.size[1]:
                    # Landscape (or square): height is the shorter side,
                    # scale so height == min_edge.
                    aspect_ratio = OpenPanguVLImageProcessorFast.min_edge * 1.0 / image.size[1]
                    new_image_height = OpenPanguVLImageProcessorFast.min_edge
                    new_image_width = int(aspect_ratio * image.size[0])
                else:
                    # Portrait: width is the shorter side, scale so
                    # width == min_edge.
                    aspect_ratio = OpenPanguVLImageProcessorFast.min_edge * 1.0 / image.size[0]
                    new_image_height = int(aspect_ratio * image.size[1])
                    new_image_width = OpenPanguVLImageProcessorFast.min_edge
                image = image.resize((new_image_width, new_image_height))

            processed_images.append(process_image_fn(image))
        return processed_images

    def preprocess(
        self,
        images = None,
        videos = None,
        do_resize = None,
        size = None,
        resample = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        min_pixels = None,
        max_pixels = None,
        patch_size = None,
        temporal_patch_size = None,
        merge_size = None,
        do_convert_rgb = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        input_data_format = None,
        device = None,
        disable_grouping = False,
        **kwargs,
    ):
        """Entry point: resolve parameters and run the image pipeline.

        Returns the BatchFeature produced by ``_process_images`` (containing
        ``pixel_values`` and ``image_grid_thw``).
        """
        # The caller-supplied temporal_patch_size is deliberately overridden
        # by the class constant (always 1 for this processor).
        temporal_patch_size=OpenPanguVLImageProcessorFast.temporal_patch_size
        params = self._resolve_preprocess_params(
            do_resize=do_resize,
            size=size,
            min_pixels=min_pixels,
            max_pixels=max_pixels,
            resample=resample,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            merge_size=merge_size,
            do_convert_rgb=do_convert_rgb,
        )

        data = self._process_images(
            images,
            params,
            input_data_format,
            device,
            disable_grouping,
            return_tensors
        )

        return data

    def _resolve_preprocess_params(self, **kwargs):
        """Merge call-site kwargs with instance defaults into a namespace.

        Any kwarg that is None falls back to the attribute of the same name
        on ``self``. ``size`` is normalized into a SizeDict with
        shortest/longest-edge bounds; mean/std become tuples (or None).
        """
        params = SimpleNamespace()
        for key, value in kwargs.items():
            setattr(params, key, value if value is not None else getattr(self, key))
        if params.size is None:
            params.size = {"shortest_edge": params.min_pixels, "longest_edge": params.max_pixels}
        params.size = SizeDict(**params.size)
        params.image_mean = tuple(params.image_mean) if params.image_mean else None
        params.image_std = tuple(params.image_std) if params.image_std else None
        return params

    def _process_images(self, images, params, input_data_format, device, disable_grouping, return_tensors):
        """Validate, prepare, and patchify a (possibly nested) image input."""
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image type.")

        images = self._prepare_input_images(
            images=images,
            do_convert_rgb=params.do_convert_rgb,
            input_data_format=input_data_format,
            device=device,
        )

        data = self._preprocess(
            images=images,
            do_resize=params.do_resize,
            size=params.size,
            interpolation=pil_torch_interpolation_mapping.get(params.resample, params.resample),
            do_rescale=params.do_rescale,
            rescale_factor=params.rescale_factor,
            do_normalize=params.do_normalize,
            image_mean=params.image_mean,
            image_std=params.image_std,
            patch_size=params.patch_size,
            temporal_patch_size=params.temporal_patch_size,
            merge_size=params.merge_size,
            do_convert_rgb=params.do_convert_rgb,
            input_data_format=input_data_format,
            device=device,
            disable_grouping=disable_grouping,
            return_tensors=return_tensors,
        )

        return data

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        patch_size: int,
        temporal_patch_size: int,
        merge_size: int,
        disable_grouping: Optional[bool],
        return_tensors,
        **kwargs,
    ):
        """Resize images and flatten them into ViT-style patch sequences.

        Returns a BatchFeature with:
        * ``pixel_values``: all patches concatenated along dim 0, each row of
          length channel * temporal_patch_size * patch_size * patch_size,
        * ``image_grid_thw``: one (grid_t, grid_h, grid_w) triple per image.
        """
        # Group images by size for batched resizing
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            height, width = stacked_images.shape[-2:]
            if do_resize:
                # smart_resize snaps dimensions to multiples of
                # patch_size * merge_size within the pixel budget.
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                stacked_images = self.resize(
                    image=stacked_images,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Group images by size for further processing
        # Needed in case do_resize is False, or resize returns images with different sizes
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        processed_grids = {}
        for shape, stacked_images in grouped_images.items():
            resized_height, resized_width = stacked_images.shape[-2:]
            # Fused rescale and normalize
            # patches = rescale_and_normalize(
            #     stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            # )
            # NOTE(review): rescale/normalize is intentionally bypassed here
            # (the call above is commented out) — presumably handled
            # elsewhere in the pipeline; confirm before re-enabling.
            patches = stacked_images
            if patches.ndim == 4:
                # add a temporal dimension if we have images
                patches = patches.unsqueeze(1)
            if patches.shape[1] % temporal_patch_size != 0:
                # Pad the temporal dimension by repeating the last frame.
                # With temporal_patch_size == 1 this branch never triggers.
                repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
                patches = torch.cat([patches, repeats], dim=1)
            batch_size, grid_t, channel = patches.shape[:3]
            grid_t = grid_t // temporal_patch_size
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size

            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            # Reorder dimensions to group grid and patch information for subsequent flattening.
            # (batch, grid_t, grid_h, grid_w, merge_h, merge_w, channel, temp_patch_size, patch_h, patch_w)
            patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * temporal_patch_size * patch_size * patch_size,
            )

            processed_images_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size

        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_grids = reorder_images(processed_grids, grouped_images_index)
        pixel_values = torch.cat(processed_images, dim=0)
        image_grid_thw = torch.tensor(processed_grids)

        return BatchFeature(
            data={"pixel_values": pixel_values,
                  "image_grid_thw": image_grid_thw}, tensor_type=return_tensors
        )
|
inference/generate.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Minimal demo: load openPangu-VL-7B and run one text-only chat turn."""
from transformers import AutoModelForCausalLM, AutoProcessor
from qwen_vl_utils import process_vision_info

model_path = "../"

print(f"LOAD MODEL FROM: {model_path}")

# Remap checkpoint weight prefixes onto the HF module layout: the vision
# tower goes under model.visual, everything else under model.language_model.
key_mapping = {
    "^visual": "model.visual",
    r"^model(?!\.(language_model|visual))": "model.language_model",
}

model = (
    AutoModelForCausalLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        torch_dtype="auto",
        key_mapping=key_mapping,
    )
    .eval()
    .cuda()
)

conversation = [
    {
        "role": "system",
        "content": [
            {"type": "text", "text": "你是华为公司开发的多模态大模型,名字是openPangu-VL-7B。你能够处理文本和视觉模态的输入,并给出文本输出。"},
        ],
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "你好,你是谁?"},
        ],
    },
]

processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
chat_text = processor.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)

image_inputs, video_inputs = process_vision_info(conversation)

inputs = processor(
    text=[chat_text],
    images=image_inputs,
    videos=video_inputs,
    padding=False,
    return_tensors="pt",
).to(model.device)

generated_ids = model.generate(**inputs, max_new_tokens=128)
# Drop the prompt tokens so only the newly generated tokens are decoded.
trimmed_ids = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
res = processor.batch_decode(
    trimmed_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(f"OUTPUT: {res}")
|
inference/requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch==2.5.1
|
| 2 |
+
torch_npu==2.5.1
|
| 3 |
+
transformers==4.53.2
|
| 4 |
+
qwen_vl_utils==0.0.14
|
inference/vllm_ascend/examples/quick_start.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import base64
|
| 3 |
+
import os
|
| 4 |
+
import requests
|
| 5 |
+
import json
|
| 6 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 7 |
+
import ast
|
| 8 |
+
import re
|
| 9 |
+
def encode_image_to_base64(img_path):
    """Read an image file and return its contents as a Base64 string.

    :param img_path: image path
    :return: Base64 encoding of the file contents, or ``None`` when the
        file cannot be read.
    """
    try:
        # Keep the try body minimal: only the file I/O can reasonably fail.
        with open(img_path, 'rb') as img_file:
            img_data = img_file.read()
    except OSError as e:
        # Best-effort by design: report the problem and let the caller
        # handle the None result (was a blanket `except Exception`, which
        # also hid programming errors).
        print(f"Encoding Error: {e}")
        return None
    return base64.b64encode(img_data).decode('utf-8')
|
| 22 |
+
|
| 23 |
+
def infer_image_with_api(image_path, prompt):
    """POST an image + text prompt to the local OpenAI-compatible endpoint.

    :param image_path: path of the image file (sent Base64-embedded as a
        data URL).
    :param prompt: user prompt accompanying the image.
    :return: assistant message content of the first choice.
    """
    url = "http://127.0.0.1:8000/v1/chat/completions"
    base64_image = encode_image_to_base64(image_path)
    payload = json.dumps({
        "messages": [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "你是华为公司开发的多模态大模型,名字是openPangu-VL-7B。你能够处理文本和视觉模态的输入,并给出文本输出。"},
                ]
            },
            {
                "role": "user",
                "content": [
                    # NOTE(review): "image/jpg" is not a registered MIME type
                    # ("image/jpeg" is); most servers accept it anyway —
                    # left unchanged to avoid altering the request payload.
                    {"type": "image_url", "image_url": {"url": f"data:image/jpg;base64,{base64_image}"}},
                    {"type": "text", "text": prompt},
                ]
            }
        ],
        "model": "pangu_vl",
        "max_tokens": 2048,
        "temperature": 0,
        "stream": False,
    })

    headers = {
        'Content-Type': 'application/json'
    }

    # Timeout added so a stalled server cannot hang the client forever
    # (requests has no default timeout).
    response = requests.request("POST", url, headers=headers, data=payload, timeout=300)
    return json.loads(response.text)["choices"][0]["message"]["content"]
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def infer_message_with_api(prompt):
    """POST a text-only prompt to the local OpenAI-compatible endpoint.

    :param prompt: user prompt text.
    :return: assistant message content of the first choice.
    """
    url = "http://127.0.0.1:8000/v1/chat/completions"
    payload = json.dumps({
        "messages": [
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": "你是华为公司开发的多模态大模型,名字是openPangu-VL-7B。你能够处理文本和视觉模态的输入,并给出文本输出。"},
                ]
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                ]
            }
        ],
        "model": "pangu_vl",
        "max_tokens": 2048,
        "temperature": 0,
        "stream": False,
    })

    headers = {
        'Content-Type': 'application/json'
    }

    # Timeout added so a stalled server cannot hang the client forever
    # (requests has no default timeout).
    response = requests.request("POST", url, headers=headers, data=payload, timeout=300)
    return json.loads(response.text)["choices"][0]["message"]["content"]
|
| 87 |
+
|
| 88 |
+
# Smoke test: ask the served model a simple identity question and print
# the reply.
res = infer_message_with_api("你好,你是谁?")
print(res)
|