import os
import time

import numpy as np
from PIL import Image

from gpu_arch import Chip
from ai import AIAccelerator
from custom_vram import CustomVRAM
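# Note: gpu_arch, ai, and custom_vram are the project's own modules exercised by this
# test (Chip, its AIAccelerator, and the CustomVRAM matrix store referenced in Test 3).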

def test_ai_integration():
    print("\n--- Testing AI Integration ---")

    # Test 1: Model Loading (BLIP-2 model)
    print("\nTest 1: Model Loading (BLIP-2)")
    try:
        # Initialize a Chip for model loading
        chip_for_loading = Chip(chip_id=0, vram_size_gb=10)
        ai_accelerator_for_loading = chip_for_loading.ai_accelerator

        # Load the BLIP-2 Flan-T5-XXL model and processor from Hugging Face Transformers
        from transformers import Blip2ForConditionalGeneration, Blip2Processor
        model_id = "Salesforce/blip2-flan-t5-xxl"
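        # blip2-flan-t5-xxl is a very large checkpoint; device_map="auto" lets
        # Hugging Face Accelerate place its weights across the available devices.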
        model = Blip2ForConditionalGeneration.from_pretrained(model_id, device_map="auto")
        processor = Blip2Processor.from_pretrained(model_id)

        ai_accelerator_for_loading.load_model(model_id, model, processor)
        print(f"Model '{model_id}' loaded successfully on chip 0.")
        assert ai_accelerator_for_loading.has_model(model_id), "Model not found in registry after loading."

    except Exception as e:
        print(f"Model loading test failed: {e}")
        return

    # Test 2: Multi-Chip Inference (BLIP-2, on all images in the sample_task folder)
    print("\nTest 2: Multi-Chip Inference (BLIP-2, all images in sample_task)")
    num_chips = 1  # increase to test inference across more chips
    chips = []
    ai_accelerators = []

    try:
        # Initialize multiple chips and their AI accelerators
        for i in range(num_chips):
            chip = Chip(chip_id=i, vram_size_gb=1)
            chips.append(chip)
            ai_accelerators.append(chip.ai_accelerator)
            ai_accelerators[i].load_model(model_id, model, processor)
            print(f"Model '{model_id}' loaded successfully on chip {i}.")

        # Get all image files in sample_task folder
        image_folder = os.path.join(os.path.dirname(__file__), '..', 'sample_task')
        image_files = [f for f in os.listdir(image_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif'))]
        image_files.sort()
        if not image_files:
            print("No images found in sample_task folder.")
            return

        # Perform inference on each image using all chips
        for img_name in image_files:
            img_path = os.path.join(image_folder, img_name)
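            # convert('RGB') normalises grayscale/RGBA inputs before they reach the processor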
            raw_image = Image.open(img_path).convert('RGB')
            print(f"\nRunning inference for image: {img_name}")
            for i, accelerator in enumerate(ai_accelerators):
                print(f"Performing inference on chip {i}...")
                start_time = time.time()
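                # inference() is the accelerator's captioning entry point here; it is
                # expected to return a string result (asserted below).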
                result = accelerator.inference(model_id, raw_image)
                elapsed = time.time() - start_time
                print(f"Inference result from chip {i} on {img_name}: {result}")
                print(f"Inference time for chip {i} on {img_name}: {elapsed:.3f} seconds")
                assert result is not None, f"Inference returned None for chip {i} on {img_name}."
                assert isinstance(result, str), f"Inference result from chip {i} on {img_name} is not a string."
        print("Multi-chip inference test on all images successful.")

    except Exception as e:
        print(f"Multi-chip inference test failed: {e}")
        return

    # Test 3: Matrix Operations (using CustomVRAM) - still on a single chip
    # print("\nTest 3: Matrix Operations (using CustomVRAM)")
    # try:
    #     matrix_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    #     matrix_b = np.array([[5, 6], [7, 8]], dtype=np.float32)

    #     matrix_a_id = ai_accelerator_for_loading.load_matrix(matrix_a, "matrix_A")
    #     matrix_b_id = ai_accelerator_for_loading.load_matrix(matrix_b, "matrix_B")

    #     result_matrix_id = ai_accelerator_for_loading.matrix_multiply(matrix_a_id, matrix_b_id, "result_C")
    #     result_matrix = ai_accelerator_for_loading.get_matrix(result_matrix_id)

    #     print(f"Matrix A:\n{matrix_a}")
    #     print(f"Matrix B:\n{matrix_b}")
    #     print(f"Result Matrix C:\n{result_matrix}")

    #     expected_result = np.dot(matrix_a, matrix_b)
    #     assert np.array_equal(result_matrix, expected_result), "Matrix multiplication result incorrect."
    #     print("Matrix operations test successful.")

    # except Exception as e:
    #     print(f"Matrix operations test failed: {e}")
    #     return

    print("\n--- All AI Integration Tests Completed ---")

if __name__ == "__main__":
    test_ai_integration()