File size: 1,177 Bytes
9d5b280
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import logging

from benchmarking import BenchmarkEvaluator

def main():
    """Run the benchmark for the configured model and print a results summary.

    Reads the benchmark dataset from FILE_PATH, runs it through
    BenchmarkEvaluator, and prints accuracy plus sample counts.
    Any failure is logged at the top-level boundary instead of crashing.
    """
    logger = logging.getLogger(__name__)

    # Configuration
    model_name = "aaditya/Llama3-OpenBioLLM-8B"
    # NOTE(review): the original referenced an undefined `model_type` here
    # (NameError at runtime). The commented-out example at the bottom of
    # this file uses ModelType.INSTRUCT — confirm the value expected by
    # BenchmarkEvaluator.
    model_type = "instruct"
    FILE_PATH = "_Benchmarking_DB.json"

    # Initialize evaluator
    evaluator = BenchmarkEvaluator(model_name, model_type)

    try:
        # Run benchmark
        results = evaluator.run_benchmark(FILE_PATH)

        # Print results
        print("\nBenchmark Results:")
        print(f"Accuracy: {results['accuracy']:.2%}")
        print(f"Total samples: {results['total_samples']}")
        print(f"Processed samples: {results['processed_samples']}")

    except Exception as e:
        # Top-level boundary: the original called an undefined `logger`
        # (NameError while handling the error). Log with traceback instead.
        logger.exception("Error during benchmarking: %s", e)

if __name__ == "__main__":
    main()


    # instruct_client = VLLMClient(
    #     model_path="meta-llama/Llama-3.2-3B-Instruct",
    #     model_type=ModelType.INSTRUCT
    # )
    
    # # Single prompt with instruct model
    # response = instruct_client.send_message(
    #     system="You are a helpful assistant.",
    #     content="What is the capital of France?",
    #     max_tokens=16000,
    #     temperature=0.0
    # )
    # print("Instruct model response:", response)