hamedbabaeigiglou commited on
Commit
ee5cd87
·
verified ·
1 Parent(s): 7f932dd

minor update to readme

Browse files
Files changed (1) hide show
  1. README.md +27 -11
README.md CHANGED
@@ -58,34 +58,50 @@ ontology.load()
58
  data = ontology.extract()
59
  ```
60
 
 
61
  **How to use the loaded dataset for LLM4OL Paradigm task settings?**
62
  ``` python
 
63
  from ontolearner import BIBFRAME, LearnerPipeline, train_test_split
64
 
 
65
  ontology = BIBFRAME()
66
- ontology.load()
67
  data = ontology.extract()
68
 
69
  # Split into train and test sets
70
- train_data, test_data = train_test_split(data, test_size=0.2)
71
 
72
- # Create a learning pipeline (for RAG-based learning)
 
73
  pipeline = LearnerPipeline(
74
- task = "term-typing", # Other options: "taxonomy-discovery" or "non-taxonomy-discovery"
75
- retriever_id = "sentence-transformers/all-MiniLM-L6-v2",
76
- llm_id = "mistralai/Mistral-7B-Instruct-v0.1",
77
- hf_token = "your_huggingface_token" # Only needed for gated models
 
78
  )
79
 
80
- # Train and evaluate
81
- results, metrics = pipeline.fit_predict_evaluate(
82
  train_data=train_data,
83
  test_data=test_data,
84
- top_k=3,
85
- test_limit=10
 
86
  )
 
 
 
 
 
 
 
 
 
87
  ```
88
 
 
89
  For more detailed documentation, see the [![Documentation](https://img.shields.io/badge/Documentation-ontolearner.readthedocs.io-blue)](https://ontolearner.readthedocs.io)
90
 
91
 
 
58
  data = ontology.extract()
59
  ```
60
 
61
+
62
  **How to use the loaded dataset for LLM4OL Paradigm task settings?**
63
  ``` python
64
+ # Import core modules from the OntoLearner library
65
  from ontolearner import BIBFRAME, LearnerPipeline, train_test_split
66
 
67
+ # Load the BIBFRAME ontology, which contains concepts describing bibliographic resources, their properties, and categories
68
  ontology = BIBFRAME()
69
+ ontology.load() # Load entities, types, and structured term annotations from the ontology
70
  data = ontology.extract()
71
 
72
  # Split into train and test sets
73
+ train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
74
 
75
+ # Initialize a multi-component learning pipeline (retriever + LLM)
76
+ # This configuration enables a Retrieval-Augmented Generation (RAG) setup
77
  pipeline = LearnerPipeline(
78
+ retriever_id='sentence-transformers/all-MiniLM-L6-v2', # Dense retriever model for nearest neighbor search
79
+ llm_id='Qwen/Qwen2.5-0.5B-Instruct', # Lightweight instruction-tuned LLM for reasoning
80
+ hf_token='...', # Hugging Face token for accessing gated models
81
+ batch_size=32, # Batch size for training/prediction if supported
82
+ top_k=5 # Number of top retrievals to include in RAG prompting
83
  )
84
 
85
+ # Run the pipeline: training, prediction, and evaluation in one call
86
+ outputs = pipeline(
87
  train_data=train_data,
88
  test_data=test_data,
89
+ evaluate=True, # Compute metrics like precision, recall, and F1
90
+ task='term-typing' # Specifies the task
91
+ # Other options: "taxonomy-discovery" or "non-taxonomy-discovery"
92
  )
93
+
94
+ # Print final evaluation metrics
95
+ print("Metrics:", outputs['metrics'])
96
+
97
+ # Print the total time taken for the full pipeline execution
98
+ print("Elapsed time:", outputs['elapsed_time'])
99
+
100
+ # Print all outputs (including predictions)
101
+ print(outputs)
102
  ```
103
 
104
+
105
  For more detailed documentation, see the [![Documentation](https://img.shields.io/badge/Documentation-ontolearner.readthedocs.io-blue)](https://ontolearner.readthedocs.io)
106
 
107