samithcs committed on
Commit
ce80d21
·
verified ·
1 Parent(s): d97666e

llm folder added

Browse files
llm/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .model_registry import get_llm
llm/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (197 Bytes). View file
 
llm/__pycache__/llm_base.cpython-313.pyc ADDED
Binary file (691 Bytes). View file
 
llm/__pycache__/model_registry.cpython-313.pyc ADDED
Binary file (1.32 kB). View file
 
llm/llm_base.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
class LLMBase:
    """Abstract interface shared by all LLM backends.

    Concrete backends must override :meth:`generate`; calling it on the
    base class always raises ``NotImplementedError``.
    """

    def generate(self, prompt: str, max_new_tokens: int = 200, **kwargs) -> str:
        """Produce a text completion for *prompt*. Must be overridden."""
        # Deliberate stub: subclasses provide the actual generation logic.
        raise NotImplementedError("Must implement in subclass.")
llm/model_registry.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llm.llm_base import LLMBase
2
+ from transformers import pipeline
3
+
4
class LocalLLM(LLMBase):
    """LLM backend that runs a Hugging Face ``text-generation`` pipeline locally.

    Parameters
    ----------
    model_name : str
        Hugging Face model id to load (defaults to TinyLlama 1.1B chat).
    device : str
        Device passed to ``transformers.pipeline`` (e.g. ``"cpu"``) —
        NOTE(review): accepted values depend on the installed transformers
        version; confirm against its docs.
    **kwargs
        Forwarded verbatim to ``transformers.pipeline``.
    """

    def __init__(self, model_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0", device="cpu", **kwargs):
        self.pipe = pipeline("text-generation", model=model_name, device=device, **kwargs)

    def generate(self, prompt: str, max_new_tokens: int = 200, **kwargs) -> str:
        """Generate a completion for *prompt* and return the generated text.

        Bug fix: ``**kwargs`` were previously accepted but silently dropped;
        they are now forwarded to the pipeline call so options such as
        ``temperature`` or ``do_sample`` actually take effect.
        """
        result = self.pipe(prompt, max_new_tokens=max_new_tokens, **kwargs)
        # Pipeline returns a list of dicts; the first entry holds the text.
        return result[0]["generated_text"]
10
+
11
def get_llm(model_name: str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0", **kwargs):
    """Factory returning a ready-to-use local LLM backend.

    Any extra keyword arguments are forwarded to the ``LocalLLM``
    constructor (and from there to the underlying pipeline).
    """
    backend = LocalLLM(model_name=model_name, **kwargs)
    return backend