Delete vidyut_model.py
vidyut_model.py
DELETED
@@ -1,35 +0,0 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-import torch
-
-# ===== White-label wrapper =====
-class VidyutForCausalLM(AutoModelForCausalLM):
-    """
-    Fully white-labeled wrapper.
-    Internally uses Mistral, but nothing exposed to users.
-    """
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        # Optionally override config display name
-        if hasattr(self.config, "architecture_name"):
-            self.config.name = "Vidyut"
-
-def load_vidyut(model_name="rapnss/VIA-01", device="auto"):
-    """
-    Loads VIA-01 as Vidyut.
-    Users never see Mistral.
-    """
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = VidyutForCausalLM.from_pretrained(
-        model_name,
-        torch_dtype=torch.float16,
-        device_map=device
-    )
-    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-    return tokenizer, model, pipe
-
-# ===== Example usage =====
-if __name__ == "__main__":
-    tokenizer, model, pipe = load_vidyut()
-    prompt = "Write a Python function to sort a list:"
-    output = pipe(prompt, max_new_tokens=15)[0]['generated_text']
-    print(output)