# NLP-RAG/models/llama_3_8b.py
from huggingface_hub import InferenceClient


class Llama3_8B:
    """Thin wrapper around the Hugging Face Inference API for Meta-Llama-3-8B-Instruct."""

    def __init__(self, token):
        # Authenticated client for the hosted inference endpoint.
        self.client = InferenceClient(token=token)
        self.model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

    def generate_stream(self, prompt, max_tokens=1500, temperature=0.1):
        # Stream the chat completion, yielding content chunks as they arrive.
        for message in self.client.chat_completion(
            model=self.model_id,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            stream=True,
        ):
            if message.choices:
                content = message.choices[0].delta.content
                if content:
                    yield content

    def generate(self, prompt, max_tokens=500, temperature=0.1):
        # Convenience wrapper: join the streamed chunks into a single string.
        return "".join(self.generate_stream(prompt, max_tokens=max_tokens, temperature=temperature))