import os

from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Log in so the gated meta-llama model can be downloaded.
access_token = os.getenv('hf_gated_model_access_token')
login(token=access_token)

def load_model():
    global messages
    # Earlier approach: build the pipeline from an explicitly loaded model.
    # tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-270m-it")
    # model = AutoModelForCausalLM.from_pretrained("google/gemma-3-270m-it")
    # return pipeline("text2text-generation", model=model, tokenizer=tokenizer)
prompt="""You are a helpful assistant that converts text into .ics files. You are to not respond in anything other than the raw code for .ics files, and you are not to respond with markdown backticks. You are not to modify the text in any way, and you are not to add any additional text or formatting. Your response should be a valid .ics file content that can be used to create calendar events. In the event that you refuse to answer, you should return "null", without the quotes. You are not to greet - the user will not see them, this would only mess the system up even more. The text you will convert starts below:\n\n"""
    messages = [
        {"role": "system", "content": prompt},
    ]
    pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
    return pipe

def generate_text(text):
    global messages
    # output = pipe(prompt + text)[0]["generated_text"]
    pipe = load_model()  # note: this reloads the model on every call
    messages.append({"role": "user", "content": text})
    # With chat-style input, the pipeline returns the full conversation;
    # the assistant's reply is the last message in "generated_text".
    output = pipe(messages)[0]["generated_text"][-1]["content"]
    if output.strip() == "null":
        raise Exception("Your input violates guidelines.")
    else:
        return output
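
# Illustrative usage sketch (an assumption, not part of the original app):
# assumes the 'hf_gated_model_access_token' env var is set and the account
# has been granted access to meta-llama/Llama-3.2-3B-Instruct; the sample
# event text below is made up.
if __name__ == "__main__":
    ics = generate_text("Dentist appointment on 2025-03-14 at 09:30 for 45 minutes")
    print(ics)  # expected: raw .ics text, e.g. BEGIN:VCALENDAR ... END:VCALENDAR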