from transformers import pipeline
import os
from huggingface_hub import login

# Authenticate with the Hugging Face Hub so the gated Llama model can be downloaded.
access_token = os.getenv('hf_gated_model_access_token')
login(token=access_token)

pipe = None


def load_model():
    """Create the chat pipeline once and (re)seed the conversation with the system prompt."""
    global messages, pipe
    # Earlier experiment with a smaller model, kept for reference:
    # tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-270m-it")
    # model = AutoModelForCausalLM.from_pretrained("google/gemma-3-270m-it")
    # return pipeline("text2text-generation", model=model, tokenizer=tokenizer)
    prompt = """You are a helpful assistant that converts text into .ics files. \
You are not to respond with anything other than the raw contents of an .ics file, \
and you are not to wrap your response in markdown backticks. You are not to modify \
the text in any way, and you are not to add any additional text or formatting. Your \
response should be valid .ics file content that can be used to create calendar events. \
If you refuse to answer, return "null", without the quotes. Do not greet the user - \
greetings will not be seen and would only break the system. The text you will convert \
starts below:\n\n"""
    messages = [
        {"role": "system", "content": prompt},
    ]
    if pipe is None:
        # Loading a 3B-parameter model is slow, so build the pipeline only once.
        pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
    return pipe


def generate_text(text):
    global messages
    pipe = load_model()
    # Append the user turn; the original used `messages += {...}`, which extends a
    # list with the dict's *keys* rather than appending the dict itself.
    messages.append({"role": "user", "content": text})
    # The pipeline returns a list of results; with chat-style input, "generated_text"
    # holds the full conversation, whose last entry is the assistant's reply.
    output = pipe(messages)[0]["generated_text"][-1]["content"]
    if output == "null":
        raise Exception("Your input violates guidelines.")
    return output
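
# A minimal usage sketch, assuming this module is run directly; the input sentence
# and the output filename "event.ics" are illustrative, not part of the original.
if __name__ == "__main__":
    ics_content = generate_text("Dentist appointment on 2025-03-14 at 10:00.")
    # Persist the model's raw .ics output so a calendar app can import it.
    with open("event.ics", "w") as f:
        f.write(ics_content)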