tigeryfan committed
Commit 6a05b07 · verified · 1 Parent(s): 62f13b1

Update backend.py

Files changed (1)
backend.py +9 -7
backend.py CHANGED
@@ -6,23 +6,25 @@ from huggingface_hub import login
 access_token=os.getenv('hf_gated_model_access_token')
 login(token=access_token)
 
-prompt="""You are a helpful assistant that converts text into .ics files. You are to not respond in anything other than the raw code for .ics files, and you are not to respond with markdown backticks. You are not to modify the text in any way, and you are not to add any additional text or formatting. Your response should be a valid .ics file content that can be used to create calendar events. In the event that you refuse to answer, you should return "null", without the quotes. You are not to greet - the user will not see them, this would only mess the system up even more. The text you will convert starts below:\n\n"""
-messages = [
-    {"role": "system", "content": prompt},
-]
-
 def load_model():
+    global messages
     # tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-270m-it")
     # model = AutoModelForCausalLM.from_pretrained("google/gemma-3-270m-it")
     # return pipeline("text2text-generation", model=model, tokenizer=tokenizer)
+
+    prompt="""You are a helpful assistant that converts text into .ics files. You are to not respond in anything other than the raw code for .ics files, and you are not to respond with markdown backticks. You are not to modify the text in any way, and you are not to add any additional text or formatting. Your response should be a valid .ics file content that can be used to create calendar events. In the event that you refuse to answer, you should return "null", without the quotes. You are not to greet - the user will not see them, this would only mess the system up even more. The text you will convert starts below:\n\n"""
+    messages = [
+        {"role": "system", "content": prompt},
+    ]
 
     pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
     return pipe
 
-pipe = load_model()
-
 def generate_text(text):
+    global messages
+
     # output = pipe(prompt + text)[0]["generated_text"]
+    pipe = load_model()
     messages += {"role": "user", "content": text}
     output = pipe(messages)
     if output == "null":
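A note on the reworked generate_text: in Python, += on a list iterates its right-hand operand, so messages += {"role": "user", "content": text} extends the list with the dict's keys ('role', 'content') rather than appending the message, and a transformers text-generation pipeline returns a list of result dicts, so the comparison output == "null" can never be true. A minimal sketch of the conventional pattern, assuming a recent transformers release where chat-style input puts the reply at output[0]["generated_text"][-1]["content"] (the helper name generate_text_sketch is hypothetical, not part of the commit):

from transformers import pipeline

# Same gated model as the commit, so login(token=...) must have run first.
pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
messages = [{"role": "system", "content": "You convert text into .ics files."}]

def generate_text_sketch(text):
    # Append the user turn as a dict; messages += {...} would instead
    # extend the list with the dict's keys, 'role' and 'content'.
    messages.append({"role": "user", "content": text})
    out = pipe(messages)
    # Chat-style input returns [{"generated_text": [<full message list>]}];
    # the assistant reply is the content of the final message (assumption:
    # recent transformers chat-pipeline output format).
    reply = out[0]["generated_text"][-1]["content"]
    # The system prompt tells the model to answer "null" when it refuses.
    return None if reply.strip() == "null" else reply

Building the pipeline once at module scope, as the pre-commit revision did with pipe = load_model(), also avoids reloading the model on every generate_text call, which the committed version now does.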