Paulwalker4884 committed on
Commit
68780f0
·
1 Parent(s): 3948567

Initial commit

Browse files
Files changed (1) hide show
  1. app.py +7 -9
app.py CHANGED
@@ -20,15 +20,15 @@ if not hf_token:
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
- # لود مدل Gemma 3 4B
24
- logger.debug("Loading google/gemma-3-4b-it model")
25
  try:
26
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-4b-it", cache_dir="/tmp/hf_cache")
27
- model = AutoModelForCausalLM.from_pretrained("google/gemma-3-4b-it", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
- logger.debug("google/gemma-3-4b-it model loaded")
30
  except Exception as e:
31
- logger.error(f"Failed to load Gemma 3 model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه
@@ -71,7 +71,7 @@ def save_to_memory(user_input, task_type, prompt, response):
71
  logger.debug("Saved to DB")
72
  except Exception as e:
73
  logger.error(f"DB Save Error: {e}")
74
- pass # نذار خطای دیتابیس جلوی ادامه کار رو بگیره
75
 
76
  # دریافت تاریخچه
77
  def get_history():
@@ -146,12 +146,10 @@ def christopher(user_input, language, show_history):
146
  logger.debug(f"Processing input: {user_input}, language: {language}, show_history: {show_history}")
147
  try:
148
  task_type = "نوشتن کد کامل"
149
- # تولید پرامپت و کد
150
  prompt, response = generate_code(user_input, language)
151
  if "error" in response.lower():
152
  history = get_history() if show_history else ""
153
  return response, history
154
- # ذخیره تو دیتابیس (اختیاری)
155
  save_to_memory(user_input, task_type, prompt, response)
156
  history = get_history() if show_history else ""
157
  return response, history
 
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
+ # لود مدل Gemma 3 1B
24
+ logger.debug("Loading google/gemma-3-1b-it model")
25
  try:
26
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it", cache_dir="/tmp/hf_cache")
27
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
+ logger.debug("google/gemma-3-1b-it model loaded")
30
  except Exception as e:
31
+ logger.error(f"Failed to load Gemma 3 1B model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه
 
71
  logger.debug("Saved to DB")
72
  except Exception as e:
73
  logger.error(f"DB Save Error: {e}")
74
+ pass
75
 
76
  # دریافت تاریخچه
77
  def get_history():
 
146
  logger.debug(f"Processing input: {user_input}, language: {language}, show_history: {show_history}")
147
  try:
148
  task_type = "نوشتن کد کامل"
 
149
  prompt, response = generate_code(user_input, language)
150
  if "error" in response.lower():
151
  history = get_history() if show_history else ""
152
  return response, history
 
153
  save_to_memory(user_input, task_type, prompt, response)
154
  history = get_history() if show_history else ""
155
  return response, history