Paulwalker4884 committed on
Commit
6a9d6ae
·
1 Parent(s): 68780f0

Initial commit

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -20,15 +20,15 @@ if not hf_token:
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
- # لود مدل Gemma 3 1B
24
- logger.debug("Loading google/gemma-3-1b-it model")
25
  try:
26
- tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it", cache_dir="/tmp/hf_cache")
27
- model = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
- logger.debug("google/gemma-3-1b-it model loaded")
30
  except Exception as e:
31
- logger.error(f"Failed to load Gemma 3 1B model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه
 
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
+ # لود مدل Qwen2-1.5B-Instruct
24
+ logger.debug("Loading Qwen/Qwen2-1.5B-Instruct model")
25
  try:
26
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-1.5B-Instruct", cache_dir="/tmp/hf_cache")
27
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-1.5B-Instruct", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
+ logger.debug("Qwen/Qwen2-1.5B-Instruct model loaded")
30
  except Exception as e:
31
+ logger.error(f"Failed to load Qwen2-1.5B-Instruct model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه