Paulwalker4884 committed on
Commit
712d810
·
1 Parent(s): ee33c3e

Initial commit

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -20,15 +20,15 @@ if not hf_token:
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
- # لود مدل DeepSeek
24
- logger.debug("Loading deepseek/DeepSeek-Coder-6.7B-instruct model")
25
  try:
26
- tokenizer = AutoTokenizer.from_pretrained("deepseek/DeepSeek-Coder-6.7B-instruct", cache_dir="/tmp/hf_cache")
27
- model = AutoModelForCausalLM.from_pretrained("deepseek/DeepSeek-Coder-6.7B-instruct", cache_dir="/tmp/hf_cache", torch_dtype=torch.float16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
- logger.debug("deepseek/DeepSeek-Coder-6.7B-instruct model loaded")
30
  except Exception as e:
31
- logger.error(f"Failed to load DeepSeek model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه
 
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
+ # لود مدل CodeLlama
24
+ logger.debug("Loading meta-llama/CodeLlama-7b-Instruct-hf model")
25
  try:
26
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/CodeLlama-7b-Instruct-hf", cache_dir="/tmp/hf_cache")
27
+ model = AutoModelForCausalLM.from_pretrained("meta-llama/CodeLlama-7b-Instruct-hf", cache_dir="/tmp/hf_cache", torch_dtype=torch.float16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
+ logger.debug("meta-llama/CodeLlama-7b-Instruct-hf model loaded")
30
  except Exception as e:
31
+ logger.error(f"Failed to load CodeLlama model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه