Paulwalker4884 committed on
Commit 51bf58b · 1 Parent(s): e9d2f90

Initial commit

Files changed (1)
  1. app.py +9 -6
app.py CHANGED
@@ -32,12 +32,12 @@ except Exception as e:
     raise
 
 # Load the translation model
-logger.debug("Loading facebook/m2m100_418M model")
+logger.debug("Loading Helsinki-NLP/opus-mt-fa-en model")
 try:
-    translate_tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M", cache_dir="/tmp/hf_cache")
-    translate_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/m2m100_418M", cache_dir="/tmp/hf_cache")
-    translate_gen = pipeline("translation", model=translate_model, tokenizer=translate_tokenizer, src_lang="fa", tgt_lang="en", max_length=300)
-    logger.debug("facebook/m2m100_418M model loaded")
+    translate_tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fa-en", cache_dir="/tmp/hf_cache")
+    translate_model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-fa-en", cache_dir="/tmp/hf_cache")
+    translate_gen = pipeline("translation", model=translate_model, tokenizer=translate_tokenizer, max_length=300)
+    logger.debug("Helsinki-NLP/opus-mt-fa-en model loaded")
 except Exception as e:
     logger.error(f"Failed to load translation model: {e}")
     raise
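This hunk swaps the multilingual facebook/m2m100_418M checkpoint for the fixed-direction Helsinki-NLP/opus-mt-fa-en MarianMT model, which is why the src_lang/tgt_lang arguments drop out of the pipeline call: the checkpoint itself encodes the Persian-to-English direction. A minimal sketch of the new loading path, assuming only that transformers, sentencepiece, and a torch backend are installed (the sample sentence and its output are illustrative, not from the app):

```python
# Minimal sketch of the changed loading path, outside the app's try/except.
# Helsinki-NLP/opus-mt-fa-en translates one fixed direction (fa -> en),
# so the pipeline needs no src_lang/tgt_lang, unlike the many-to-many M2M100.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fa-en", cache_dir="/tmp/hf_cache")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-fa-en", cache_dir="/tmp/hf_cache")
translate_gen = pipeline("translation", model=model, tokenizer=tokenizer, max_length=300)

# Each result is a dict with a "translation_text" key.
print(translate_gen("سلام دنیا")[0]["translation_text"])  # e.g. "Hello world"
```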
@@ -140,8 +140,11 @@ def translate_and_prompt(text, language):
 def generate_code(prompt):
     logger.debug(f"Generating code with prompt: {prompt}")
     try:
-        result = code_gen(prompt, max_length=512, num_beams=5)
+        result = code_gen(prompt, max_length=512, num_beams=5, do_sample=False)
         code_output = result[0]['generated_text']
+        if not code_output.strip():
+            logger.warning("Generated code is empty")
+            return "خطا: کد تولیدشده خالی است"  # "Error: the generated code is empty"
         logger.debug(f"Generated code: {code_output}")
         torch.cuda.empty_cache()
         return code_output
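This hunk makes the 5-beam decoding explicitly deterministic (do_sample=False is the generate default, so the flag documents rather than changes behavior) and guards against whitespace-only output before it reaches the caller. A standalone sketch of the guard, with code_gen passed in as a parameter because its construction is not shown in this diff, and with the try/except and torch.cuda.empty_cache() call omitted for brevity:

```python
# Standalone sketch of the new empty-output guard in generate_code.
# code_gen is assumed to be a transformers text-generation-style pipeline
# returning [{"generated_text": ...}]; the app builds it elsewhere in app.py.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def generate_code(prompt, code_gen):
    logger.debug(f"Generating code with prompt: {prompt}")
    result = code_gen(prompt, max_length=512, num_beams=5, do_sample=False)
    code_output = result[0]["generated_text"]
    if not code_output.strip():
        # strip() catches whitespace-only output, not just the empty string.
        logger.warning("Generated code is empty")
        # app.py returns this message in Persian: "خطا: کد تولیدشده خالی است"
        return "Error: the generated code is empty"
    logger.debug(f"Generated code: {code_output}")
    return code_output
```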
 