Paulwalker4884 committed on
Commit
569f945
·
1 Parent(s): 6a9d6ae

Initial commit

Browse files
Files changed (2) hide show
  1. app.py +6 -19
  2. requirements.txt +11 -12
app.py CHANGED
@@ -20,15 +20,15 @@ if not hf_token:
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
- # لود مدل Qwen2-1.5B-Instruct
24
- logger.debug("Loading Qwen/Qwen2-1.5B-Instruct model")
25
  try:
26
- tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-1.5B-Instruct", cache_dir="/tmp/hf_cache")
27
- model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-1.5B-Instruct", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
- logger.debug("Qwen/Qwen2-1.5B-Instruct model loaded")
30
  except Exception as e:
31
- logger.error(f"Failed to load Qwen2-1.5B-Instruct model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه
@@ -97,19 +97,6 @@ def get_history():
97
  logger.error(f"History retrieval error: {e}")
98
  return f"خطا در دریافت تاریخچه: {e}"
99
 
100
- # تابع دیباگ برای تست مدل
101
- def debug_model(text, language):
102
- logger.debug(f"Debugging model with input: {text}, language: {language}")
103
- try:
104
- prompt = f"Write a complete, correct, and well-explained code in {language} to: {text}"
105
- logger.debug(f"Generated prompt: {prompt}")
106
- result = code_gen(prompt, max_new_tokens=512, temperature=0.6, top_p=0.95, do_sample=True)[0]['generated_text']
107
- logger.debug(f"Generated output: {result}")
108
- return f"Debug: Prompt: {prompt}\nOutput: {result}"
109
- except Exception as e:
110
- logger.error(f"Debug error: {e}")
111
- return f"Debug error: {e}"
112
-
113
  # تولید کد
114
  def generate_code(text, language):
115
  logger.debug(f"Processing input: {text}, language: {language}")
 
20
  login(token=hf_token)
21
  logger.debug("Hugging Face login successful")
22
 
23
+ # لود مدل CodeLlama
24
+ logger.debug("Loading meta-llama/CodeLlama-7b-Instruct-hf model")
25
  try:
26
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/CodeLlama-7b-Instruct-hf", cache_dir="/tmp/hf_cache")
27
+ model = AutoModelForCausalLM.from_pretrained("meta-llama/CodeLlama-7b-Instruct-hf", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
28
  code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
+ logger.debug("meta-llama/CodeLlama-7b-Instruct-hf model loaded")
30
  except Exception as e:
31
+ logger.error(f"Failed to load CodeLlama model: {e}")
32
  raise
33
 
34
  # آزادسازی حافظه
 
97
  logger.error(f"History retrieval error: {e}")
98
  return f"خطا در دریافت تاریخچه: {e}"
99
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  # تولید کد
101
  def generate_code(text, language):
102
  logger.debug(f"Processing input: {text}, language: {language}")
requirements.txt CHANGED
@@ -1,12 +1,11 @@
1
- datasets==2.14.5
2
- huggingface-hub>=0.19
3
- hf_xet>=1.0.0,<2.0.0
4
- hf-transfer>=0.1.4
5
- protobuf<4
6
- click<8.1
7
- pydantic==2.7.0
8
- torch==2.0.1
9
- numpy==1.24.3
10
- transformers==4.35.2
11
- gradio==4.31.0
12
- sentencepiece
 
1
+ datasets
2
+ huggingface_hub
3
+ hf_xet
4
+ hf-transfer
5
+ protobuf
6
+ click
7
+ pydantic
8
+ torch
9
+ numpy
10
+ transformers
11
+ gradio