Commit 569f945 · 1 parent: 6a9d6ae

Initial commit

Files changed:
- app.py (+6, -19)
- requirements.txt (+11, -12)
app.py
CHANGED

@@ -20,15 +20,15 @@ if not hf_token:
 login(token=hf_token)
 logger.debug("Hugging Face login successful")
 
-# Load model
-logger.debug("Loading …")
+# Load CodeLlama model
+logger.debug("Loading meta-llama/CodeLlama-7b-Instruct-hf model")
 try:
-    tokenizer = AutoTokenizer.from_pretrained("…")
-    model = AutoModelForCausalLM.from_pretrained("…")
+    tokenizer = AutoTokenizer.from_pretrained("meta-llama/CodeLlama-7b-Instruct-hf", cache_dir="/tmp/hf_cache")
+    model = AutoModelForCausalLM.from_pretrained("meta-llama/CodeLlama-7b-Instruct-hf", cache_dir="/tmp/hf_cache", torch_dtype=torch.bfloat16, device_map="auto")
     code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
-    logger.debug("…")
+    logger.debug("meta-llama/CodeLlama-7b-Instruct-hf model loaded")
 except Exception as e:
-    logger.error(f"Failed to load …")
+    logger.error(f"Failed to load CodeLlama model: {e}")
     raise
 
 # Free memory

@@ -97,19 +97,6 @@ def get_history():
         logger.error(f"History retrieval error: {e}")
         return f"Error retrieving history: {e}"
 
-# Debug function to test the model
-def debug_model(text, language):
-    logger.debug(f"Debugging model with input: {text}, language: {language}")
-    try:
-        prompt = f"Write a complete, correct, and well-explained code in {language} to: {text}"
-        logger.debug(f"Generated prompt: {prompt}")
-        result = code_gen(prompt, max_new_tokens=512, temperature=0.6, top_p=0.95, do_sample=True)[0]['generated_text']
-        logger.debug(f"Generated output: {result}")
-        return f"Debug: Prompt: {prompt}\nOutput: {result}"
-    except Exception as e:
-        logger.error(f"Debug error: {e}")
-        return f"Debug error: {e}"
-
 # Generate code
 def generate_code(text, language):
     logger.debug(f"Processing input: {text}, language: {language}")
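For context on the `code_gen` pipeline this commit wires up: the Instruct variants of CodeLlama were trained on the Llama-2 chat format, so callers usually wrap requests in [INST] ... [/INST] tags. The sketch below is illustrative only; the prompt text is taken from the removed debug_model, while the [INST] wrapping and the helper name ask_code_llama are assumptions, not something this commit shows.

# Minimal sketch, not part of the commit: one way to call the code_gen
# pipeline defined in app.py above. The [INST] wrapping follows the
# Llama-2 chat convention used by the CodeLlama Instruct models (an
# assumption here; the commit's generate_code body is not shown in full).
def ask_code_llama(text, language):
    prompt = f"[INST] Write a complete, correct, and well-explained code in {language} to: {text} [/INST]"
    output = code_gen(prompt, do_sample=True)[0]["generated_text"]
    # text-generation pipelines echo the prompt by default, so strip it off.
    return output[len(prompt):].strip()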
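The "# Free memory" comment that closes the first hunk points at a cleanup step outside the diff context. A common pattern for it on a resource-constrained Space looks like the following; this is purely an assumption about what that section contains.

import gc
import torch

# Hypothetical body for the "Free memory" section; the actual code is
# outside the lines shown in this diff.
def free_memory():
    gc.collect()                   # release unreferenced Python objects
    if torch.cuda.is_available():
        torch.cuda.empty_cache()   # hand cached CUDA blocks back to the driver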
requirements.txt
CHANGED

@@ -1,12 +1,11 @@
-datasets
-
-hf_xet
-hf-transfer
-protobuf
-click
-pydantic
-torch
-numpy
-transformers
-gradio
-sentencepiece
+datasets
+huggingface_hub
+hf_xet
+hf-transfer
+protobuf
+click
+pydantic
+torch
+numpy
+transformers
+gradio
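huggingface_hub is now listed explicitly rather than only arriving as a transitive dependency of transformers, which matches the login(token=hf_token) call in app.py; sentencepiece is dropped. A minimal sketch of that login usage, where the HF_TOKEN environment-variable name is an assumption:

import os
from huggingface_hub import login

# Authenticate so gated repos such as meta-llama/CodeLlama-7b-Instruct-hf
# can be downloaded; the HF_TOKEN env-var name is an assumption here.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)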