Update app.py
app.py CHANGED

@@ -16,15 +16,15 @@ example = [
 ]
 
 # change model to the finetuned one
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("loubnabnl/santacoder-code-to-text")
+model = AutoModelForCausalLM.from_pretrained("loubnabnl/santacoder-code-to-text", trust_remote_code=True)
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 def make_doctring(gen_prompt):
     return gen_prompt + f"\n\n\"\"\"\nExplanation:"
 
 def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
     set_seed(seed)
-    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
     prompt = make_doctring(gen_prompt)
     generated_text = pipe(prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_tokens)[0]['generated_text']
     return generated_text
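For context, here is a minimal sketch of what app.py looks like after this commit. Only the + side of the hunk is taken verbatim from the diff; the import line is an assumption (the diff covers lines 16-30 only), the model names on the removed lines are truncated in the diff and not recoverable, and the example list cut off at line 16 is omitted.

# app.py -- sketch of the file after this commit (imports are assumed;
# everything below the comment is taken from the + side of the hunk).
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, set_seed

# Load the fine-tuned model and build the pipeline once, at module level,
# so the Space pays the load cost at startup instead of on every request.
tokenizer = AutoTokenizer.from_pretrained("loubnabnl/santacoder-code-to-text")
model = AutoModelForCausalLM.from_pretrained("loubnabnl/santacoder-code-to-text", trust_remote_code=True)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

def make_doctring(gen_prompt):
    # Append an opening docstring so the model continues with an explanation.
    return gen_prompt + f"\n\n\"\"\"\nExplanation:"

def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    set_seed(seed)  # reproducible sampling
    prompt = make_doctring(gen_prompt)
    generated_text = pipe(prompt, do_sample=True, top_p=0.95,
                          temperature=temperature,
                          max_new_tokens=max_tokens)[0]['generated_text']
    return generated_text

Besides pointing at loubnabnl/santacoder-code-to-text, the substantive change is hoisting pipeline(...) out of code_generation, so the model is wrapped once at import time rather than re-wrapped on every call.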