Update README.md
Browse files
README.md
CHANGED
@@ -44,8 +44,14 @@ Gemma-2B Fine-Tuned Python Model is a deep learning model based on the Gemma-2B

| 44 |
| 45 | ## Inference
| 46 | ```python
| 47 | query = input('enter a query:')
| 48 | - prompt_template = """
| 49 | <start_of_turn>user based on given instruction create a solution\n\nhere are the instruction {query}
| 50 | <end_of_turn>\n<start_of_turn>model
| 51 | """

@@ -61,3 +67,4 @@ for i in tokenizer.decode(generated_ids[0], skip_special_tokens=True).split('<en

| 61 | ans+=i
| 62 | cleaned_output = output.replace('<start_of_turn>', '')
| 63 | print(cleaned_output)
| 44 |
| 45 | ## Inference
| 46 | ```python
| 47 | + # Load model directly
| 48 | + from transformers import AutoTokenizer, AutoModelForCausalLM
| 49 | +
| 50 | + tokenizer = AutoTokenizer.from_pretrained("suriya7/Gemma-2B-Finetuned-Python-Model")
| 51 | + model = AutoModelForCausalLM.from_pretrained("suriya7/Gemma-2B-Finetuned-Python-Model")
| 52 | +
| 53 | query = input('enter a query:')
| 54 | + prompt_template = f"""
| 55 | <start_of_turn>user based on given instruction create a solution\n\nhere are the instruction {query}
| 56 | <end_of_turn>\n<start_of_turn>model
| 57 | """

| 67 | ans+=i
| 68 | cleaned_output = output.replace('<start_of_turn>', '')
| 69 | print(cleaned_output)
| 70 | + ```