Upload app (Copy).py
app (Copy).py  +9 -0
CHANGED
@@ -173,6 +173,15 @@ def generate_response(model, human_prompt, tokenizer = None):
 
     messages.pop()
     return english, japanese
+
+from llama_cpp import Llama
+
+llm = Llama.from_pretrained(
+    repo_id="google/gemma-3-4b-it-qat-q4_0-gguf",
+    filename='gemma-3-4b-it-q4_0.gguf',
+    local_dir='/kaggle/working/model',
+    n_ctx = 2048
+)
 
 if __name__ == '__main__':
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
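The added lines load the QAT-quantized Gemma 3 4B instruct GGUF through llama-cpp-python at module import time, downloading it into /kaggle/working/model and giving it a 2048-token context window. A minimal sketch of how the resulting `llm` object could then be called elsewhere in app (Copy).py (the chat-completion call and example prompt below are assumptions for illustration, not part of this commit):

# Sketch only: llama-cpp-python's Llama objects expose an OpenAI-style chat API.
# The prompt text is a hypothetical example, not taken from app (Copy).py.
response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Translate to Japanese: Good morning."}],
    max_tokens=256,
)
print(response["choices"][0]["message"]["content"])

Note that Llama.from_pretrained fetches the file from the Hugging Face Hub, and since no n_gpu_layers argument is passed, the quantized model runs on CPU by default.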