Update app.py
app.py
CHANGED
@@ -78,11 +78,11 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
 
 # Ensure the model is on the GPU
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
+#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+#model.to(device)
 
 # Create Hugging Face Pipeline with the specified model and tokenizer
-hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer
+hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 # LangChain LLM using Hugging Face Pipeline
 llm = HuggingFacePipeline(pipeline=hf_pipeline)
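The change does two things: it comments out the manual device placement and it closes the previously unbalanced pipeline(...) call. Because the model is loaded with device_map="auto", accelerate already places the weights, and calling model.to(device) on a model dispatched that way can raise an error; the missing closing parenthesis was a syntax error that would have stopped app.py from loading at all. Below is a minimal sketch of the updated block, assuming the imports shown and a placeholder model_name; the rest of app.py is not visible in this diff, and the HuggingFacePipeline import path depends on the LangChain version installed in the Space.

# Minimal sketch of the updated block (not the full app.py).
# Assumptions: model_name is a placeholder, and the import paths below
# match the library versions installed in the Space.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_community.llms import HuggingFacePipeline  # older LangChain: from langchain.llms import HuggingFacePipeline

model_name = "some-org/some-causal-lm"  # placeholder; the real name is set earlier in app.py

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",       # accelerate places the weights, so no manual model.to(device)
    trust_remote_code=True,
)

# Create Hugging Face Pipeline with the specified model and tokenizer
hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# LangChain LLM using Hugging Face Pipeline
llm = HuggingFacePipeline(pipeline=hf_pipeline)

The resulting llm can then be used like any other LangChain LLM (for example via llm.invoke(...) on recent LangChain versions, or by passing it into a chain).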