# NOTE(review): the three lines below were residue from a Hugging Face Spaces
# status page ("Spaces: Sleeping / Sleeping") accidentally pasted into this
# file; kept here as a comment so the script stays valid Python.
# Quick smoke test for the LLM integration.
#
# Loads distilgpt2 on CPU via the transformers text-generation pipeline,
# generates one agricultural recommendation from a fixed prompt, and prints
# pass/fail markers so the result can be eyeballed from console output.
from transformers import pipeline
import warnings

# Silence transformers/torch deprecation noise so the ✅/❌ markers stand out.
warnings.filterwarnings('ignore')

print("Testing LLM integration...")
print("Loading model...")
try:
    # device=-1 forces CPU; distilgpt2 is small enough for a quick check.
    # NOTE: max_length is intentionally NOT set here — it conflicts with the
    # max_new_tokens argument passed at generation time below (transformers
    # warns, and recent versions error, when both are supplied).
    llm_generator = pipeline(
        "text-generation",
        model="distilgpt2",
        device=-1
    )
    print("✅ Model loaded successfully!")

    # Test generation — fixed prompt so runs are comparable (sampling aside).
    prompt = """As an agricultural expert for Maharashtra, provide specific farming recommendations:
Weather: 6 warning days with overcast conditions
Severity: WARNING conditions on Saturday, Tuesday, Wednesday, Thursday, Friday, Sunday.
Recommendations for farmers:
1. Crop protection:"""

    print("\nGenerating test recommendation...")
    response = llm_generator(
        prompt,
        max_new_tokens=100,      # generate up to 100 tokens beyond the prompt
        num_return_sequences=1,
        temperature=0.7,
        do_sample=True,
        pad_token_id=50256       # GPT-2's eos token; silences the missing-pad warning
    )

    # The pipeline echoes the prompt; keep only the newly generated tail.
    generated_text = response[0]['generated_text']
    recommendations = generated_text[len(prompt):].strip()

    print("\n✅ Generated Recommendation:")
    print(recommendations)
    print("\n✅ LLM integration test successful!")
except Exception as e:
    # Broad catch is deliberate: this is a top-level smoke test, and any
    # failure (model download, OOM, API change) should be reported, not crash.
    print(f"❌ Error: {e}")