Update app.py
Browse files
app.py
CHANGED
|
@@ -5,18 +5,20 @@ from langchain_openai import ChatOpenAI
|
|
| 5 |
from transformers import pipeline
|
| 6 |
|
| 7 |
# Choose model
|
| 8 |
-
model_name = "
|
| 9 |
|
| 10 |
# Load the chosen LLM model
|
| 11 |
-
llm = pipeline("text-generation", model=
|
| 12 |
|
| 13 |
#Vectara config:
|
| 14 |
# customer_id =
|
| 15 |
# corpus_id =
|
| 16 |
# api_key =
|
|
|
|
| 17 |
|
| 18 |
# DSPy-based prompt generation
|
| 19 |
from dspy.agents import Agent
|
|
|
|
| 20 |
from dspy.utils import SentenceSplitter, SentimentAnalyzer, NamedEntityRecognizer
|
| 21 |
|
| 22 |
def dspy_generate_agent_prompts(prompt):
|
|
@@ -177,10 +179,10 @@ def query_vectara(text):
|
|
| 177 |
# Define the main function to be used with Gradio
|
| 178 |
def generate_outputs(user_prompt):
|
| 179 |
# 1. Process prompt with langchain (replace with your actual implementation)
|
| 180 |
-
processed_prompt =
|
| 181 |
|
| 182 |
# 2. Generate synthetic data using DSPy's distributed computing capabilities
|
| 183 |
-
synthetic_data = generate_synthetic_data_distributed(
|
| 184 |
|
| 185 |
# 3. Combine user prompt and synthetic data
|
| 186 |
combined_data = f"{user_prompt}\n{synthetic_data}"
|
|
|
|
from transformers import pipeline

# Choose model
model_name = "dolphin-phi"

# Load the chosen LLM model.
# FIX: the original passed `model=dolphin-phi` (a bare, undefined name),
# which raises NameError. The model identifier must be the string bound
# to `model_name` above.
llm = pipeline("text-generation", model=model_name)

# Vectara config:
# customer_id =
# corpus_id =
# api_key =
import requests

# DSPy-based prompt generation
from dspy.agents import Agent
from dspy import spawn_processes
from dspy.utils import SentenceSplitter, SentimentAnalyzer, NamedEntityRecognizer
|
| 23 |
|
| 24 |
def dspy_generate_agent_prompts(prompt):
|
|
|
|
| 179 |
# Define the main function to be used with Gradio
|
| 180 |
def generate_outputs(user_prompt):
|
| 181 |
# 1. Process prompt with langchain (replace with your actual implementation)
|
| 182 |
+
# processed_prompt = dspy_generate_agent_prompts(user_prompt) # Replaced langchain logic with DSPy function below
|
| 183 |
|
| 184 |
# 2. Generate synthetic data using DSPy's distributed computing capabilities
|
| 185 |
+
synthetic_data = generate_synthetic_data_distributed(user_prompt)
|
| 186 |
|
| 187 |
# 3. Combine user prompt and synthetic data
|
| 188 |
combined_data = f"{user_prompt}\n{synthetic_data}"
|