Update app.py

app.py CHANGED
@@ -1,4 +1,3 @@
-
 import os
 import gradio as gr
 import requests
@@ -23,23 +22,22 @@ try:
 except Exception:
     raise
 
-#
+# Gemini imports (evaluation only)
 try:
     import google.generativeai as genai
-    from langchain_google_genai import ChatGoogleGenerativeAI
+    from langchain_google_genai import ChatGoogleGenerativeAI
 except Exception:
     ChatGoogleGenerativeAI = None
     genai = None
 
-# Load
+# Load env vars
 load_dotenv()
-
 PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY", "")
 MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "http://localhost:5000")
 GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
-LITSERVE_ENDPOINT = os.environ.get("LITSERVE_ENDPOINT", "
+LITSERVE_ENDPOINT = os.environ.get("LITSERVE_ENDPOINT", "")
 
-# DagsHub
+# DagsHub + MLflow setup
 try:
     dagshub.init(
         repo_owner='prathamesh.khade20',
@@ -52,25 +50,17 @@ except Exception:
 mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
 mlflow.set_experiment("Maintenance-RAG-Chatbot")
 
-# -----------
-
-mlflow.log_params({
-    "pinecone_index": "rag-granite-index",
-    "embedding_model": "all-MiniLM-L6-v2",
-    "namespace": "rag-ns",
-    "top_k": 3,
-    "llm_endpoint": LITSERVE_ENDPOINT
-})
-mlflow.log_text("""
+# ----------- Prompt template -----------
+prompt_template = """
 You are a smart assistant. Based on the provided context, answer the question in 1–2 lines only.
 If the context has more details, summarize it concisely.
 Context:
 {context}
 Question: {question}
 Answer:
-"""
+"""
 
-# ----------- 1. Custom LLM for LitServe
+# ----------- 1. Custom LLM for LitServe (Lightning AI generator) -----------
 class LitServeLLM(LLM):
     endpoint_url: str
 
@@ -117,53 +107,58 @@ embeddings_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
 
 # ----------- 4. Context Retrieval -----------
 def get_retrieved_context(query: str, top_k=3):
+    start_time = time.time()
     query_embedding = embeddings_model.embed_query(query)
+    mlflow.log_metric("embedding_latency", time.time() - start_time)
+
     if index is None:
         return ""
+
+    start_time = time.time()
     results = index.query(
         namespace="rag-ns",
         vector=query_embedding,
        top_k=top_k,
         include_metadata=True
     )
+    mlflow.log_metric("pinecone_latency", time.time() - start_time)
+    mlflow.log_metric("retrieved_chunks", len(results['matches']))
+
     context_parts = [match['metadata']['text'] for match in results['matches']]
     return "\n".join(context_parts)
 
 # ----------- 5. LLM Chain Setup -----------
 model = LitServeLLM(endpoint_url=LITSERVE_ENDPOINT)
-
-prompt = PromptTemplate(
-    input_variables=["context", "question"],
-    template="""
-You are a smart assistant. Based on the provided context, answer the question in 1–2 lines only.
-If the context has more details, summarize it concisely.
-Context:
-{context}
-Question: {question}
-Answer:
-"""
-)
-
+prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
 llm_chain = LLMChain(llm=model, prompt=prompt)
 
-# ----------- 6. RAG Pipeline -----------
+# ----------- 6. RAG Pipeline (Lightning AI) -----------
 def rag_pipeline(question):
     try:
-
-
-
-
-
-
-
-
-
+        with mlflow.start_run(run_name=f"Query-{datetime.now().strftime('%H%M%S')}", nested=True):
+            mlflow.log_param("user_question", question)
+            retrieved_context = get_retrieved_context(question)
+            mlflow.log_text(retrieved_context, "artifacts/retrieved_context.txt")
+
+            start_time = time.time()
+            response_obj = llm_chain.invoke({"context": retrieved_context, "question": question})
+            response = response_obj.get("text") if isinstance(response_obj, dict) else getattr(response_obj, "text", str(response_obj))
+            response = response.strip()
+
+            if "Answer:" in response:
+                response = response.split("Answer:", 1)[-1].strip()
+
+            mlflow.log_metric("response_latency", time.time() - start_time)
+            mlflow.log_metric("response_length", len(response))
+            mlflow.log_text(response, "artifacts/response.txt")
+            return response
     except Exception as e:
+        mlflow.log_metric("pipeline_errors", 1)
         error_info = {"error": str(e), "question": question, "timestamp": datetime.now().isoformat()}
         mlflow.log_dict(error_info, "artifacts/pipeline_errors.json")
         return f"Error: {str(e)}"
 
-# ----------- 7. DeepEval Wrappers -----------
+# ----------- 7. DeepEval Wrappers (Gemini evaluator only) -----------
 class GoogleVertexAI(DeepEvalBaseLLM):
     def __init__(self, model):
         self.model = model
@@ -172,31 +167,21 @@ class GoogleVertexAI(DeepEvalBaseLLM):
         return self.model
 
     def generate(self, prompt: str) -> str:
-
-        res = chat_model.invoke(prompt)
+        res = self.model.invoke(prompt)
         if hasattr(res, 'content'):
             return res.content
         if isinstance(res, dict):
             return res.get('content') or res.get('text') or str(res)
         return str(res)
 
-    def
-
-
-class LitServeWrapper(DeepEvalBaseLLM):
-    def __init__(self, lit_llm: LitServeLLM):
-        self.lit_llm = lit_llm
-
-    def load_model(self):
-        return self.lit_llm
-
-    def generate(self, prompt: str) -> str:
-        return self.lit_llm._call(prompt)
+    async def a_generate(self, prompt: str) -> str:
+        res = await self.model.ainvoke(prompt)
+        return getattr(res, 'content', str(res))
 
     def get_model_name(self):
-        return "
+        return "Gemini Evaluator"
 
-#
+# Length-based utility metric
 class LengthMetric(BaseMetric):
     def __init__(self, min_tokens: int = 1, max_tokens: int = 200):
         self.min_tokens = min_tokens
@@ -214,6 +199,9 @@ class LengthMetric(BaseMetric):
         self.success = (self.min_tokens <= tokens <= self.max_tokens)
         return self.score
 
+    async def a_measure(self, test_case: LLMTestCase):
+        return self.measure(test_case)
+
     def is_successful(self):
         return self.success
 
@@ -221,73 +209,113 @@ class LengthMetric(BaseMetric):
     def name(self):
         return "Length Metric"
 
-# -----------
-def
-    if
-
-
-
-
-
-
-def run_deepeval_tests(test_cases: List[LLMTestCase], eval_model_choice: str = 'gemini'):
-    model_wrapper = get_deepeval_model(eval_model_choice)
+# ----------- 8. Run DeepEval Tests (Gemini only) -----------
+def run_deepeval_tests(test_cases: List[LLMTestCase]):
+    if ChatGoogleGenerativeAI is None or not GOOGLE_API_KEY:
+        raise RuntimeError("Gemini API not available — set GOOGLE_API_KEY")
+
+    genai.configure(api_key=GOOGLE_API_KEY)
+    chat_model = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
+    model_wrapper = GoogleVertexAI(model=chat_model)
+
     answer_relevancy_metric = AnswerRelevancyMetric(threshold=0.5, model=model_wrapper)
     hallucination_metric = HallucinationMetric(threshold=0.5, model=model_wrapper)
     length_metric = LengthMetric(min_tokens=3, max_tokens=200)
 
     results = []
-
-
-    mlflow.
-
-
-
-
-
-
-
-
-
-
-
-
+    with mlflow.start_run(run_name=f"DeepEval-{datetime.now().strftime('%H%M%S')}", nested=True):
+        for i, tc in enumerate(test_cases):
+            mlflow.log_param(f"tc_{i}_input", tc.input)
+            mlflow.log_param(f"tc_{i}_actual", tc.actual_output)
+            if tc.context:
+                mlflow.log_text("\n".join(tc.context), f"artifacts/tc_{i}_context.txt")
+
+            answer_relevancy_metric.measure(tc)
+            hallucination_metric.measure(tc)
+            length_metric.measure(tc)
+
+            entry = {
+                "input": tc.input,
+                "actual_output": tc.actual_output,
+                "context": tc.context,
+                "answer_relevancy_score": answer_relevancy_metric.score,
+                "hallucination_score": hallucination_metric.score,
+                "length_score": length_metric.score
+            }
+
+            mlflow.log_metric(f"tc_{i}_answer_relevancy", answer_relevancy_metric.score)
+            mlflow.log_metric(f"tc_{i}_hallucination", hallucination_metric.score)
+            mlflow.log_metric(f"tc_{i}_length", length_metric.score)
+
+            results.append(entry)
     return results
 
-# -----------
+# ----------- 9. Gradio UI -----------
 with gr.Blocks() as demo:
-    gr.Markdown("# 🛠️ Maintenance AI Assistant +
+    gr.Markdown("# 🛠️ Maintenance AI Assistant (Lightning AI Generator + Gemini Evaluator)")
 
     with gr.Tabs():
         with gr.TabItem("Chat (RAG)"):
+            usage_counter = gr.State(value=0)
+            session_start = gr.State(value=datetime.now().isoformat())
+
             question_input = gr.Textbox(label="Ask your maintenance question")
             answer_output = gr.Textbox(label="AI Response")
             ask_button = gr.Button("Get Answer")
 
-            def
-
-
-
+            def track_usage(question, count, session_start):
+                count += 1
+                with mlflow.start_run(run_name=f"User-Interaction-{count}", nested=True):
+                    mlflow.log_param("question", question)
+                    mlflow.log_param("session_start", session_start)
+                    response = rag_pipeline(question)
+                    mlflow.log_metric("total_queries", count)
+                return response, count, session_start
+
+            ask_button.click(
+                track_usage,
+                inputs=[question_input, usage_counter, session_start],
+                outputs=[answer_output, usage_counter, session_start]
+            )
 
         with gr.TabItem("DeepEval — Model Tests"):
+            gr.Markdown("### Evaluate with Gemini (no expected output needed)")
+
             tc_input = gr.Textbox(label="Test Input (prompt)")
-            tc_actual = gr.Textbox(label="Actual Output (leave empty to auto-generate)")
+            tc_actual = gr.Textbox(label="Actual Output (leave empty to auto-generate via Lightning AI)")
             tc_context = gr.Textbox(label="Context (optional)")
-
-
+
+            auto_generate = gr.Checkbox(label="Auto-generate actual output from RAG", value=True)
             run_button = gr.Button("Run DeepEval")
             eval_output = gr.JSON(label="Evaluation Results")
 
-            def run_single_eval(inp, actual, context, autogen
+            def run_single_eval(inp, actual, context, autogen):
                 if autogen or not actual.strip():
                     actual_output = rag_pipeline(inp)
                 else:
                     actual_output = actual
-
-
+
+                tc = LLMTestCase(
+                    input=inp,
+                    actual_output=actual_output,
+                    expected_output=None,
+                    context=[context] if context else None
+                )
+                results = run_deepeval_tests([tc])
                 return results
 
-            run_button.click(
+            run_button.click(
+                run_single_eval,
+                inputs=[tc_input, tc_actual, tc_context, auto_generate],
+                outputs=[eval_output]
+            )
 
 if __name__ == "__main__":
+    with mlflow.start_run(run_name="Deployment-Info"):
+        mlflow.log_params({
+            "app_version": "1.4.0",
+            "deployment_platform": "Lightning AI / HuggingFace Space",
+            "deployment_time": datetime.now().isoformat(),
+            "code_version": os.getenv("GIT_COMMIT", "dev")
+        })
     demo.launch()
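A few sketches follow for pieces of app.py that the diff leaves in unchanged context; they are reconstructions under stated assumptions, not the file's actual code.

The hunks never show LitServeLLM._call, yet the LLMChain (and the deleted LitServeWrapper.generate, which called it directly) depends on it. A minimal sketch of such a wrapper, assuming the LitServe server accepts {"prompt": ...} and returns {"output": ...}; both payload keys are assumptions, and the LLM import path varies by LangChain version:

from typing import Any, List, Optional

import requests
from langchain.llms.base import LLM  # newer versions: langchain_core.language_models.llms


class LitServeLLM(LLM):
    """Proxies prompts to a LitServe HTTP endpoint (sketch)."""

    endpoint_url: str

    @property
    def _llm_type(self) -> str:
        return "litserve"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        # Payload shape is assumed; match it to the server's decode_request().
        resp = requests.post(self.endpoint_url, json={"prompt": prompt}, timeout=60)
        resp.raise_for_status()
        return resp.json().get("output", "")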
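On the serving side, LITSERVE_ENDPOINT presumably points at a LitServe app on Lightning AI. A toy server matching the same assumed request/response keys; the echo predict() stands in for the real generator model:

import litserve as ls


class GeneratorAPI(ls.LitAPI):
    def setup(self, device):
        # Load the real generator model here; a trivial echo stands in.
        self.model = lambda prompt: f"Answer: (echo) {prompt}"

    def decode_request(self, request):
        return request["prompt"]

    def predict(self, prompt):
        return self.model(prompt)

    def encode_response(self, output):
        return {"output": output}


if __name__ == "__main__":
    ls.LitServer(GeneratorAPI()).run(port=8000)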
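The Pinecone handle `index` also comes from unshown context, but the deleted log_params block pins down the names: index "rag-granite-index", namespace "rag-ns", embedder all-MiniLM-L6-v2 (384-dimensional, so the index must be too). A plausible reconstruction of that setup plus a retrieval round-trip; the sample query is made up, and the HuggingFaceEmbeddings import path varies across LangChain versions:

import os

from pinecone import Pinecone
from langchain_community.embeddings import HuggingFaceEmbeddings

pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])
index = pc.Index("rag-granite-index")  # names from the deleted log_params block
embeddings_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

# Same query shape as get_retrieved_context() in the diff.
vec = embeddings_model.embed_query("How do I reset the pump controller?")
matches = index.query(namespace="rag-ns", vector=vec, top_k=3, include_metadata=True)
for m in matches["matches"]:
    print(m["score"], m["metadata"]["text"])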
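The rewritten run_deepeval_tests can also be exercised outside the Gradio UI. A smoke test, assuming GOOGLE_API_KEY is set and app.py is importable; HallucinationMetric needs context, so the test case supplies one (all strings here are made up):

from deepeval.test_case import LLMTestCase

from app import run_deepeval_tests

tc = LLMTestCase(
    input="How often should the conveyor belt be lubricated?",
    actual_output="Lubricate the conveyor belt every 500 operating hours.",
    context=["Manual: lubricate the conveyor belt every 500 operating hours."],
)
print(run_deepeval_tests([tc]))  # one dict per test case with the three metric scores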
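One behavioral note on the MLflow calls: rag_pipeline, track_usage, and run_deepeval_tests all use mlflow.start_run(nested=True), and a nested run only attaches to a parent when one is already active; otherwise each call shows up as its own top-level run. Wrapping the launch in a long-lived parent run keeps them grouped (the run name is illustrative):

import mlflow

with mlflow.start_run(run_name="App-Session"):
    # Nested runs created by rag_pipeline()/track_usage() while the app is
    # live will now appear under this parent run in the MLflow UI.
    demo.launch()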