# -*- coding: utf-8 -*-
"""Customer Review Analyzer (POS + NER + Sentiment) built on Llama 3 via Groq.

Automatically generated by Colab. Original file is located at
https://colab.research.google.com/drive/1neCwOaMM4pn-eYYwZ6zYIdhhNdiwIG5K
"""

import os

import gradio as gr
from openai import OpenAI

# Groq exposes an OpenAI-compatible REST endpoint, which is why the OpenAI SDK
# is reused here with a custom base_url.  Fail fast with a clear message if the
# key is missing instead of a bare KeyError at import time.
_api_key = os.environ.get("GROQ_API_KEY")
if not _api_key:
    raise RuntimeError(
        "GROQ_API_KEY environment variable is not set. "
        "Create a key at https://console.groq.com and export it before running."
    )

# Initialize Groq client
client = OpenAI(
    api_key=_api_key,
    base_url="https://api.groq.com/openai/v1",
)

# Cloud-hosted Llama 3 8B served by Groq (NOT a local GGUF model).
_MODEL = "llama3-8b-8192"


# NOTE: the name "lcpp_llm" is a leftover from a llama-cpp-python prototype;
# it is kept unchanged because it is this module's public entry point.
def lcpp_llm(prompt, max_tokens=512, temperature=0.3, stop=None):
    """Send a single-turn *prompt* to Groq's Llama 3 model.

    Args:
        prompt: Full prompt text (system instructions + user content).
        max_tokens: Completion length cap forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        stop: Optional stop sequence(s) forwarded to the API.

    Returns:
        The model's reply text, stripped of surrounding whitespace.
    """
    response = client.chat.completions.create(
        model=_MODEL,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        temperature=temperature,
        stop=stop,
    )
    return response.choices[0].message.content.strip()


# Prompt templates for the three analyses.  Each is prepended verbatim to the
# user's review text before being sent to the model.
POS_SYSTEM_MESSAGE = """
SYSTEM: You are a linguistics expert and NLP model trained to analyze the grammatical structure of English text. Your task is to perform Part-of-Speech (POS) tagging for the following customer review. For each word, return its POS tag in a clean, aligned format.
USER:
"""

NER_SYSTEM_MESSAGE = """
SYSTEM: You are an Information Extraction Specialist AI that extracts meaningful entities from customer feedback. Identify all named entities in the review and classify them into the appropriate types (e.g., PERSON, ORGANIZATION, PRODUCT, LOCATION, DATE, etc.). Return your output in a structured table with two columns: Entity and Label. Do not ask any follow up questions.
USER:
"""

ANALYSIS_SYSTEM_MESSAGE = """
SYSTEM: You are a Customer Experience Analyst AI. Analyze the customer review below and create a concise table with three columns: 1. Category (Product or Service) mentioned in comment 2. Sentiment (Positive, Negative, or Mixed) 3. Insight (What is going well or needs improvement) Only output the table Focus on clear business insights. Do not ask any follow up questions.
USER:
"""


def pos_function(user_query):
    """Return a POS tag for each word of *user_query*, as model-formatted text."""
    return lcpp_llm(POS_SYSTEM_MESSAGE + user_query, max_tokens=512)


def ner_function(user_query):
    """Return an Entity/Label table of named entities found in *user_query*."""
    return lcpp_llm(NER_SYSTEM_MESSAGE + user_query, max_tokens=512)


def analysis_function(user_query):
    """Return a Category/Sentiment/Insight table for *user_query*.

    The model sometimes echoes the table (or the prompt) more than once, so
    only the first table is kept, truncated before any repeated "SYSTEM:" text.
    """
    output = lcpp_llm(ANALYSIS_SYSTEM_MESSAGE + user_query, max_tokens=512)

    # Keep only the first table if repeated
    header = "| Category | Sentiment | Insight |"
    if output.count(header) > 1:
        first_table = output.split(header, 1)[1]
        return header + "\n" + first_table.split("SYSTEM:")[0].strip()
    return output


def full_analysis(user_query):
    """Run all three analyses on one review; returns (pos, ner, analysis)."""
    pos = pos_function(user_query)
    ner = ner_function(user_query)
    analysis = analysis_function(user_query)
    return pos, ner, analysis


# Gradio interface
iface = gr.Interface(
    fn=full_analysis,
    inputs=gr.Textbox(lines=5, label="Enter Customer Review"),
    outputs=[
        gr.Textbox(label="POS Tags"),
        gr.Textbox(label="Named Entities"),
        gr.Textbox(label="Review Sentiment & Insights"),
    ],
    title="Customer Review Analyzer with Llama 3 (POS + NER + Sentiment)",
    # Description fixed: the original claimed a local GGUF model, but this app
    # calls the cloud-hosted llama3-8b-8192 model through the Groq API.
    description=(
        "This tool uses Llama 3 (llama3-8b-8192 via the Groq API) to extract "
        "POS tags, named entities, and sentiment insights from customer reviews."
    ),
)

if __name__ == "__main__":
    iface.launch()