rashid01 committed on
Commit
6c9afb1
·
verified ·
1 Parent(s): 6f36c14

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -47
app.py CHANGED
@@ -1,51 +1,31 @@
1
- import requests
2
- from langchain import PromptTemplate
3
-
4
- # Define a simple prompt template
5
- prompt_template = PromptTemplate(
6
- input_variables=['name'],
7
- template='Hello {name}, how can I assist you today?'
8
- )
9
-
10
- # Define a function to interact with Google Gemini API
11
- def generate_response(name):
12
- # Construct the prompt
13
- prompt = prompt_template.format(name=name)
14
-
15
- # Define API endpoint and headers
16
- api_url = 'https://your-google-gemini-endpoint.com/v1/generate'
17
- headers = {
18
- 'Authorization': f'Bearer YOUR_GEMINI_API_KEY',
19
- 'Content-Type': 'application/json'
20
- }
21
- payload = {
22
- 'prompt': prompt
23
- }
24
-
25
- # Send the request to the API
26
- try:
27
- print(f'Sending request to: {api_url}')
28
- response = requests.post(api_url, headers=headers, json=payload)
29
- print(f'Response status code: {response.status_code}')
30
- if response.status_code == 200:
31
- result = response.json()
32
- generated_text = result.get('text', 'No response text available.')
33
  else:
34
- generated_text = f'Error: Unexpected status code {response.status_code}'
35
- except requests.exceptions.RequestException as e:
36
- generated_text = f'Error: {e}'
37
-
38
- print(f'Tracking: LLM Request - Name: {name}, Prompt: {prompt}')
39
- print(f'Tracking: LLM Response - Response: {generated_text}')
40
-
41
- return generated_text
42
-
43
- # Example usage
44
- if __name__ == "__main__":
45
- name = "Alice"
46
- response = generate_response(name)
47
- print(f"Generated Response: {response}")
48
-
49
 
50
 
51
 
 
1
+ import streamlit as st
2
+ from langsmith import LangsmithClient
3
+
4
# Initialize the Langsmith client with your API key.
# The key is read from Streamlit's secrets store, so it must be configured
# in .streamlit/secrets.toml (or the deployment's secrets) under this name.
api_key = st.secrets["LANGSMITH_API_KEY"]
client = LangsmithClient(api_key)  # NOTE(review): assumes the constructor takes the key positionally — confirm against the SDK

# Page header shown at the top of the Streamlit app.
st.title("Custom Customer Support Bot")
9
+
10
def generate_response(query):
    """Send a customer query to the chat backend and return the reply text.

    Builds a completion-style prompt from *query*, forwards it to the
    module-level ``client``, and returns the first choice's text with
    surrounding whitespace stripped.
    """
    completion_prompt = f"Customer query: {query}\n\nResponse:"
    api_result = client.chat(
        model="text-davinci-003",  # model id forwarded as-is to the backend
        prompt=completion_prompt,
        max_tokens=150,
    )
    first_choice = api_result['choices'][0]
    return first_choice['text'].strip()
17
+
18
# Query form: collect the user's question and a submit action together so
# the script only reacts once the button is pressed.
with st.form("support_form"):
    user_query = st.text_area("Enter your query here:")
    submitted = st.form_submit_button("Submit")

# Handle the submission outside the form block (Streamlit convention:
# widgets inside the form, results rendered after it).
if submitted:
    if not user_query:
        # Empty text area — ask for input instead of calling the backend.
        st.error("Please enter a query to get a response.")
    else:
        response = generate_response(user_query)
        st.write("**Bot Response:**")
        st.write(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
 
31