Hk4crprasad committed on
Commit
92c935c
·
verified ·
1 Parent(s): dc0be55

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -112
app.py CHANGED
@@ -23,117 +23,6 @@ from langchain.prompts import PromptTemplate
23
  load_dotenv()
24
  import datetime
25
 
26
-
27
- def get_pdf_text(pdf_docs):
28
- text=""
29
- for pdf in pdf_docs:
30
- pdf_reader= PdfReader(pdf)
31
- for page in pdf_reader.pages:
32
- text+= page.extract_text()
33
- return text
34
-
35
-
36
-
37
- def get_text_chunks(text):
38
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
39
- chunks = text_splitter.split_text(text)
40
- return chunks
41
-
42
-
43
- def get_vector_store(text_chunks):
44
- embeddings = GoogleGenerativeAIEmbeddings(model = "models/embedding-001")
45
- vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
46
- vector_store.save_local("faiss_index")
47
-
48
-
49
- def get_conversational_chain():
50
-
51
- prompt_template = """
52
- Try always to answer only available in the context and please always give perfect answer from the available context , if i say Summerize then Summerize , and if ask key points then give the key points, if i ask who are you then answer 'i am BHAI(Best High-Quality Artificial Intelligence) Made by hk4crprasad' and always give the perfect and correct output, if i say that tell me more about something in the context then tell accordingly.,
53
- Context:\n {context}?\n
54
- Question: \n{question}\n
55
-
56
- Answer:
57
- """
58
-
59
- model = ChatGoogleGenerativeAI(model="gemini-pro",
60
- temperature=0.5)
61
-
62
- prompt = PromptTemplate(template = prompt_template, input_variables = ["context", "question"])
63
- chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
64
-
65
- return chain
66
-
67
-
68
-
69
- def user_input(user_question):
70
- embeddings = GoogleGenerativeAIEmbeddings(model = "models/embedding-001")
71
-
72
- new_db = FAISS.load_local("faiss_index", embeddings)
73
- docs = new_db.similarity_search(user_question)
74
-
75
- chain = get_conversational_chain()
76
-
77
-
78
- pdfans = chain(
79
- {"input_documents":docs, "question": user_question}
80
- , return_only_outputs=True)
81
-
82
- return pdfans["output_text"]
83
-
84
-
85
- url = "https://y39t47-8080.csb.app/search"
86
-
87
- weathe_key = os.getenv("WEATHER_KEY")
88
- def print_weather_data(api_key, location):
89
- base_url = "http://api.weatherstack.com/forecast"
90
- params = {"access_key": api_key, "query": location}
91
-
92
- try:
93
- response = requests.get(base_url, params=params)
94
- data = response.json()
95
-
96
- if response.status_code == 200:
97
- # Extract relevant weather information
98
- weather_info = data.get("current", {})
99
- location_info = data.get("location", {})
100
- temperature = weather_info.get("temperature")
101
- description = weather_info.get("weather_descriptions", [])[0]
102
- humidity = weather_info.get("humidity")
103
- localtime = location_info.get("localtime")
104
- country = location_info.get("country")
105
-
106
- # Extract forecast information
107
- forecast_info = data.get("forecast", {})
108
- forecast_str = ""
109
- if forecast_info:
110
- forecast_date = list(forecast_info.keys())[0]
111
- forecast_temp_min = forecast_info[forecast_date].get("mintemp")
112
- forecast_temp_max = forecast_info[forecast_date].get("maxtemp")
113
- forecast_avg_temp = forecast_info[forecast_date].get("avgtemp")
114
-
115
- forecast_str = f"\nForecast for {forecast_date}: Min Temp {forecast_temp_min}°C, Max Temp {forecast_temp_max}°C, Avg Temp {forecast_avg_temp}°C"
116
-
117
- # Build and return the weather report
118
- report = (
119
- f"Weather Report for {location}, {country}\n"
120
- f"Temperature: {temperature}°C\n"
121
- f"Condition: {description}\n"
122
- f"Humidity: {humidity}%\n"
123
- f"Observed at: {localtime}{forecast_str}"
124
- )
125
- print(report)
126
- return report
127
-
128
- else:
129
- return f"Error: {response.status_code}, {data.get('error', {}).get('info', 'Unknown error')}"
130
-
131
- except Exception as e:
132
- return f"An error occurred: {e}"
133
-
134
- #Je t'aime plus que les mots,
135
- #Plus que les sentiments,
136
- #Plus que la vie elle-même
137
  history=[
138
  {
139
  "parts": [
@@ -567,6 +456,121 @@ history=[
567
  ],
568
  "role": "model"
569
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
570
  ]
571
  st.set_page_config(
572
  page_title="BHAI Chat",
@@ -778,7 +782,7 @@ if pdf_mode:
778
  else:
779
  pdf_docs = None
780
 
781
- if pdf_docs:
782
  prompt = st.chat_input("Write your questions according to the pdf")
783
  if prompt:
784
  prmt = {'role': 'user', 'parts':[prompt]}
 
23
  load_dotenv()
24
  import datetime
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  history=[
27
  {
28
  "parts": [
 
456
  ],
457
  "role": "model"
458
  },
459
+
460
+
461
+
462
+
463
+
464
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page in every uploaded PDF.

    Args:
        pdf_docs: iterable of file-like objects (or paths) accepted by PdfReader.

    Returns:
        str: all page text joined together ("" when pdf_docs is empty).
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() may return None for image-only pages; guard so
            # the concatenation does not raise TypeError.
            text += page.extract_text() or ""
    return text
471
+
472
+
473
+
474
def get_text_chunks(text):
    """Split *text* into overlapping chunks suitable for embedding.

    Uses a 10,000-character window with a 1,000-character overlap so that
    context spanning a chunk boundary is not lost.

    Returns:
        list[str]: the text chunks.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    return splitter.split_text(text)
478
+
479
+
480
def get_vector_store(text_chunks):
    """Embed the chunks with Google's embedding model and persist a FAISS index.

    Side effect: writes the index to the local "faiss_index" directory.
    Returns None.
    """
    index = FAISS.from_texts(
        text_chunks,
        embedding=GoogleGenerativeAIEmbeddings(model="models/embedding-001"),
    )
    index.save_local("faiss_index")
484
+
485
+
486
def get_conversational_chain():
    """Build the "stuff" QA chain backed by Gemini Pro with the BHAI persona prompt.

    Returns:
        The loaded question-answering chain expecting {"input_documents", "question"}.
    """
    # Runtime prompt text reproduced verbatim (spelling included) — it is model input.
    prompt_template = """
    Try always to answer only available in the context and please always give perfect answer from the available context , if i say Summerize then Summerize , and if ask key points then give the key points, if i ask who are you then answer 'i am BHAI(Best High-Quality Artificial Intelligence) Made by hk4crprasad' and always give the perfect and correct output, if i say that tell me more about something in the context then tell accordingly.,
    Context:\n {context}?\n
    Question: \n{question}\n

    Answer:
    """

    llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.5)
    qa_prompt = PromptTemplate(
        template=prompt_template,
        input_variables=["context", "question"],
    )
    return load_qa_chain(llm, chain_type="stuff", prompt=qa_prompt)
503
+
504
+
505
+
506
def user_input(user_question):
    """Answer *user_question* from the locally persisted FAISS index.

    Loads the "faiss_index" store saved by get_vector_store(), retrieves the
    chunks most similar to the question, and runs them through the
    conversational QA chain.

    Returns:
        str: the model's answer text.
    """
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    store = FAISS.load_local("faiss_index", embeddings)
    relevant_docs = store.similarity_search(user_question)

    chain = get_conversational_chain()
    result = chain(
        {"input_documents": relevant_docs, "question": user_question},
        return_only_outputs=True,
    )
    return result["output_text"]
520
+
521
+
522
# External search endpoint used elsewhere in the app.
url = "https://y39t47-8080.csb.app/search"

# NOTE(review): "weathe_key" looks like a typo for "weather_key"; left as-is
# because other parts of the file may reference this name.
weathe_key = os.getenv("WEATHER_KEY")
525
def print_weather_data(api_key, location):
    """Fetch current conditions plus a one-day forecast from weatherstack.

    Args:
        api_key: weatherstack access key.
        location: free-form place name to query.

    Returns:
        str: a human-readable weather report on success, otherwise an error
        message string. This function never raises — callers rely on always
        receiving a string.
    """
    base_url = "http://api.weatherstack.com/forecast"
    params = {"access_key": api_key, "query": location}

    try:
        # Timeout added so a stalled API call cannot hang the app forever.
        response = requests.get(base_url, params=params, timeout=10)
        data = response.json()

        if response.status_code == 200:
            # Extract relevant weather information.
            weather_info = data.get("current", {})
            location_info = data.get("location", {})
            temperature = weather_info.get("temperature")
            # weather_descriptions can be missing or empty; avoid IndexError.
            descriptions = weather_info.get("weather_descriptions") or []
            description = descriptions[0] if descriptions else "N/A"
            humidity = weather_info.get("humidity")
            localtime = location_info.get("localtime")
            country = location_info.get("country")

            # Build the optional one-day forecast line.
            forecast_info = data.get("forecast", {})
            forecast_str = ""
            if forecast_info:
                forecast_date = next(iter(forecast_info))
                day = forecast_info[forecast_date]
                forecast_str = (
                    f"\nForecast for {forecast_date}: "
                    f"Min Temp {day.get('mintemp')}°C, "
                    f"Max Temp {day.get('maxtemp')}°C, "
                    f"Avg Temp {day.get('avgtemp')}°C"
                )

            # Build and return the weather report.
            report = (
                f"Weather Report for {location}, {country}\n"
                f"Temperature: {temperature}°C\n"
                f"Condition: {description}\n"
                f"Humidity: {humidity}%\n"
                f"Observed at: {localtime}{forecast_str}"
            )
            print(report)
            return report

        return f"Error: {response.status_code}, {data.get('error', {}).get('info', 'Unknown error')}"

    except Exception as e:
        # Deliberate catch-all: the caller expects a string, never an exception.
        return f"An error occurred: {e}"
570
+
571
+ #Je t'aime plus que les mots,
572
+ #Plus que les sentiments,
573
+ #Plus que la vie elle-même
574
  ]
575
  st.set_page_config(
576
  page_title="BHAI Chat",
 
782
  else:
783
  pdf_docs = None
784
 
785
+ if pdf_mode:
786
  prompt = st.chat_input("Write your questions according to the pdf")
787
  if prompt:
788
  prmt = {'role': 'user', 'parts':[prompt]}