Commit: "Update verifier.py" — file verifier.py changed (+18 −74 lines)
|
@@ -4,7 +4,9 @@ def verifier_page():
|
|
| 4 |
from firebase_admin import credentials, firestore
|
| 5 |
import pandas as pd
|
| 6 |
import PyPDF2
|
| 7 |
-
|
|
|
|
|
|
|
| 8 |
# Initialize Firebase
|
| 9 |
cred = credentials.Certificate('serviceAccountKey.json')
|
| 10 |
if not firebase_admin._apps:
|
|
@@ -33,9 +35,11 @@ def verifier_page():
|
|
| 33 |
# When a file is selected, display details and hide the table
|
| 34 |
st.title(f"File Details: {st.session_state.selected_file}")
|
| 35 |
st.write(f"Text: {st.session_state.selected_text}")
|
|
|
|
| 36 |
else:
|
| 37 |
# Automatically load and display the data table on page load
|
| 38 |
data = fetch_data(fields_to_fetch)
|
|
|
|
| 39 |
if data:
|
| 40 |
df = pd.DataFrame(data)
|
| 41 |
|
|
@@ -57,81 +61,21 @@ def verifier_page():
|
|
| 57 |
st.session_state.selected_file = row['filename']
|
| 58 |
st.session_state.selected_text = row['text']
|
| 59 |
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
# Commented out IPython magic to ensure Python compatibility.
|
| 64 |
-
# %pip install google-generativeai
|
| 65 |
-
|
| 66 |
-
import pathlib
|
| 67 |
-
import textwrap
|
| 68 |
-
|
| 69 |
-
import google.generativeai as genai
|
| 70 |
-
|
| 71 |
-
from IPython.display import display
|
| 72 |
-
from IPython.display import Markdown
|
| 73 |
-
|
| 74 |
-
def extract_text_from_pdf(uploaded_file, start_page=0, end_page=None):
    """Extract text from an inclusive page range of a PDF.

    Parameters
    ----------
    uploaded_file : file-like object, path string, or None
        Source PDF. ``None`` yields an empty string (no file uploaded).
    start_page : int, optional
        First page to extract (0-based). Out-of-range values are clamped to 0.
    end_page : int or None, optional
        Last page to extract (0-based, inclusive). ``None`` or out-of-range
        values are clamped to the final page.

    Returns
    -------
    str
        Concatenated text of the selected pages, or '' when no file is given.
    """
    if uploaded_file is None:
        return ""  # nothing uploaded -> nothing to extract

    reader = PyPDF2.PdfReader(uploaded_file)
    num_pages = len(reader.pages)

    # Clamp the requested range to valid page indices.
    if start_page < 0 or start_page >= num_pages:
        start_page = 0
    if end_page is None or end_page < start_page or end_page >= num_pages:
        end_page = num_pages - 1

    # page.extract_text() may return None for image-only pages; treat as ''.
    # ''.join avoids quadratic += concatenation on large documents.
    return ''.join(
        reader.pages[i].extract_text() or ''
        for i in range(start_page, end_page + 1)
    )
|
| 91 |
-
|
| 92 |
-
# Extract the three Verra reference documents used to evaluate submissions.
# Page indices below are 0-based and inclusive.

pdf_path = 'VCS-Standard.pdf'
start_page = 0   # first page of the standard
end_page = 93    # last page to extract (0-based, inclusive)
vcs_text = extract_text_from_pdf(pdf_path, start_page, end_page)
print(vcs_text)

pdf_path = 'VCS-Methodology-Requirements.pdf'
start_page = 0   # first page of the methodology requirements
end_page = 89    # last page to extract (0-based, inclusive)
methodology_text = extract_text_from_pdf(pdf_path, start_page, end_page)
print(methodology_text)

pdf_path = 'VCS-Project-Description-Template-v4.4-FINAL2.docx.pdf'
start_page = 0   # first page of the project-description template
end_page = 34    # last page to extract (0-based, inclusive)
template_text = extract_text_from_pdf(pdf_path, start_page, end_page)
print(template_text)
|
| 109 |
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))
|
| 113 |
|
|
|
|
| 114 |
|
|
|
|
|
|
|
| 115 |
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
for m in genai.list_models():
|
| 120 |
-
if 'generateContent' in m.supported_generation_methods:
|
| 121 |
-
print(m.name)
|
| 122 |
-
|
| 123 |
-
#For text-only prompts, use a Gemini 1.5 model or the Gemini 1.0 Pro model:
|
| 124 |
-
model = genai.GenerativeModel('gemini-1.5-flash-latest')
|
| 125 |
-
|
| 126 |
-
# Commented out IPython magic to ensure Python compatibility.
|
| 127 |
-
response = model.generate_content("You are a project verifier officer at Verra, the leading registry for projects used to generate carbon credits. Your job is to look into project submissions from project developers who create an implement nature-based solutions in order to generate carbon credits. You go through the content of the project submissions to investigate whether the submission fits into the vcs standards, methodology requirements, and touches everything on the project description template. A verifier has to compare the submission to these 3 main criteria. As a verifier, I want you to evaluate the project submission below based on the resources listed below. The output should be in the format of summary of the project submission, the level of adherence to the standards, what needs to be fixed, and notes for improvement for project developers. The output needs to have project-specific feedback. You can bolster your feedback with quotes from the submission or referencing numbers mentioned in the submission. Here is the project submission:" + st.session_state.selected_text + "Here is the vcs standards:" + vcs_text + "Here is the methodology requirement:" + methodology_text + "Here is the project description template:" + template_text)
|
| 128 |
-
to_markdown(response.text)
|
| 129 |
-
st.session_state.selected_text = response.text
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
# Clear the data table and show file details
|
| 133 |
-
data_placeholder.empty()
|
| 134 |
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
|
|
|
| 4 |
from firebase_admin import credentials, firestore
|
| 5 |
import pandas as pd
|
| 6 |
import PyPDF2
|
| 7 |
+
import google.generativeai as genai
|
| 8 |
+
import textwrap
|
| 9 |
+
|
| 10 |
# Initialize Firebase
|
| 11 |
cred = credentials.Certificate('serviceAccountKey.json')
|
| 12 |
if not firebase_admin._apps:
|
|
|
|
| 35 |
# When a file is selected, display details and hide the table
|
| 36 |
st.title(f"File Details: {st.session_state.selected_file}")
|
| 37 |
st.write(f"Text: {st.session_state.selected_text}")
|
| 38 |
+
st.write(f"AI Response: {st.session_state.selected_ai}")
|
| 39 |
else:
|
| 40 |
# Automatically load and display the data table on page load
|
| 41 |
data = fetch_data(fields_to_fetch)
|
| 42 |
+
|
| 43 |
if data:
|
| 44 |
df = pd.DataFrame(data)
|
| 45 |
|
|
|
|
| 61 |
st.session_state.selected_file = row['filename']
|
| 62 |
st.session_state.selected_text = row['text']
|
| 63 |
|
| 64 |
+
# Deploy LLM and use 'text' as the input
|
| 65 |
+
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")  # SECURITY: the original commit hard-coded a live API key in plaintext — that key must be revoked/rotated, and secrets must never be committed to source control
|
| 66 |
+
genai.configure(api_key=GOOGLE_API_KEY)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
+
# Example of working with LLM models (Gemini 1.5)
|
| 69 |
+
model = genai.GenerativeModel('gemini-1.5-flash-latest')
|
|
|
|
| 70 |
|
| 71 |
+
response = model.generate_content("You are a project verifier officer... (your text here)" + row['text'])
|
| 72 |
|
| 73 |
+
# Save the response and filename in session state
|
| 74 |
+
st.session_state.selected_ai = response.text
|
| 75 |
|
| 76 |
+
# Clear the data table and show file details
|
| 77 |
+
data_placeholder.empty()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
|
| 79 |
+
# Display the file details
|
| 80 |
+
st.write(f"Selected File: {st.session_state.selected_file}")
|
| 81 |
+
st.write(f"AI Response: {st.session_state.selected_ai}")
|