Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import os
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from openai import OpenAI
|
| 5 |
+
import gradio as gr
|
| 6 |
+
import oci
|
| 7 |
+
|
# === OpenAI API Setup ===
# Fail fast at import time: every transcription request needs this key.
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")

client = OpenAI(api_key=openai_api_key)

# === OCI Object Storage Setup ===
# All connection details come from the environment so no secrets live in code.
oci_config = {
    "user": os.environ.get("OCI_USER"),
    "tenancy": os.environ.get("OCI_TENANCY"),
    "fingerprint": os.environ.get("OCI_FINGERPRINT"),
    "region": os.environ.get("OCI_REGION"),
    "key_content": os.environ.get("OCI_PRIVATE_KEY")
}

namespace = os.environ.get("OCI_NAMESPACE")
bucket_name = os.environ.get("OCI_BUCKET_NAME")

# BUG FIX: the original swallowed an init failure and left `object_storage`
# undefined, so the first use later raised a confusing NameError. Bind it to
# None up front so a failed init is at least an explicit, known-unset client.
object_storage = None
try:
    object_storage = oci.object_storage.ObjectStorageClient(oci_config)
except Exception as e:
    print("Failed to initialize OCI Object Storage client:", e)
+
# === Prompts ===
|
| 33 |
+
system_prompt = (
|
| 34 |
+
"You are a detail-oriented assistant that specializes in transcribing and polishing "
|
| 35 |
+
"handwritten notes from images. Your goal is to turn rough, casual, or handwritten "
|
| 36 |
+
"content into clean, structured, and professional-looking text that sounds like it "
|
| 37 |
+
"was written by a human—not an AI. You do not include icons, emojis, or suggest next "
|
| 38 |
+
"steps unless explicitly instructed."
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
user_prompt_template = (
|
| 42 |
+
"You will receive an image of handwritten notes. Transcribe the content accurately, "
|
| 43 |
+
"correcting any spelling or grammar issues. Then, organize it clearly with headings, "
|
| 44 |
+
"bullet points, and proper formatting. Maintain the original intent and voice of the "
|
| 45 |
+
"author, but enhance readability and flow. Do not add embellishments or AI-style phrasing."
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
# === Encode uploaded bytes ===
|
| 49 |
+
def encode_image_to_base64(file_bytes):
|
| 50 |
+
return base64.b64encode(file_bytes).decode("utf-8")
|
| 51 |
+
|
| 52 |
+
# === Upload transcription result to OCI ===
|
| 53 |
+
def upload_to_object_storage(user_name, text):
|
| 54 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 55 |
+
filename = f"{user_name.replace(' ', '_')}_{timestamp}.txt"
|
| 56 |
+
object_storage.put_object(
|
| 57 |
+
namespace_name=namespace,
|
| 58 |
+
bucket_name=bucket_name,
|
| 59 |
+
object_name=filename,
|
| 60 |
+
put_object_body=text.encode("utf-8")
|
| 61 |
+
)
|
| 62 |
+
return filename
|
| 63 |
+
|
| 64 |
+
# === List files in object storage ===
|
| 65 |
+
def list_object_store():
|
| 66 |
+
try:
|
| 67 |
+
objects = object_storage.list_objects(namespace, bucket_name)
|
| 68 |
+
return "\n".join([obj.name for obj in objects.data.objects])
|
| 69 |
+
except Exception as e:
|
| 70 |
+
return f"Failed to list objects: {str(e)}"
|
| 71 |
+
|
| 72 |
+
# === Transcription logic ===
|
| 73 |
+
def transcribe_image(file_bytes, user_name):
|
| 74 |
+
if not file_bytes:
|
| 75 |
+
return "No image uploaded."
|
| 76 |
+
|
| 77 |
+
encoded = encode_image_to_base64(file_bytes)
|
| 78 |
+
image_url = f"data:image/jpeg;base64,{encoded}"
|
| 79 |
+
|
| 80 |
+
response = client.chat.completions.create(
|
| 81 |
+
model="gpt-4-turbo",
|
| 82 |
+
messages=[
|
| 83 |
+
{"role": "system", "content": system_prompt},
|
| 84 |
+
{"role": "user", "content": [
|
| 85 |
+
{"type": "text", "text": user_prompt_template},
|
| 86 |
+
{"type": "image_url", "image_url": {"url": image_url}}
|
| 87 |
+
]}
|
| 88 |
+
],
|
| 89 |
+
max_tokens=1500
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 93 |
+
result = f"🗓️ Transcribed on: {timestamp}\n\n{response.choices[0].message.content}"
|
| 94 |
+
upload_to_object_storage(user_name, result)
|
| 95 |
+
return result
|
| 96 |
+
|
| 97 |
+
# === Gradio Interface ===
|
| 98 |
+
with gr.Blocks() as app:
|
| 99 |
+
gr.Markdown("## Handwritten Note Transcriber\nUpload a handwritten note image for professional transcription and auto-upload to OCI Object Storage.")
|
| 100 |
+
|
| 101 |
+
with gr.Row():
|
| 102 |
+
user_dropdown = gr.Dropdown(
|
| 103 |
+
choices=["Jim Goodwin", "Zahabiya Ali rampurawala", "Keith Gauvin"],
|
| 104 |
+
label="Who is uploading this?"
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
input_file = gr.File(label="Upload image", type="binary", file_types=[".jpg", ".jpeg", ".png"])
|
| 108 |
+
output_text = gr.Textbox(label="Transcription Output", lines=30)
|
| 109 |
+
|
| 110 |
+
input_file.change(fn=transcribe_image, inputs=[input_file, user_dropdown], outputs=output_text)
|
| 111 |
+
|
| 112 |
+
gr.Button("List Object Store").click(fn=list_object_store, outputs=gr.Textbox(label="Object Store Contents"))
|
| 113 |
+
|
| 114 |
+
# === Launch App ===
|
| 115 |
+
if __name__ == "__main__":
|
| 116 |
+
app.launch(share=True)
|