anyonehomep1mane committed on
Commit
5637ddb
·
0 Parent(s):

Initial Changes

Browse files
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.keras filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ .env
2
+ venv
3
+ handwriting_dataset
4
+ heavy_handwriting_dataset
5
+ .vscode
Dockerfile ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10.13-slim
2
+
3
+ ENV PYTHONDONTWRITEBYTECODE=1 \
4
+ PYTHONUNBUFFERED=1 \
5
+ CUDA_VISIBLE_DEVICES=-1 \
6
+ TF_ENABLE_ONEDNN_OPTS=0
7
+
8
+ WORKDIR /app
9
+
10
+ RUN apt-get update && apt-get install -y \
11
+ build-essential \
12
+ git \
13
+ && rm -rf /var/lib/apt/lists/*
14
+
15
+ COPY requirements.txt .
16
+
17
+ RUN pip install --upgrade pip && \
18
+ pip install --no-cache-dir -r requirements.txt
19
+
20
+ COPY . .
21
+
22
+ CMD ["python", "app.py"]
RAG_Documents/Agreeableness - PDF Information.pdf ADDED
Binary file (54.2 kB). View file
 
RAG_Documents/Conscientiousness - PDF Information.pdf ADDED
Binary file (52.5 kB). View file
 
RAG_Documents/Extraversion - PDF Information.pdf ADDED
Binary file (51.7 kB). View file
 
RAG_Documents/Neuroticism - PDF Information.pdf ADDED
Binary file (53 kB). View file
 
RAG_Documents/Openness - PDF Information.pdf ADDED
Binary file (51.7 kB). View file
 
app.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import sys
2
+ # import tensorflow as tf
3
+ # import gradio as gr
4
+
5
+ # print("Python:", sys.version)
6
+ # print("TF:", tf.__version__)
7
+
8
+ # def hello():
9
+ # return "Gradio + TensorFlow OK"
10
+
11
+ # gr.Interface(fn=hello, inputs=None, outputs="text").launch()
12
+
13
+ import os
14
+ os.environ.pop("TF_USE_LEGACY_KERAS", None)
15
+
16
+ import gradio as gr
17
+ from common.gradio.common import full_analysis
18
+
19
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
20
+
21
+ gr.Markdown("# Handwriting → Big Five Personality Prediction")
22
+ gr.Markdown("Upload any image of handwriting → model will try to predict personality trait")
23
+
24
+ with gr.Row():
25
+ with gr.Column():
26
+ image_input = gr.Image(
27
+ type="pil",
28
+ label="Upload handwriting image",
29
+ sources=["upload"],
30
+ height=380
31
+ )
32
+
33
+ with gr.Column():
34
+ gr.Markdown("### Prediction")
35
+ prediction_output = gr.Markdown(value="Upload image and click Analyze...")
36
+
37
+ gr.Markdown("### Personality Description")
38
+ summary_output = gr.Markdown(value="Description will appear here...")
39
+
40
+ btn = gr.Button("Analyze", variant="primary")
41
+ btn.click(
42
+ fn=full_analysis,
43
+ inputs=image_input,
44
+ outputs=[prediction_output, summary_output]
45
+ )
46
+
47
+ image_input.change(
48
+ fn=full_analysis,
49
+ inputs=image_input,
50
+ outputs=[prediction_output, summary_output]
51
+ )
52
+
53
+
54
+ if __name__ == "__main__":
55
+ demo.launch(server_name="0.0.0.0", server_port=7860)
base_Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
+ # you will also find guides on how best to write your Dockerfile
3
+
4
+ FROM python:3.10.13-slim
5
+
6
+ RUN useradd -m -u 1000 user
7
+ USER user
8
+ ENV PATH="/home/user/.local/bin:$PATH"
9
+
10
+ WORKDIR /app
11
+
12
+ COPY --chown=user ./requirements.txt requirements.txt
13
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
14
+
15
+ COPY --chown=user . /app
16
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
base_README.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Automated Signature Analysis Docker
3
+ emoji: 📉
4
+ colorFrom: blue
5
+ colorTo: red
6
+ sdk: docker
7
+ pinned: false
8
+ ---
9
+
10
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
base_app.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import platform
3
+ from fastapi import FastAPI
4
+
5
+ app = FastAPI()
6
+
7
+ @app.get("/")
8
+ def greet_json():
9
+ return {"Hello": "World!"}
10
+
11
+ @app.get("/version")
12
+ def get_version():
13
+ return {
14
+ "python_version": sys.version.split()[0],
15
+ "platform": platform.platform(),
16
+ "full_version": sys.version
17
+ }
base_requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ fastapi
2
+ uvicorn[standard]
common/gradio/common.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import tensorflow as tf
3
+ import warnings
4
+ from common.rag.common import generate_personality_summary
5
+
6
+ import os
7
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
8
+
9
+ _model = None
10
+
11
+
12
+ def predict_handwriting(image):
13
+ """
14
+ Preprocess uploaded image exactly the way model expects
15
+ """
16
+
17
+ global _model
18
+
19
+ if _model is None:
20
+ _model = tf.keras.models.load_model(
21
+ "signature_model_tfdata.keras",
22
+ compile=False
23
+ )
24
+
25
+ if image is None:
26
+ return "Please upload an image.", ""
27
+
28
+ try:
29
+ img = tf.keras.preprocessing.image.img_to_array(image)
30
+
31
+ if img.shape[-1] == 4:
32
+ img = img[..., :3]
33
+ elif img.shape[-1] == 1:
34
+ pass
35
+ elif img.shape[-1] != 3:
36
+ return "Unsupported image format (channels).", ""
37
+
38
+
39
+ if img.shape[-1] == 3:
40
+ img = tf.image.rgb_to_grayscale(img)
41
+
42
+ IMG_SIZE = 224
43
+ img = tf.image.resize(img, [IMG_SIZE, IMG_SIZE])
44
+ img = img / 255.0
45
+ img = tf.image.grayscale_to_rgb(img)
46
+ img = tf.expand_dims(img, axis=0)
47
+
48
+
49
+ predictions = _model.predict(img, verbose=0)[0]
50
+ predicted_idx = np.argmax(predictions)
51
+ confidence = float(predictions[predicted_idx]) * 100
52
+
53
+ CLASS_NAMES = [
54
+ "Agreeableness",
55
+ "Conscientiousness",
56
+ "Extraversion",
57
+ "Neuroticism",
58
+ "Openness"
59
+ ]
60
+
61
+ trait = CLASS_NAMES[predicted_idx]
62
+
63
+ result = f"**Predicted Personality Trait**\n{trait}\n\n**Confidence**: {confidence:.2f}%", trait
64
+ return result
65
+
66
+ except Exception as e:
67
+ import traceback
68
+ return f"Error during prediction:\n{str(e)}", ""
69
+
70
+ def full_analysis(image):
71
+ if image is None:
72
+ return "Please upload an image.", ""
73
+
74
+ prediction_text, trait = predict_handwriting(image)
75
+ summary = generate_personality_summary(trait) if trait else ""
76
+ return prediction_text, summary
common/gradio/signature_model_tfdata.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02a50f5ec38a218287eb03e2f70a468237ff0a998110175f65e87f676fca0d41
3
+ size 23635350
common/rag/common.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
+ from langchain_core.output_parsers import StrOutputParser
3
+ from common.rag.embeddings import fetch_relevant_document
4
+ from common.rag.models import load_huggingface_model
5
+
6
+
7
+ def generate_personality_summary(trait):
8
+ """
9
+ Generate a graphological personality summary for a given trait/topic using RAG.
10
+
11
+ This function performs a Retrieval-Augmented Generation (RAG) query to analyze
12
+ handwriting characteristics and derive corresponding personality traits/psychological
13
+ interpretations — but **only** from information explicitly present in retrieved documents.
14
+
15
+ Important:
16
+ The analysis is strictly limited to the content found in the vector store.
17
+ No external/pre-trained graphological knowledge is used by the LLM.
18
+
19
+ Parameters
20
+ ----------
21
+ trait : str or None
22
+ The personality trait, psychological characteristic, behavioral pattern or
23
+ topic for which handwriting analysis should be retrieved and interpreted.
24
+ Examples: "introversion", "high ambition", "emotional instability", "leadership"
25
+
26
+ Returns
27
+ -------
28
+ str
29
+ Graphological analysis containing:
30
+ - observed handwriting features (if any were found)
31
+ - their professional graphological interpretation
32
+ - overall personality impression
33
+ OR one of the following safety messages:
34
+ - "The provided context contains insufficient information for handwriting analysis"
35
+ - empty string (when trait is None)
36
+
37
+ Notes
38
+ -----
39
+ - The function is intentionally very strict about hallucination prevention.
40
+ - Quality of the result depends heavily on the relevance and richness of documents
41
+ stored in the vector database for the given trait.
42
+ """
43
+
44
+ if trait is None:
45
+ return ""
46
+
47
+ system_message = """
48
+ You are a highly experienced professional graphologist with a PhD in Graphology and more than 20 years of practical experience in forensic and psychological handwriting analysis.
49
+
50
+ Your only task is to analyze handwriting features and give interpretations STRICTLY based on the information provided in the retrieved context/transcript.
51
+
52
+ Rules you must follow:
53
+ • Never use knowledge or assumptions from your training data
54
+ • Never invent or assume handwriting characteristics that are not explicitly described in the provided context
55
+ • If the context contains insufficient information for a meaningful analysis → answer only: "The provided context contains insufficient information for handwriting analysis"
56
+ • Use professional graphological terminology
57
+ • Structure your answer clearly: first describe observed features, then psychological/personality interpretation (if enough data)
58
+
59
+ Be objective, precise, and stay 100% within the provided context.
60
+ """
61
+
62
+ question = f"Analyze the handwriting features and personality traits of a person characterized as: {trait}, using ONLY the information present in the provided context."
63
+
64
+ context = fetch_relevant_document(topic=trait)
65
+
66
+ model = load_huggingface_model()
67
+
68
+ rag_prompt = ChatPromptTemplate.from_messages([
69
+ ("system", "{system_message}"),
70
+ ("human", """Context information:\n\n{context}\n\nQuestion:\n\n{question}\n\nTopic:{topic}\n\nAnswer:""")
71
+ ])
72
+
73
+ simple_rag_chain = (
74
+ rag_prompt
75
+ | model
76
+ | StrOutputParser()
77
+ )
78
+
79
+ answer = simple_rag_chain.invoke({
80
+ "system_message": system_message,
81
+ "context": context,
82
+ "question": question,
83
+ "topic": trait
84
+ })
85
+
86
+ return answer
common/rag/document_loader.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ warnings.filterwarnings(action='ignore')
3
+ from langchain_community.document_loaders import PyPDFDirectoryLoader
4
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
5
+
6
+ from dotenv import load_dotenv
7
+ load_dotenv()
8
+
9
+
10
+ def fetch_document_chunks():
11
+ """
12
+ Load and split all PDF files from the designated folder into manageable text chunks.
13
+
14
+ This function serves as the document ingestion step for the RAG pipeline.
15
+ It:
16
+ - Loads every PDF file found in the ./RAG_Documents directory
17
+ - Splits documents into overlapping chunks optimized for vector embedding
18
+ and retrieval in graphology/handwriting analysis context
19
+
20
+ Configuration (hardcoded):
21
+ - Source folder: ./RAG_Documents
22
+ - Chunk size: 850 characters
23
+ - Chunk overlap: 120 characters
24
+ - Splitter: RecursiveCharacterTextSplitter with common separators
25
+ - Includes start_index metadata for potential future reference/traceability
26
+
27
+ Returns
28
+ -------
29
+ list[langchain_core.documents.Document]
30
+ List of document chunks ready to be embedded and stored in vector database.
31
+ Each chunk contains:
32
+ - page_content: the text fragment
33
+ - metadata: source file, page number, start_index
34
+
35
+ Raises
36
+ ------
37
+ FileNotFoundError
38
+ If the ./RAG_Documents directory does not exist
39
+ ValueError
40
+ If no PDF files are found or directory is empty
41
+
42
+ Notes
43
+ -----
44
+ - This function loads and splits documents **every time it is called**.
45
+ - In production, consider caching the chunks or using a persistent vector store
46
+ to avoid repeated disk I/O and splitting.
47
+ - Current parameters (850/120) are reasonable for most sentence-transformers
48
+ models and graphology-related documents.
49
+ """
50
+
51
+ PDF_FOLDER = "./RAG_Documents"
52
+ CHUNK_SIZE = 850
53
+ CHUNK_OVERLAP = 120
54
+
55
+ loader = PyPDFDirectoryLoader(PDF_FOLDER)
56
+ docs = loader.load()
57
+
58
+ text_splitter = RecursiveCharacterTextSplitter(
59
+ chunk_size=CHUNK_SIZE,
60
+ chunk_overlap=CHUNK_OVERLAP,
61
+ length_function=len,
62
+ separators=["\n\n", "\n", ". ", " ", ""],
63
+ add_start_index=True
64
+ )
65
+
66
+ chunks = text_splitter.split_documents(docs)
67
+ return chunks
common/rag/embeddings.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ warnings.filterwarnings(action='ignore')
3
+ import torch
4
+ from langchain_community.vectorstores import FAISS
5
+ from langchain_huggingface import HuggingFaceEmbeddings
6
+ from common.rag.document_loader import fetch_document_chunks
7
+ from dotenv import load_dotenv
8
+ load_dotenv()
9
+
10
+
11
+ def fetch_vectorstore_retriever():
12
+ """
13
+ Create and return a FAISS-based retriever for graphology/handwriting analysis documents.
14
+
15
+ This function:
16
+ - Loads sentence-transformers/all-MiniLM-L6-v2 embeddings (GPU if available)
17
+ - Builds a FAISS vector store from document chunks obtained via fetch_document_chunks()
18
+ - Returns a similarity search retriever configured to return top 10 most relevant chunks
19
+
20
+ Returns
21
+ -------
22
+ langchain_core.retrievers.BaseRetriever
23
+ Configured FAISS retriever ready to be used with .invoke() or .get_relevant_documents()
24
+
25
+ Notes
26
+ -----
27
+ - The vector store is **recreated from scratch every time** this function is called.
28
+ - This can be slow on first run or when document collection is large.
29
+ - Consider caching/persisting the vectorstore in production for better performance.
30
+ - Uses normalize_embeddings=True → cosine similarity is used internally.
31
+ """
32
+
33
+ embeddings = HuggingFaceEmbeddings(
34
+ model_name="sentence-transformers/all-MiniLM-L6-v2",
35
+ model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'},
36
+ encode_kwargs={'normalize_embeddings': True}
37
+ )
38
+
39
+ vectorstore = FAISS.from_documents(
40
+ documents=fetch_document_chunks(),
41
+ embedding=embeddings
42
+ )
43
+
44
+ retriever = vectorstore.as_retriever(
45
+ search_type="similarity",
46
+ search_kwargs={"k": 10}
47
+ )
48
+
49
+ return retriever
50
+
51
+
52
+ def fetch_relevant_document(topic="None"):
53
+ """
54
+ Retrieve relevant document chunks for graphological analysis of a specific topic/trait.
55
+
56
+ Constructs a detailed, structured query optimized for finding handwriting analysis content,
57
+ then retrieves the top 10 most similar document chunks from the FAISS vector store.
58
+
59
+ Parameters
60
+ ----------
61
+ topic : str, default="None"
62
+ Personality trait, psychological characteristic, writing style aspect or any topic
63
+ for which handwriting analysis information is requested.
64
+ Examples: "ambition", "emotional stability", "aggressiveness", "introversion"
65
+
66
+ Returns
67
+ -------
68
+ str
69
+ Concatenated string containing up to 10 relevant document chunks, each prefixed
70
+ with "[Document N]" for clear identification in the RAG context.
71
+ Returns empty context string if topic is "None" or no relevant chunks are found.
72
+
73
+ Notes
74
+ -----
75
+ - The query is intentionally very specific and structured to improve retrieval quality
76
+ for handwriting/graphology related content.
77
+ - Uses similarity (cosine) search with k=10 (top 10 results).
78
+ - The returned context is meant to be directly passed into a RAG prompt for LLM analysis.
79
+ """
80
+
81
+ retriever = fetch_vectorstore_retriever()
82
+ query = (
83
+ f"Handwriting sample analysis for: {topic}\n"
84
+ "Extract and summarize: \n"
85
+ "- Observed writing style characteristics (slant, pressure, size, speed, spacing, margins, baseline, letter forms, connections, etc.)\n"
86
+ "- Graphological interpretations of personality traits linked to those features\n"
87
+ "- Overall psychological or personality impression"
88
+ )
89
+ docs = retriever.invoke(query)
90
+ context = "\n\n".join(f"[Document {i+1}]\n{doc.page_content}\n" for i, doc in enumerate(docs))
91
+ return context
common/rag/models.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ warnings.filterwarnings(action='ignore')
3
+ from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
4
+ from dotenv import load_dotenv
5
+ load_dotenv()
6
+
7
+ def load_huggingface_model():
8
+ """
9
+ Initialize and return a Hugging Face chat model wrapper for RAG-based graphology analysis.
10
+
11
+ Creates a HuggingFaceEndpoint instance connected to the Qwen2.5-7B-Instruct model
12
+ and wraps it with ChatHuggingFace for conversational compatibility with LangChain.
13
+
14
+ Configuration:
15
+ - Model: Qwen/Qwen2.5-7B-Instruct (7B parameter instruction-tuned model)
16
+ - Temperature: 0.65 (balanced between creativity and coherence)
17
+ - Max new tokens: 1024
18
+ - Top-p: 0.92 (nucleus sampling)
19
+ - Repetition penalty: 1.05 (light discouragement of repetitions)
20
+
21
+ Returns
22
+ -------
23
+ ChatHuggingFace
24
+ Configured LangChain-compatible chat model ready to be used in chains
25
+
26
+ Notes
27
+ -----
28
+ - Requires HUGGINGFACEHUB_API_TOKEN to be set in environment variables
29
+ (loaded via dotenv)
30
+ - Uses inference endpoint (cloud-based inference) — no local GPU/CPU loading
31
+ - Model is reloaded every time this function is called
32
+ - Current settings are optimized for structured, precise graphological analysis
33
+ with controlled creativity
34
+ - Consider adjusting temperature/max_new_tokens based on response length needs
35
+
36
+ Raises
37
+ ------
38
+ ValueError
39
+ If HUGGINGFACEHUB_API_TOKEN is missing or invalid
40
+ """
41
+
42
+ chat_llm = HuggingFaceEndpoint(
43
+ repo_id="Qwen/Qwen2.5-7B-Instruct",
44
+ task="text-generation",
45
+ temperature=0.65,
46
+ max_new_tokens=1024,
47
+ top_p=0.92,
48
+ repetition_penalty=1.05
49
+ )
50
+ model = ChatHuggingFace(llm=chat_llm)
51
+ return model
common/rag/prompts.py ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py
2
+ aiofiles
3
+ aiohappyeyeballs
4
+ aiohttp
5
+ aiosignal
6
+ altair
7
+ annotated-doc
8
+ annotated-types
9
+ anyio
10
+ asttokens
11
+ astunparse
12
+ async-timeout
13
+ attrs
14
+ blinker
15
+ brotli
16
+ cachetools
17
+ certifi
18
+ charset-normalizer
19
+ click
20
+ cloudpickle
21
+ colorama
22
+ comm
23
+ contourpy
24
+ cycler
25
+ dataclasses-json
26
+ debugpy
27
+ decorator
28
+ distro
29
+ exceptiongroup
30
+ executing
31
+ faiss-cpu
32
+ fastapi
33
+ ffmpy
34
+ filelock
35
+ filetype
36
+ flatbuffers
37
+ fonttools
38
+ frozenlist
39
+ fsspec
40
+ gast
41
+ gitdb
42
+ GitPython
43
+ google-auth
44
+ google-auth-oauthlib
45
+ google-genai
46
+ google-pasta
47
+ # gradio
48
+ gradio_client
49
+ greenlet
50
+ groovy
51
+ grpcio
52
+ h11
53
+ h5py
54
+ hf-xet
55
+ httpcore
56
+ httpx
57
+ httpx-sse
58
+ huggingface-hub
59
+ idna
60
+ ImageIO
61
+ ipykernel
62
+ ipython
63
+ jedi
64
+ Jinja2
65
+ jiter
66
+ joblib
67
+ jsonpatch
68
+ jsonpointer
69
+ jsonschema
70
+ jsonschema-specifications
71
+ jupyter_client
72
+ jupyter_core
73
+ keras
74
+ kiwisolver
75
+ langchain
76
+ langchain-classic
77
+ langchain-community
78
+ langchain-core
79
+ langchain-google-genai
80
+ langchain-huggingface
81
+ langchain-openai
82
+ langchain-text-splitters
83
+ langgraph
84
+ langgraph-checkpoint
85
+ langgraph-prebuilt
86
+ langgraph-sdk
87
+ langsmith
88
+ lazy_loader
89
+ libclang
90
+ lime
91
+ llvmlite
92
+ Markdown
93
+ markdown-it-py
94
+ MarkupSafe
95
+ marshmallow
96
+ matplotlib
97
+ matplotlib-inline
98
+ mdurl
99
+ ml_dtypes
100
+ mpmath
101
+ multidict
102
+ mypy_extensions
103
+ namex
104
+ narwhals
105
+ nest-asyncio
106
+ networkx
107
+ numba
108
+ # numpy
109
+ oauthlib
110
+ openai
111
+ opencv-python
112
+ opt_einsum
113
+ optree
114
+ orjson
115
+ ormsgpack
116
+ packaging
117
+ pandas
118
+ parso
119
+ pillow
120
+ platformdirs
121
+ prompt_toolkit
122
+ propcache
123
+ # protobuf
124
+ psutil
125
+ pure_eval
126
+ pyarrow
127
+ pyasn1
128
+ pyasn1_modules
129
+ pydantic
130
+ pydantic-settings
131
+ pydantic_core
132
+ pydeck
133
+ pydub
134
+ Pygments
135
+ pyparsing
136
+ pypdf
137
+ python-dateutil
138
+ python-dotenv
139
+ python-multipart
140
+ pytz
141
+ PyYAML
142
+ pyzmq
143
+ referencing
144
+ regex
145
+ reportlab
146
+ requests
147
+ requests-oauthlib
148
+ requests-toolbelt
149
+ rich
150
+ rpds-py
151
+ rsa
152
+ safehttpx
153
+ safetensors
154
+ scikit-image
155
+ scikit-learn
156
+ scipy
157
+ seaborn
158
+ semantic-version
159
+ sentence-transformers
160
+ shap
161
+ shellingham
162
+ six
163
+ slicer
164
+ smmap
165
+ sniffio
166
+ SQLAlchemy
167
+ stack-data
168
+ starlette
169
+ streamlit
170
+ sympy
171
+ tenacity
172
+ tensorboard
173
+ tensorboard-data-server
174
+ # tensorflow
175
+ tensorflow-estimator
176
+ tensorflow-intel
177
+ # tensorflow-io-gcs-filesystem
178
+ termcolor
179
+ tf_keras
180
+ threadpoolctl
181
+ tifffile
182
+ tiktoken
183
+ tokenizers
184
+ toml
185
+ tomlkit
186
+ tornado
187
+ # torch==2.7.1+cu118
188
+ # torchvision==0.22.1+cu118
189
+ # torchaudio==2.7.1+cu118
190
+ tqdm
191
+ traitlets
192
+ transformers
193
+ typer
194
+ typer-slim
195
+ typing-inspect
196
+ typing-inspection
197
+ typing_extensions
198
+ tzdata
199
+ urllib3
200
+ uuid_utils
201
+ uvicorn
202
+ watchdog
203
+ wcwidth
204
+ websockets
205
+ Werkzeug
206
+ wrapt
207
+ xxhash
208
+ yarl
209
+ zstandard
210
+
211
+ tensorflow==2.15.1
212
+ tf-keras==2.15.1
213
+ numpy<2
214
+ protobuf<4
215
+ gradio>=4.0,<5.0
signature_model_tfdata.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02a50f5ec38a218287eb03e2f70a468237ff0a998110175f65e87f676fca0d41
3
+ size 23635350